input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
from . import config
from .metadata import metadata
from . import linesholder
from . import linesops
from .. import SEED_AVG, SEED_LAST, SEED_SUM, SEED_NONE, SEED_ZERO, SEED_ZFILL
import numpy as np
import pandas as pd
__all__ = ['Line', 'Lines']
def _generate(cls, bases, dct, name='', klass=None, **kwargs):
# If "name" is defined (inputs, outputs) it overrides any previous
# definition from the base clases.
# An extension can be done by using "name_extend" (inputs_extend) in which
# case the definition will be appended to that of the base classes
# In case of a redefinition, automatic mappings to the existing definitions
# (by index) will be done to ensure "instances" do still work in base
# classes when going the super route
# Manual mappings can also be defined if a definition is a dictionary like
# in:
# outputs = {'atr': 'tr'}
# In this case 'atr' is the new output and the base class had a 'tr' output
# and now whenenver 'tr' is referenced it will point to 'atr'
# Get actual lines definition and that of the bases
clsdefs = dct.get(name, ()) # new defs
# support remapping lines in subclasses
cdefs = [] # collect final single new definitions
defmappings = {} # collect any mappings
# one can specify a single input (str) or single remapping (dict)
if isinstance(clsdefs, (dict, str,)):
clsdefs = [clsdefs] # unpacked below
for clsdef in clsdefs:
# if a "line" def contains a list or a tuple, it is expected to have 2
# elements defining a remapping. key=>val where key is the new name and
# value is the old name, defined in the base class. Make it a dict to
# support the general case in which it was already a dict
if isinstance(clsdef, (list, tuple,)):
clsdef = dict([clsdef]) # and go to dict case
if isinstance(clsdef, dict):
cdefs.extend(list(clsdef))
defmappings.update(clsdef) # store mapping to genreate properties
else: # assume str or else detect and raise exception if not
cdefs.append(clsdef)
# After having parsed mappings in dict form, create the actual definition
clsdefs = tuple(cdefs)
# Gather base definitions - needed here to do mappings
lbases = (getattr(base, name, ()) for base in bases)
lbdefs = tuple(ldef for lbase in lbases for ldef in lbase)
if clsdefs: # a new definition was made
final_defs = clsdefs
for clsdef, lbdef in zip(clsdefs, lbdefs): # create automappings
if lbdef in clsdefs: # cannot remap if exists in current defs
continue
defmappings.setdefault(clsdef, lbdef)
else:
# no new definition, see if _extend has been put in place
clsdefs = dct.get(name + '_extend', ()) # new defs
if isinstance(clsdefs, str):
clsdefs = (clsdefs,) # unpacked below
final_defs = lbdefs + clsdefs
# removed remapped lines from definitions
remapped = list(defmappings.values())
# retain last inputs defs - super readable and pythonic one-liner
lines = tuple(reversed(list(dict.fromkeys(reversed(final_defs)))))
lines = tuple(x for x in lines if x not in remapped)
setattr(cls, name, lines) # install all lines defs
# Create base dictionary for subclassing via typ
clsdct = dict(__module__=cls.__module__, __slots__=list(lines))
# Create properties for attribute retrieval of old line
propdct = {}
for name, alias in defmappings.items():
def get_alias_to_name(self):
return getattr(self, name)
def set_alias_to_name(self, value):
setattr(self, name, value)
propdct[alias] = property(get_alias_to_name, set_alias_to_name)
clsdct.update(propdct) # add properties for alias remapping
clsname = name.capitalize() + cls.__name__ # decide name
return type(clsname, (klass,), clsdct) # subclass and return
def binary_op(name):
    """Install *name* as a binary operation (e.g. ``__add__``, ``__gt__``)
    on the line class via ``linesops.install_cls``.

    The generated method honors the minimum period of both operands: the
    operation only runs over the valid range and the leading positions are
    left as NaN.
    """
    def real_binary_op(self, other, *args, **kwargs):
        # the larger minperiod of the two operands governs the result
        period = max(self._minperiod, getattr(other, '_minperiod', 1))
        start = period - 1  # minperiod is 1-based; convert to 0-based index
        # NaN carrier spanning the full index of this line
        out = pd.Series(np.nan, index=self._series.index)
        # unwrap the other operand and align it to the valid range
        rhs = getattr(other, '_series', other)
        if isinstance(rhs, pd.Series):
            rhs = rhs[start:]
        # fetch the pandas operation from the capped view, run it, store it
        op = getattr(self._series[start:], name)
        res = op(rhs, *args, **kwargs)
        out[start:] = res
        out = out.astype(res.dtype, copy=False)  # keep the result dtype
        return self._clone(out, period=period)  # new line with the minperiod

    linesops.install_cls(name=name, attr=real_binary_op)
def standard_op(name, parg=None, sargs=False, skwargs=False):
    """Install *name* as a standard pandas-series operation on the line class.

    :param parg: name of a kwarg whose value increases the result minperiod
    :param sargs: pass positional args through ``_minperiodize``
    :param skwargs: pass keyword args through ``_minperiodize``
    """
    def real_standard_op(self, *args, **kwargs):
        # NaN carrier spanning the full index
        out = pd.Series(np.nan, index=self._series.index)
        # only the selected argument groups participate in minperiod calcs
        cand_args = args if sargs else tuple()
        cand_kwargs = kwargs if skwargs else {}
        minperiod, minidx, cand_args, cand_kwargs = \
            self._minperiodize(*cand_args, **cand_kwargs)
        if sargs:
            args = cand_args
        if skwargs:
            kwargs = cand_kwargs
        # run the operation on a view capped to the max minperiod
        stdop = getattr(self._series[minidx:], name)
        res = stdop(*args, **kwargs)
        out[minidx:] = res
        out = out.astype(res.dtype, copy=False)  # keep dtype intact
        line = self._clone(out, period=minperiod)
        if parg:  # the operation itself may enlarge the minperiod
            # NOTE(review): assumes callers always supply *parg* in kwargs;
            # kwargs.get(parg) would be None otherwise -- confirm upstream
            line._minperiod += kwargs.get(parg)
        return line

    linesops.install_cls(name=name, attr=real_standard_op)
def reduction_op(name, sargs=False, *args, **kwargs):
    """Install *name* as a reduction (series -> scalar) on the line class.

    :param sargs: pass the call's positional args through ``_minperiodize``
    (outer ``*args``/``**kwargs`` are accepted for interface compatibility
    but unused)
    """
    def real_reduction_op(self, *args, **kwargs):
        if not sargs:
            minidx = self._minperiod - 1  # 1-based period -> 0-based index
        else:
            _, minidx, args, _ = self._minperiodize(*args)
        # reduce only the valid (post-minperiod) portion of the series
        op = getattr(self._series[minidx:], name)
        return op(*args, **kwargs)

    linesops.install_cls(name=name, attr=real_reduction_op)
# Below if _ewm is called
#
# - Calculating the p1:p2 range which will be used to calculate the
# single seed value with an arithmetic average (i.e.: "mean")
# The following are true for p1 and p2
# - p1 >= 0
# - p2 >= (p1 + self.p.period)
# - Creating a [0:p2] long seed array filled with NaN
# - Calculating the mean of input[p1:p2] and putting it a p2
# - Concatenating seed array + rest data and storing it at outputs[0],
# (output name is unknown but: subclasses will have an output)
# The parameter "poffset" allows starting the calculation at an offset. This
# is used to replicate the internal ta-lib behavior with ema when
# calculating the fast ema of the macd, where the start of the delivery of
# data is offset to the period of the slow ema.
# For regular usage, poffset is always 0 and plays no role. If poffset
# didn't exist, the calculation of p1 and p2 would be simpler
# - p1 = self._minperiod - 1
# - p2 = p1 + self.p.period
#
# but due to poffset the calculation is made backwards
# - poffset = (poffset or period) # assume here poffset > period
# - p2 = self._minperiod - 1 + poffset # seed end calc
# - p1 = p2 - period # beginning of seed calculation
def multifunc_op(name, parg=None, propertize=False):
class _MultiFunc_Op:
def __init__(self, line, *args, **kwargs):
# plethora of vals needed later in __getattr__/__getitem__
self._is_seeded = False
self._line = line
self._series = series = line._series
self._minperiod = line._minperiod
# if the end user passes alpha=None, it means that the alpha
# calculation for an ewm will be done directy by the caller using
# apply. This can only be achieved if instead of delivering ewm,
# rolling(window=2) is returned (the end user should not do that,
# because the minperiod calculations would be off)
self._alpha_ = None
lsname = name.lstrip('_') # left stripped name (lsname)
# get/pop period related parameter ... as needed for multi-ewm
if lsname == 'ewm':
if 'alpha' in kwargs: # all bets are on 'alpha'
# period cannot be recovered, force the user to specify it
# use a default value of 0 to indicate that the period of
# the calling line has to be used even if alphas carry a
# period. See below the alpha period check against offset
self._pval = kwargs.pop('span', 0)
alpha = kwargs['alpha'] # it | |
root of unity, use the cyclotomic algorithm
algorithm = 'cyclotomic'
break
else:
num = prod(one - q**i for i in range(n-k+1, n+1))
try:
try:
return num // denom
except TypeError:
return num / denom
except (TypeError, ZeroDivisionError):
# use substitution instead
return q_binomial(n, k)(q)
if algorithm == 'cyclotomic':
from sage.rings.polynomial.cyclotomic import cyclotomic_value
return prod(cyclotomic_value(d, q)
for d in range(2, n + 1)
if (n//d) != (k//d) + ((n-k)//d))
else:
raise ValueError("unknown algorithm {!r}".format(algorithm))
def gaussian_binomial(n, k, q=None, algorithm='auto'):
    r"""
    This is an alias of :func:`q_binomial`.

    See :func:`q_binomial` for the full documentation.

    EXAMPLES::

        sage: gaussian_binomial(4,2)
        q^4 + q^3 + 2*q^2 + q + 1
    """
    # simple delegation; keyword form keeps the intent explicit
    return q_binomial(n, k, q=q, algorithm=algorithm)
def q_multinomial(seq, q=None, binomial_algorithm='auto'):
    r"""
    Return the `q`-multinomial coefficient.

    This is also known as the Gaussian multinomial coefficient, and is
    defined by

    .. MATH::

        \binom{n}{k_1, k_2, \ldots, k_m}_q = \frac{[n]_q!}
        {[k_1]_q! [k_2]_q! \cdots [k_m]_q!}

    where `n = k_1 + k_2 + \cdots + k_m`.

    If `q` is unspecified, then the variable is the generator `q` for
    a univariate polynomial ring over the integers.

    INPUT:

    - ``seq`` -- an iterable of the values `k_1` to `k_m` defined above

    - ``q`` -- (default: ``None``) the variable `q`; if ``None``, then use a
      default variable in `\ZZ[q]`

    - ``binomial_algorithm`` -- (default: ``'auto'``) the algorithm to use
      in :meth:`~sage.combinat.q_analogues.q_binomial`; see possible values
      there

    ALGORITHM:

    We use the equivalent formula

    .. MATH::

        \binom{k_1 + \cdots + k_m}{k_1, \ldots, k_m}_q
        = \prod_{i=1}^m \binom{\sum_{j=1}^i k_j}{k_i}_q.

    EXAMPLES::

        sage: from sage.combinat.q_analogues import q_multinomial
        sage: q_multinomial([1,2,1])
        q^5 + 2*q^4 + 3*q^3 + 3*q^2 + 2*q + 1
        sage: q_multinomial([1,2,1], q=1) == multinomial([1,2,1])
        True
        sage: q_multinomial((3,2)) == q_binomial(5,3)
        True
        sage: q_multinomial([])
        1
    """
    # one q-binomial factor per element, taken against the running sum
    factors = []
    running_total = 0
    for part in seq:
        running_total += part
        factors.append(q_binomial(running_total, part, q=q,
                                  algorithm=binomial_algorithm))
    return prod(factors)
gaussian_multinomial = q_multinomial
def q_catalan_number(n, q=None):
    """
    Return the `q`-Catalan number of index `n`.

    If `q` is unspecified, then it defaults to using the generator `q` for
    a univariate polynomial ring over the integers.

    There are several `q`-Catalan numbers. This procedure
    returns the one which can be written using the `q`-binomial coefficients.

    EXAMPLES::

        sage: from sage.combinat.q_analogues import q_catalan_number
        sage: q_catalan_number(4)
        q^12 + q^10 + q^9 + 2*q^8 + q^7 + 2*q^6 + q^5 + 2*q^4 + q^3 + q^2 + 1
        sage: p = ZZ['p'].0
        sage: q_catalan_number(4,p)
        p^12 + p^10 + p^9 + 2*p^8 + p^7 + 2*p^6 + p^5 + 2*p^4 + p^3 + p^2 + 1

    The `q`-Catalan number of index `n` is only defined for `n` a
    nonnegative integer (:trac:`11411`)::

        sage: q_catalan_number(-2)
        Traceback (most recent call last):
        ...
        ValueError: argument (-2) must be a nonnegative integer

    TESTS::

        sage: q_catalan_number(3).parent()
        Univariate Polynomial Ring in q over Integer Ring
        sage: q_catalan_number(0).parent()
        Univariate Polynomial Ring in q over Integer Ring
    """
    if n in ZZ:
        if n in {0, 1}:
            return q_int(1, q)
        if n >= 2:
            numerator = prod(q_int(j, q) for j in range(n + 2, 2 * n + 1))
            denominator = prod(q_int(j, q) for j in range(2, n + 1))
            return numerator // denominator
    # non-integer or negative input
    raise ValueError("argument (%s) must be a nonnegative integer" % n)
def qt_catalan_number(n):
    """
    Return the `q,t`-Catalan number of index `n`.

    EXAMPLES::

        sage: from sage.combinat.q_analogues import qt_catalan_number
        sage: qt_catalan_number(1)
        1
        sage: qt_catalan_number(2)
        q + t
        sage: qt_catalan_number(3)
        q^3 + q^2*t + q*t^2 + t^3 + q*t
        sage: qt_catalan_number(4)
        q^6 + q^5*t + q^4*t^2 + q^3*t^3 + q^2*t^4 + q*t^5 + t^6 + q^4*t + q^3*t^2 + q^2*t^3 + q*t^4 + q^3*t + q^2*t^2 + q*t^3

    The `q,t`-Catalan number of index `n` is only defined for `n` a
    nonnegative integer (:trac:`11411`)::

        sage: qt_catalan_number(-2)
        Traceback (most recent call last):
        ...
        ValueError: Argument (-2) must be a nonnegative integer.
    """
    # guard clause instead of if/else wrapping the whole body
    if n not in ZZ or n < 0:
        raise ValueError("Argument (%s) must be a nonnegative integer." % n)
    ZZqt = ZZ['q', 't']
    # tally Dyck words by their (area, bounce) bi-statistic
    counts = {}
    for dw in DyckWords(n):
        key = (dw.area(), dw.bounce())
        counts[key] = counts.get(key, 0) + 1
    return ZZqt(counts)
def q_pochhammer(n, a, q=None):
    r"""
    Return the `q`-Pochhammer `(a; q)_n`.

    The `q`-Pochhammer symbol is defined by

    .. MATH::

        (a; q)_n = \prod_{k=0}^{n-1} (1 - aq^k)

    with `(a; q)_0 = 1` for all `a, q` and `n \in \NN`.
    By using the identity

    .. MATH::

        (a; q)_n = \frac{(a; q)_{\infty}}{(aq^n; q)_{\infty}},

    we can extend the definition to `n < 0` by

    .. MATH::

        (a; q)_n = \frac{1}{(aq^n; q)_{-n}}
        = \prod_{k=1}^{-n} \frac{1}{1 - a/q^k}.

    EXAMPLES::

        sage: from sage.combinat.q_analogues import q_pochhammer
        sage: q_pochhammer(3, 1/7)
        6/343*q^3 - 6/49*q^2 - 6/49*q + 6/7
        sage: q_pochhammer(3, 3)
        -18*q^3 + 6*q^2 + 6*q - 2
        sage: q_pochhammer(3, 1)
        0
        sage: R.<q> = ZZ[]
        sage: q_pochhammer(4, q)
        q^10 - q^9 - q^8 + 2*q^5 - q^2 - q + 1
        sage: q_pochhammer(4, q^2)
        q^14 - q^12 - q^11 - q^10 + q^8 + 2*q^7 + q^6 - q^4 - q^3 - q^2 + 1
        sage: q_pochhammer(-3, q)
        1/(-q^9 + q^7 + q^6 + q^5 - q^4 - q^3 - q^2 + 1)

    TESTS::

        sage: q_pochhammer(0, 2)
        1
        sage: q_pochhammer(0, 1)
        1
        sage: q_pochhammer(0, var('a'))
        1

    We check that :trac:`25715` is fixed::

        sage: q_pochhammer(0, 3r)
        1

    REFERENCES:

    - :wikipedia:`Q-Pochhammer_symbol`
    """
    if q is None:
        q = ZZ['q'].gen()
    if n not in ZZ:
        raise ValueError("{} must be an integer".format(n))
    R = parent(q)
    one = R(1)
    # nonnegative n: the direct product; negative n: the extended definition
    if n >= 0:
        return R.prod((one - a * q**k) for k in range(n))
    return R.prod(one / (one - a / q**-k) for k in range(1, -n + 1))
@cached_function(key=lambda t, q: (_Partitions(t), q))
def q_jordan(t, q=None):
    r"""
    Return the `q`-Jordan number of `t`.

    If `q` is the power of a prime number, the output is the number of
    complete flags in `\GF{q}^N` (where `N` is the size of `t`) stable
    under a linear nilpotent endomorphism `f_t` whose Jordan type is
    given by `t`, i.e. such that for all `i`:

    .. MATH::

        \dim (\ker f_t^i) = t[0] + \cdots + t[i-1]

    If `q` is unspecified, then it defaults to using the generator `q` for
    a univariate polynomial ring over the integers.

    The result is cached.

    INPUT:

    - ``t`` -- an integer partition, or an argument accepted by
      :class:`Partition`

    - ``q`` -- (default: ``None``) the variable `q`; if ``None``, then use a
      default variable in `\ZZ[q]`

    EXAMPLES::

        sage: from sage.combinat.q_analogues import q_jordan
        sage: [q_jordan(mu, 2) for mu in Partitions(5)]
        [9765, 1029, 213, 93, 29, 9, 1]
        sage: [q_jordan(mu, 2) for mu in Partitions(6)]
        [615195, 40635, 5643, 2331, 1491, 515, 147, 87, 47, 11, 1]
        sage: q_jordan([3,2,1])
        16*q^4 + 24*q^3 + 14*q^2 + 5*q + 1
        sage: q_jordan([2,1], x)
        2*x + 1

    If the partition is trivial (i.e. has only one part), we get
    the `q`-factorial (in this case, the nilpotent endomorphism is
    necessarily `0`)::

        sage: from sage.combinat.q_analogues import q_factorial
        sage: q_jordan([5]) == q_factorial(5)
        True
        sage: q_jordan([11], 5) == q_factorial(11, 5)
        True

    TESTS::

        sage: all(multinomial(mu.conjugate()) == q_jordan(mu, 1) for mu in Partitions(6))
        True

    AUTHOR:

    - <NAME> (2012-06-29)
    """
    if q is None:
        q = ZZ['q'].gen()
    R = parent(q)
    # empty (all-zero) partition: single flag
    if all(part == 0 for part in t):
        return R(1)
    total = R(0)
    tj = 0
    # walk the parts from the smallest up, recursing on each strict descent
    for i in reversed(range(len(t))):
        ti = t[i]
        if ti > tj:
            smaller = list(t)
            smaller[i] -= 1
            total += q_jordan(smaller, q) * q**tj * q_int(ti - tj, q)
        tj = ti
    return total
def q_subgroups_of_abelian_group(la, mu, q=None, algorithm='birkhoff'):
r"""
Return the `q`-number of subgroups of type ``mu`` in a finite abelian
group of type ``la``.
INPUT:
- ``la`` -- type of the ambient group as a :class:`Partition`
- ``mu`` -- type of the subgroup as a :class:`Partition`
- ``q`` -- (default: ``None``) an indeterminate or a prime number; if
``None``, this defaults to `q \in \ZZ[q]`
- ``algorithm`` -- (default: ``'birkhoff'``) the algorithm to use can be
one of the following:
- ``'birkhoff`` -- use the Birkhoff formula from [Bu87]_
- ``'delsarte'`` -- use the formula from [Delsarte48]_
OUTPUT:
The number of subgroups of type ``mu`` in a group of type ``la`` as a
polynomial in ``q``.
ALGORITHM:
Let `q` | |
<reponame>ecosoft-odoo/mh-doodba
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp.tools.translate import _
class sale_shop(osv.osv):
    """Extend ``sale.shop`` with the warehouse the shop ships from."""
    _inherit = "sale.shop"
    _columns = {
        # warehouse used to source deliveries for this shop's sale orders
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
    }

sale_shop()  # legacy OpenERP idiom: instantiating registers the model
class sale_order(osv.osv):
_inherit = "sale.order"
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate a sale order with its delivery state reset.

    The copy starts undelivered and without any related pickings.
    """
    default = default or {}
    default['shipped'] = False
    default['picking_ids'] = []
    return super(sale_order, self).copy(cr, uid, id, default, context=context)
def shipping_policy_change(self, cr, uid, ids, policy, context=None):
    """Onchange handler: derive ``invoice_quantity`` from the order policy.

    'picking' invoices shipped quantities; any other policy (including
    'prepaid') invoices ordered quantities.
    """
    if not policy:
        return {}
    inv_qty = 'procurement' if policy == 'picking' else 'order'
    return {'value': {'invoice_quantity': inv_qty}}
def write(self, cr, uid, ids, vals, context=None):
    """Keep ``invoice_quantity`` in sync when ``order_policy`` changes."""
    policy = vals.get('order_policy', False)
    if policy == 'prepaid':
        vals['invoice_quantity'] = 'order'
    elif policy == 'picking':
        vals['invoice_quantity'] = 'procurement'
    return super(sale_order, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
    """Create the order, syncing ``invoice_quantity`` with ``order_policy``."""
    policy = vals.get('order_policy', False)
    if policy == 'prepaid':
        vals['invoice_quantity'] = 'order'
    elif policy == 'picking':
        vals['invoice_quantity'] = 'procurement'
    return super(sale_order, self).create(cr, uid, vals, context=context)
# Functional-field helper: computes the delivered ("picked") percentage per order
def _picked_rate(self, cr, uid, ids, name, arg, context=None):
    """Functional field: percentage of picked (delivered) quantity per order.

    Aggregates the stock moves of all related pickings in a single SQL
    query.  Incoming ('in') pickings are treated as returns and deducted
    from both totals.  Fully shipped orders report 100.0 directly.
    """
    if not ids:
        return {}
    res = {}
    tmp = {}
    # per-order accumulators: qty picked vs total qty to pick
    for id in ids:
        tmp[id] = {'picked': 0.0, 'total': 0.0}
    # one aggregate query over all orders instead of browsing per order
    cr.execute('''SELECT
            p.sale_id as sale_order_id, sum(m.product_qty) as nbr, mp.state as procurement_state, m.state as move_state, p.type as picking_type
        FROM
            stock_move m
        LEFT JOIN
            stock_picking p on (p.id=m.picking_id)
        LEFT JOIN
            procurement_order mp on (mp.move_id=m.id)
        WHERE
            p.sale_id IN %s GROUP BY m.state, mp.state, p.sale_id, p.type''', (tuple(ids),))
    for item in cr.dictfetchall():
        if item['move_state'] == 'cancel':
            continue
        if item['picking_type'] == 'in':#this is a returned picking
            tmp[item['sale_order_id']]['total'] -= item['nbr'] or 0.0 # Deducting the return picking qty
            if item['procurement_state'] == 'done' or item['move_state'] == 'done':
                tmp[item['sale_order_id']]['picked'] -= item['nbr'] or 0.0
        else:
            tmp[item['sale_order_id']]['total'] += item['nbr'] or 0.0
            if item['procurement_state'] == 'done' or item['move_state'] == 'done':
                tmp[item['sale_order_id']]['picked'] += item['nbr'] or 0.0
    # translate accumulators into a percentage; shipped orders are 100%
    for order in self.browse(cr, uid, ids, context=context):
        if order.shipped:
            res[order.id] = 100.0
        else:
            res[order.id] = tmp[order.id]['total'] and (100.0 * tmp[order.id]['picked'] / tmp[order.id]['total']) or 0.0
    return res
_columns = {
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True,help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'picking_ids': fields.one2many('stock.picking.out', 'sale_id', 'Related Picking', readonly=True, help="This is a list of delivery orders that has been generated for this sales order."),
'shipped': fields.boolean('Delivered', readonly=True, help="It indicates that the sales order has been delivered. This field is updated only after the scheduler(s) have been launched."),
'picked_rate': fields.function(_picked_rate, string='Picked', type='float'),
'invoice_quantity': fields.selection([('order', 'Ordered Quantities'), ('procurement', 'Shipped Quantities')], 'Invoice on',
help="The sales order will automatically create the invoice proposition (draft invoice).\
You have to choose if you want your invoice based on ordered ", required=True, readonly=True, states={'draft': [('readonly', False)]}),
}
_defaults = {
'picking_policy': 'direct',
'order_policy': 'manual',
'invoice_quantity': 'order',
}
# Form filling
def unlink(self, cr, uid, ids, context=None):
    """Delete orders; only draft or cancelled orders may be removed."""
    unlink_ids = []
    for rec in self.read(cr, uid, ids, ['state'], context=context):
        if rec['state'] not in ('draft', 'cancel'):
            raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it.\nTo do so, you must first cancel related picking for delivery orders.'))
        unlink_ids.append(rec['id'])
    # call the base osv implementation directly, as the original did
    return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def action_view_delivery(self, cr, uid, ids, context=None):
    '''
    This function returns an action that displays existing delivery orders
    of the given sales order ids. It is a list view, or a form view if
    there is only one delivery order to show.
    '''
    mod_obj = self.pool.get('ir.model.data')
    act_obj = self.pool.get('ir.actions.act_window')
    # base action: the generic stock picking tree
    result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree')
    id = result and result[1] or False
    result = act_obj.read(cr, uid, [id], context=context)[0]
    #compute the number of delivery orders to display
    pick_ids = []
    for so in self.browse(cr, uid, ids, context=context):
        pick_ids += [picking.id for picking in so.picking_ids]
    #choose the view_mode accordingly
    if len(pick_ids) > 1:
        # several deliveries: keep the tree view, restrict the domain
        result['domain'] = "[('id','in',["+','.join(map(str, pick_ids))+"])]"
    else:
        # zero or one delivery: switch to the form view
        res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
        result['views'] = [(res and res[1] or False, 'form')]
        result['res_id'] = pick_ids and pick_ids[0] or False
    return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
    """Create invoices; for 'On Delivery Order' policy, also flag the
    related pickings as invoiced."""
    picking_obj = self.pool.get('stock.picking')
    res = super(sale_order, self).action_invoice_create(
        cr, uid, ids, grouped=grouped, states=states,
        date_invoice=date_invoice, context=context)
    for order in self.browse(cr, uid, ids, context=context):
        if order.order_policy == 'picking':
            pick_ids = [pick.id for pick in order.picking_ids]
            picking_obj.write(cr, uid, pick_ids, {'invoice_state': 'invoiced'})
    return res
def action_cancel(self, cr, uid, ids, context=None):
    """Cancel sale orders after validating/cancelling their pickings.

    Raises if any related picking is neither draft nor cancelled.  For
    already-cancelled pickings, pushes the attached procurements through
    their workflow check before cancelling the remaining pickings.
    """
    wf_service = netsvc.LocalService("workflow")
    if context is None:
        context = {}
    sale_order_line_obj = self.pool.get('sale.order.line')  # NOTE(review): unused in this method
    proc_obj = self.pool.get('procurement.order')
    for sale in self.browse(cr, uid, ids, context=context):
        for pick in sale.picking_ids:
            if pick.state not in ('draft', 'cancel'):
                raise osv.except_osv(
                    _('Cannot cancel sales order!'),
                    _('You must first cancel all delivery order(s) attached to this sales order.'))
            if pick.state == 'cancel':
                for mov in pick.move_lines:
                    proc_ids = proc_obj.search(cr, uid, [('move_id', '=', mov.id)])
                    if proc_ids:
                        for proc in proc_ids:
                            # re-run the workflow check on each procurement
                            wf_service.trg_validate(uid, 'procurement.order', proc, 'button_check', cr)
    for r in self.read(cr, uid, ids, ['picking_ids']):
        for pick in r['picking_ids']:
            # cancel every related picking through the workflow engine
            wf_service.trg_validate(uid, 'stock.picking', pick, 'button_cancel', cr)
    return super(sale_order, self).action_cancel(cr, uid, ids, context=context)
def action_wait(self, cr, uid, ids, context=None):
    """Confirm the orders; product-less orders cannot invoice on delivery,
    so their policy falls back to 'manual'."""
    res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
    for order in self.browse(cr, uid, ids):
        noprod = self.test_no_product(cr, uid, order, context)
        if noprod and order.order_policy == 'picking':
            self.write(cr, uid, [order.id], {'order_policy': 'manual'}, context=context)
    return res
def procurement_lines_get(self, cr, uid, ids, *args):
    """Return the procurement ids attached to the orders' lines."""
    res = []
    for order in self.browse(cr, uid, ids, context={}):
        res.extend(line.procurement_id.id
                   for line in order.order_line if line.procurement_id)
    return res
# if mode == 'finished':
# returns True if all lines are done, False otherwise
# if mode == 'canceled':
# returns True if there is at least one canceled line, False otherwise
def test_state(self, cr, uid, ids, mode, *args):
assert mode in ('finished', 'canceled'), _("invalid mode for test_state")
finished = True
canceled = False
write_done_ids = []
write_cancel_ids = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
if (not line.procurement_id) or (line.procurement_id.state=='done'):
if line.state != 'done':
write_done_ids.append(line.id)
else:
finished = False
if line.procurement_id:
if (line.procurement_id.state == 'cancel'):
canceled = True
if line.state != 'exception':
write_cancel_ids.append(line.id)
if write_done_ids:
self.pool.get('sale.order.line').write(cr, uid, write_done_ids, {'state': 'done'})
if write_cancel_ids:
self.pool.get('sale.order.line').write(cr, uid, write_cancel_ids, {'state': 'exception'})
if mode == | |
not None:
oprot.writeFieldBegin('responseSize', TType.I64, 4)
oprot.writeI64(self.responseSize)
oprot.writeFieldEnd()
if self.clientAddress is not None:
oprot.writeFieldBegin('clientAddress', TType.STRING, 5)
oprot.writeString(self.clientAddress.encode('utf-8') if sys.version_info[0] == 2 else self.clientAddress)
oprot.writeFieldEnd()
if self.serverClass is not None:
oprot.writeFieldBegin('serverClass', TType.STRING, 6)
oprot.writeString(self.serverClass.encode('utf-8') if sys.version_info[0] == 2 else self.serverClass)
oprot.writeFieldEnd()
if self.methodName is not None:
oprot.writeFieldBegin('methodName', TType.STRING, 7)
oprot.writeString(self.methodName.encode('utf-8') if sys.version_info[0] == 2 else self.methodName)
oprot.writeFieldEnd()
if self.callDetails is not None:
oprot.writeFieldBegin('callDetails', TType.STRING, 8)
oprot.writeString(self.callDetails.encode('utf-8') if sys.version_info[0] == 2 else self.callDetails)
oprot.writeFieldEnd()
if self.param is not None:
oprot.writeFieldBegin('param', TType.STRING, 9)
oprot.writeString(self.param.encode('utf-8') if sys.version_info[0] == 2 else self.param)
oprot.writeFieldEnd()
if self.userName is not None:
oprot.writeFieldBegin('userName', TType.STRING, 10)
oprot.writeString(self.userName.encode('utf-8') if sys.version_info[0] == 2 else self.userName)
oprot.writeFieldEnd()
if self.multiGetsCount is not None:
oprot.writeFieldBegin('multiGetsCount', TType.I32, 11)
oprot.writeI32(self.multiGetsCount)
oprot.writeFieldEnd()
if self.multiMutationsCount is not None:
oprot.writeFieldBegin('multiMutationsCount', TType.I32, 12)
oprot.writeI32(self.multiMutationsCount)
oprot.writeFieldEnd()
if self.multiServiceCalls is not None:
oprot.writeFieldBegin('multiServiceCalls', TType.I32, 13)
oprot.writeI32(self.multiServiceCalls)
oprot.writeFieldEnd()
if self.regionName is not None:
oprot.writeFieldBegin('regionName', TType.STRING, 14)
oprot.writeString(self.regionName.encode('utf-8') if sys.version_info[0] == 2 else self.regionName)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
    """Raise TProtocolException if any required field is still None.

    ``regionName`` (field 14) is optional and deliberately not checked.
    """
    required = (
        'startTime', 'processingTime', 'queueTime', 'responseSize',
        'clientAddress', 'serverClass', 'methodName', 'callDetails',
        'param', 'userName', 'multiGetsCount', 'multiMutationsCount',
        'multiServiceCalls',
    )
    # same check order and identical messages as the unrolled version
    for fld in required:
        if getattr(self, fld) is None:
            raise TProtocolException(message='Required field %s is unset!' % fld)
    return
def __repr__(self):
    """Debug representation listing every instance attribute."""
    pairs = ('%s=%r' % item for item in self.__dict__.items())
    return '%s(%s)' % (self.__class__.__name__, ', '.join(pairs))
def __eq__(self, other):
    """Equal when the other object has the same class and attributes."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical inverse of ``__eq__``."""
    return not self == other
class TIOError(TException):
    """
    A TIOError exception signals that an error occurred communicating
    to the HBase master or a HBase region server. Also used to return
    more general HBase error conditions.

    Attributes:
     - message
     - canRetry
    """

    def __init__(self, message=None, canRetry=None,):
        self.message = message
        self.canRetry = canRetry

    def read(self, iprot):
        # Fast path: accelerated C decoder when available
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the fields until STOP, skipping unknown ones
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # thrift strings arrive as bytes on Python 2
                    self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.canRetry = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder when available
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Slow path: emit only the fields that are set
        oprot.writeStructBegin('TIOError')
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
            oprot.writeFieldEnd()
        if self.canRetry is not None:
            oprot.writeFieldBegin('canRetry', TType.BOOL, 2)
            oprot.writeBool(self.canRetry)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # no required fields: nothing to check
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TIllegalArgument(TException):
    """
    A TIllegalArgument exception indicates an illegal or invalid
    argument was passed into a procedure.
    Attributes:
     - message: human-readable description of the invalid argument
    """

    def __init__(self, message=None,):
        self.message = message

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decoder when the transport/spec allow it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk the fields one by one until the STOP marker.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Thrift strings arrive as bytes on Python 2 only.
                    self.message = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('TIllegalArgument')
        # Optional field is only written when set.
        if self.message is not None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on this struct.
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register each generated struct and attach its thrift_spec.
# Every spec tuple entry has the form:
#   (field id, wire type, field name, type arguments, default value)
# with None placeholders at unused field-id positions so the tuple can be
# indexed directly by field id.
all_structs.append(TTimeRange)
TTimeRange.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'minStamp', None, None, ),  # 1
    (2, TType.I64, 'maxStamp', None, None, ),  # 2
)
all_structs.append(TColumn)
TColumn.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
)
all_structs.append(TColumnValue)
TColumnValue.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.STRING, 'value', 'BINARY', None, ),  # 3
    (4, TType.I64, 'timestamp', None, None, ),  # 4
    (5, TType.STRING, 'tags', 'BINARY', None, ),  # 5
    (6, TType.BYTE, 'type', None, None, ),  # 6
)
all_structs.append(TColumnIncrement)
TColumnIncrement.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'family', 'BINARY', None, ),  # 1
    (2, TType.STRING, 'qualifier', 'BINARY', None, ),  # 2
    (3, TType.I64, 'amount', None, 1, ),  # 3 (defaults to 1)
)
all_structs.append(TResult)
TResult.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columnValues', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.BOOL, 'stale', None, False, ),  # 3
    (4, TType.BOOL, 'partial', None, False, ),  # 4
)
all_structs.append(TAuthorization)
TAuthorization.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'labels', (TType.STRING, 'UTF8', False), None, ),  # 1
)
all_structs.append(TCellVisibility)
TCellVisibility.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'expression', 'UTF8', None, ),  # 1
)
all_structs.append(TGet)
TGet.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumn, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    (4, TType.STRUCT, 'timeRange', [TTimeRange, None], None, ),  # 4
    (5, TType.I32, 'maxVersions', None, None, ),  # 5
    (6, TType.STRING, 'filterString', 'BINARY', None, ),  # 6
    (7, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 7
    (8, TType.STRUCT, 'authorizations', [TAuthorization, None], None, ),  # 8
    (9, TType.I32, 'consistency', None, None, ),  # 9
    (10, TType.I32, 'targetReplicaId', None, None, ),  # 10
    (11, TType.BOOL, 'cacheBlocks', None, None, ),  # 11
    (12, TType.I32, 'storeLimit', None, None, ),  # 12
    (13, TType.I32, 'storeOffset', None, None, ),  # 13
    (14, TType.BOOL, 'existence_only', None, None, ),  # 14
    (15, TType.STRING, 'filterBytes', 'BINARY', None, ),  # 15
)
all_structs.append(TPut)
TPut.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columnValues', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    None,  # 4 (field id 4 unused)
    (5, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 5
    (6, TType.I32, 'durability', None, None, ),  # 6
    (7, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 7
)
all_structs.append(TDelete)
TDelete.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumn, None], False), None, ),  # 2
    (3, TType.I64, 'timestamp', None, None, ),  # 3
    (4, TType.I32, 'deleteType', None, 1, ),  # 4 (defaults to 1)
    None,  # 5 (field id 5 unused)
    (6, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 6
    (7, TType.I32, 'durability', None, None, ),  # 7
)
all_structs.append(TIncrement)
TIncrement.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumnIncrement, None], False), None, ),  # 2
    None,  # 3 (field id 3 unused)
    (4, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 4
    (5, TType.I32, 'durability', None, None, ),  # 5
    (6, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 6
    (7, TType.BOOL, 'returnResults', None, None, ),  # 7
)
all_structs.append(TAppend)
TAppend.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'row', 'BINARY', None, ),  # 1
    (2, TType.LIST, 'columns', (TType.STRUCT, [TColumnValue, None], False), None, ),  # 2
    (3, TType.MAP, 'attributes', (TType.STRING, 'BINARY', TType.STRING, 'BINARY', False), None, ),  # 3
    (4, TType.I32, 'durability', None, None, ),  # 4
    (5, TType.STRUCT, 'cellVisibility', [TCellVisibility, None], None, ),  # 5
    (6, TType.BOOL, 'returnResults', None, None, ),  # 6
)
all_structs.append(TScan)
TScan.thrift_spec = (
None, # 0
| |
<gh_stars>0
"""
PySC2_A3C_AtariNetNew.py
A script for training and running an A3C agent on the PySC2 environment, with reference to DeepMind's paper:
[1] Vinyals, Oriol, et al. "Starcraft II: A new challenge for reinforcement learning." arXiv preprint arXiv:1708.04782 (2017).
Advantage estimation uses generalized advantage estimation from:
[2] Schulman, John, et al. "High-dimensional continuous control using generalized advantage estimation." arXiv preprint arXiv:1506.02438 (2015).
Credit goes to <NAME> for providing a reference implementation of A3C for the VizDoom environment:
https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
https://github.com/awjuliani/DeepRL-Agents
This follows the AtariNet implementation described in [1].
The agent takes as input all of the features and outputs a policy across all 524 actions, which makes it generalizable to any of the minigames supplied in SC2LE.
"""
import threading
import multiprocessing
import psutil
import numpy as np
import tensorflow as tf
import scipy.signal
from time import sleep
import os
import json
import pickle
from pysc2.env import sc2_env
from pysc2.env import environment
from pysc2.lib import actions
from pysc2.maps import mini_games
"""
Use the following command to launch Tensorboard:
tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
"""
## HELPER FUNCTIONS
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope, to_scope):
    """Build ops that copy every trainable variable from one scope to another.

    Used to pull the latest global-network parameters into a worker's
    local network before each rollout.
    """
    src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
    dst_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
    return [dst.assign(src) for src, dst in zip(src_vars, dst_vars)]
# Processes PySC2 observations
def process_observation(observation, action_spec, observation_spec):
    """Convert a raw PySC2 timestep into network-ready input arrays.

    Parameters:
        observation: a single PySC2 TimeStep.
        action_spec: environment action spec (kept for interface stability).
        observation_spec: environment observation spec (kept for interface
            stability).

    Returns:
        (reward, nonspatial_stack, minimap_stack, screen_stack, episode_end)
        where each stack carries a leading batch axis of size 1.
    """
    # Episode is over when the step type is LAST.
    episode_end = observation.step_type == environment.StepType.LAST
    reward = observation.reward
    features = observation.observation
    # Non-spatial features: log-scaled player stats plus the raw game loop
    # counter, flattened into a single (1, N) vector.
    # (Removed dead code: an initial empty-list assignment that was
    # immediately overwritten, and the unused variable_features/max_no
    # locals.)
    nonspatial_stack = np.log(features['player'].reshape(-1) + 1.)
    nonspatial_stack = np.concatenate((nonspatial_stack, features['game_loop'].reshape(-1)))
    nonspatial_stack = np.expand_dims(nonspatial_stack, axis=0)
    # Spatial minimap features, channels-last: (1, H, W, C).
    minimap_stack = np.stack(features['minimap'], axis=2)
    minimap_stack = np.expand_dims(minimap_stack, axis=0)
    # Spatial screen features, channels-last: (1, H, W, C).
    screen_stack = np.stack(features['screen'], axis=2)
    screen_stack = np.expand_dims(screen_stack, axis=0)
    return reward, nonspatial_stack, minimap_stack, screen_stack, episode_end
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
    """Return discounted cumulative sums of x: y[t] = x[t] + gamma*y[t+1]."""
    reversed_x = x[::-1]
    running_sums = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return running_sums[::-1]
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
    """Return a TF initializer whose weight columns have L2 norm `std`.

    Used for the policy (small std) and value (std=1) output layers.
    """
    def _initializer(shape, dtype=None, partition_info=None):
        weights = np.random.randn(*shape).astype(np.float32)
        column_norms = np.sqrt(np.square(weights).sum(axis=0, keepdims=True))
        weights *= std / column_norms
        return tf.constant(weights)
    return _initializer
def sample_dist(dist):
    """Sample an index from a (1, N) probability distribution.

    Draws a probability value according to the distribution, then returns
    the index of the first entry equal to the drawn value.
    """
    drawn_value = np.random.choice(dist[0], p=dist[0])
    return np.argmax(dist == drawn_value)
## ACTOR-CRITIC NETWORK
class AC_Network():
    """Actor-critic network following the Atari-net agent of [1], Sec 4.3.

    Inputs: non-spatial features (12), minimap (64x64x7), screen (64x64x17).
    Outputs: a softmax policy over the 524 base actions, independent
    policies for every action argument (spatial arguments flattened to a
    64*64 softmax), and a scalar state value. Scopes other than 'global'
    additionally build loss and gradient ops that apply updates to the
    'global' network's variables.
    """

    def __init__(self, scope, trainer, action_spec, observation_spec):
        with tf.variable_scope(scope):
            # Architecture here follows Atari-net Agent described in [1] Section 4.3
            nonspatial_size = 12
            minimap_channels = 7
            screen_channels = 17
            self.inputs_nonspatial = tf.placeholder(shape=[None,nonspatial_size], dtype=tf.float32)
            self.inputs_spatial_minimap = tf.placeholder(shape=[None,64,64,minimap_channels], dtype=tf.float32)
            self.inputs_spatial_screen = tf.placeholder(shape=[None,64,64,screen_channels], dtype=tf.float32)
            # Non-spatial branch: single tanh dense layer.
            self.nonspatial_dense = tf.layers.dense(
                inputs=self.inputs_nonspatial,
                units=32,
                activation=tf.tanh)
            # Screen branch: two ReLU conv layers (16@5x5 then 32@3x3).
            self.screen_conv1 = tf.layers.conv2d(
                inputs=self.inputs_spatial_screen,
                filters=16,
                kernel_size=[5,5],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            self.screen_conv2 = tf.layers.conv2d(
                inputs=self.screen_conv1,
                filters=32,
                kernel_size=[3,3],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            # Minimap branch: same conv stack shape as the screen branch.
            self.minimap_conv1 = tf.layers.conv2d(
                inputs=self.inputs_spatial_minimap,
                filters=16,
                kernel_size=[5,5],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            self.minimap_conv2 = tf.layers.conv2d(
                inputs=self.minimap_conv1,
                filters=32,
                kernel_size=[3,3],
                strides=[1,1],
                padding='same',
                activation=tf.nn.relu)
            # Flattened conv output sizes (product of the non-batch dims).
            screen_output_length = 1
            for dim in self.screen_conv2.get_shape().as_list()[1:]:
                screen_output_length *= dim
            minimap_output_length = 1
            for dim in self.minimap_conv2.get_shape().as_list()[1:]:
                minimap_output_length *= dim
            # Shared 256-unit state representation built from all branches.
            self.latent_vector_nonspatial = tf.layers.dense(
                inputs=tf.concat([self.nonspatial_dense, tf.reshape(self.screen_conv2,shape=[-1,screen_output_length]), tf.reshape(self.minimap_conv2,shape=[-1,minimap_output_length])], axis=1),
                units=256,
                activation=tf.nn.relu)
            # Output layers for policy and value estimations
            # 12 policy networks for base actions and arguments
            #   - All modeled independently
            #   - Spatial arguments have the x and y values modeled independently as well
            # 1 value network
            spatial_arguments = ['screen', 'minimap', 'screen2']
            self.policy_base_actions = tf.layers.dense(
                inputs=self.latent_vector_nonspatial,
                units=len(action_spec.functions),
                activation=tf.nn.softmax,
                kernel_initializer=normalized_columns_initializer(0.01))
            # One softmax head per (non-spatial argument, dimension) pair.
            self.policy_arg_nonspatial = dict()
            for arg in action_spec.types:
                if arg.name not in spatial_arguments:
                    self.policy_arg_nonspatial[arg.name] = dict()
                    for dim, size in enumerate(arg.sizes):
                        if size == 2:
                            self.policy_arg_nonspatial[arg.name][dim] = tf.layers.dense(
                                inputs=self.latent_vector_nonspatial,
                                units=size,
                                activation=tf.nn.softmax,
                                kernel_initializer=normalized_columns_initializer(1.0))
                        else:
                            self.policy_arg_nonspatial[arg.name][dim] = tf.layers.dense(
                                inputs=self.latent_vector_nonspatial,
                                units=size,
                                activation=tf.nn.softmax,
                                kernel_initializer=normalized_columns_initializer(0.01))
            # Spatial argument heads: 1x1 conv over the concatenated conv
            # maps, flattened to a softmax over all 64*64 positions.
            self.policy_arg_spatial = dict()
            self.latent_vector_spatial = dict()
            for arg in spatial_arguments:
                self.latent_vector_spatial[arg] = tf.layers.conv2d(
                    inputs=tf.concat([self.screen_conv2, self.minimap_conv2], axis=3),
                    filters=1,
                    kernel_size=[1,1],
                    strides=[1,1],
                    padding='same',
                    activation=None)
                self.policy_arg_spatial[arg] = tf.nn.softmax(tf.reshape(self.latent_vector_spatial[arg], shape=[-1, 64 * 64]))
            # Scalar state-value head.
            self.value = tf.layers.dense(
                inputs=self.latent_vector_nonspatial,
                units=1,
                kernel_initializer=normalized_columns_initializer(1.0))
            # Only the worker network need ops for loss functions and gradient updating.
            # calculates the losses
            # self.gradients - gradients of loss wrt local_vars
            # applies the gradients to update the global network
            if scope != 'global':
                self.actions_base = tf.placeholder(shape=[None], dtype=tf.int32)
                self.actions_onehot_base = tf.one_hot(self.actions_base, 524, dtype=tf.float32)
                self.actions_arg = dict()
                self.actions_onehot_arg = dict()
                for arg in action_spec.types:
                    if arg.name not in spatial_arguments:
                        arg_name = arg.name
                        self.actions_arg[arg_name] = dict()
                        self.actions_onehot_arg[arg_name] = dict()
                        for dim, size in enumerate(arg.sizes):
                            # A -1 index one-hots to all zeros, so unused
                            # arguments contribute nothing to the loss.
                            self.actions_arg[arg_name][dim] = tf.placeholder(shape=[None], dtype=tf.int32)
                            self.actions_onehot_arg[arg_name][dim] = tf.one_hot(self.actions_arg[arg_name][dim], size, dtype=tf.float32)
                self.actions_arg_spatial = dict()
                self.actions_onehot_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.actions_arg_spatial[arg] = tf.placeholder(shape=[None],dtype=tf.int32)
                    self.actions_onehot_arg_spatial[arg] = tf.one_hot(self.actions_arg_spatial[arg], 64 * 64,dtype=tf.float32)
                self.target_v = tf.placeholder(shape=[None], dtype=tf.float32)
                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
                # Probability each head assigned to the action actually taken.
                self.responsible_outputs_base = tf.reduce_sum(self.policy_base_actions * self.actions_onehot_base, [1])
                self.responsible_outputs_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.responsible_outputs_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.responsible_outputs_arg[arg_name][dim] = tf.reduce_sum(self.policy_arg_nonspatial[arg_name][dim] * self.actions_onehot_arg[arg_name][dim], [1])
                self.responsible_outputs_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.responsible_outputs_arg_spatial[arg] = tf.reduce_sum(self.policy_arg_spatial[arg] * self.actions_onehot_arg_spatial[arg], [1])
                # Loss functions
                self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
                self.log_policy_base_actions = tf.log(tf.clip_by_value(self.policy_base_actions, 1e-20, 1.0))  # avoid NaN with clipping when value in policy becomes zero
                # Total entropy: base head plus every argument head.
                self.entropy_base = - tf.reduce_sum(self.policy_base_actions * self.log_policy_base_actions)
                self.entropy_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.entropy_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.entropy_arg[arg_name][dim] = - tf.reduce_sum(self.policy_arg_nonspatial[arg_name][dim] * tf.log(tf.clip_by_value(self.policy_arg_nonspatial[arg_name][dim], 1e-20, 1.0)))
                self.entropy_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.entropy_arg_spatial[arg] = - tf.reduce_sum(self.policy_arg_spatial[arg] * tf.log(tf.clip_by_value(self.policy_arg_spatial[arg], 1e-20, 1.)))
                self.entropy = self.entropy_base
                for arg_name in self.policy_arg_nonspatial:
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.entropy += self.entropy_arg[arg_name][dim]
                for arg in spatial_arguments:
                    self.entropy += self.entropy_arg_spatial[arg]
                # Policy-gradient loss: -log pi(a|s) * advantage, summed over
                # the base action head and every argument head.
                self.policy_loss_base = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_base, 1e-20, 1.0)) * self.advantages)
                self.policy_loss_arg = dict()
                for arg_name in self.policy_arg_nonspatial:
                    self.policy_loss_arg[arg_name] = dict()
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.policy_loss_arg[arg_name][dim] = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg[arg_name][dim], 1e-20, 1.0)) * self.advantages)
                self.policy_loss_arg_spatial = dict()
                for arg in spatial_arguments:
                    self.policy_loss_arg_spatial[arg] = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_spatial[arg], 1e-20, 1.0))*self.advantages)
                self.policy_loss = self.policy_loss_base
                for arg_name in self.policy_arg_nonspatial:
                    for dim in self.policy_arg_nonspatial[arg_name]:
                        self.policy_loss += self.policy_loss_arg[arg_name][dim]
                for arg in spatial_arguments:
                    self.policy_loss += self.policy_loss_arg_spatial[arg]
                # Combined objective with a small entropy bonus.
                self.loss = self.value_loss + self.policy_loss - self.entropy * 0.001
                # Get gradients from local network using local losses
                local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                self.gradients = tf.gradients(self.loss,local_vars)
                self.var_norms = tf.global_norm(local_vars)
                grads, self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)
                # Apply local gradients to global network
                global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
                self.apply_grads = trainer.apply_gradients(zip(grads, global_vars))
## WORKER AGENT
class Worker():
def __init__(self, name, trainer, model_path, global_episodes, global_steps, map_name, action_spec, observation_spec):
    """Set up one A3C worker: counters, summaries, local network, and env."""
    self.name = "worker_" + str(name)
    self.number = name
    self.model_path = model_path
    self.trainer = trainer
    # Shared TF counters; the assign_add ops increment them atomically.
    self.global_episodes = global_episodes
    self.increment_global_episodes = self.global_episodes.assign_add(1)
    self.global_steps = global_steps
    self.increment_global_steps = self.global_steps.assign_add(1)
    self.episode_rewards = []
    self.episode_lengths = []
    self.episode_mean_values = []
    # Each worker writes its own TensorBoard event directory (train_<n>).
    self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
    self.action_spec = action_spec
    self.observation_spec = observation_spec
    # Create the local copy of the network and the TensorFlow op to copy
    # global parameters to the local network.
    self.local_AC = AC_Network(self.name, trainer, action_spec, observation_spec)
    self.update_local_ops = update_target_graph('global', self.name)
    print('Initializing environment #{}...'.format(self.number))
    self.env = sc2_env.SC2Env(map_name=map_name)
def train(self, rollout, sess, gamma, bootstrap_value):
    """Run one update of the global network from a rollout of experience.

    Each rollout row is [s_minimap, s_screen, s_nonspatial, action_id,
    arg_sample, arg_sample_spatial, reward, s1_minimap, s1_screen,
    s1_nonspatial, episode_end, value]; column 10 (episode_end) is unused
    here, value estimates are read from column 11.

    Returns per-step value loss, policy loss and entropy, plus the
    gradient and variable norms.
    """
    rollout = np.array(rollout)
    obs_minimap = rollout[:,0]
    obs_screen = rollout[:,1]
    obs_nonspatial = rollout[:,2]
    actions_base = rollout[:,3]
    actions_args = rollout[:,4]
    actions_args_spatial = rollout[:,5]
    rewards = rollout[:,6]
    next_obs_minimap = rollout[:,7]
    next_obs_screen = rollout[:,8]
    next_obs_nonspatial = rollout[:,9]
    values = rollout[:,11]
    # Re-group the per-step non-spatial argument samples into
    # {arg_name: {dim: [sample per step]}} for batched feeding.
    actions_arg_stack = dict()
    for actions_arg in actions_args:
        for arg_name in actions_arg:
            if arg_name not in actions_arg_stack:
                actions_arg_stack[arg_name] = dict()
            for dim in actions_arg[arg_name]:
                if dim not in actions_arg_stack[arg_name]:
                    actions_arg_stack[arg_name][dim] = [actions_arg[arg_name][dim]]
                else:
                    actions_arg_stack[arg_name][dim].append(actions_arg[arg_name][dim])
    # Same regrouping for the flattened spatial argument samples.
    actions_arg_spatial_stack = dict()
    for actions_arg_spatial in actions_args_spatial:
        for arg_name,arg_value in actions_arg_spatial.items():
            if arg_name not in actions_arg_spatial_stack:
                actions_arg_spatial_stack[arg_name] = []
            actions_arg_spatial_stack[arg_name].append(arg_value)
    # Here we take the rewards and values from the rollout, and use them to calculate the advantage and discounted returns.
    # The advantage function uses generalized advantage estimation from [2]
    # NOTE(review): gamma is reused as the GAE lambda in the second
    # discount() call below — confirm this is intended rather than a
    # separate lambda hyperparameter.
    self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
    discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
    self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
    advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
    advantages = discount(advantages,gamma)
    # Update the global network using gradients from loss
    # Generate network statistics to periodically save
    feed_dict = {self.local_AC.target_v:discounted_rewards,
                 self.local_AC.inputs_spatial_screen:np.stack(obs_screen).reshape(-1,64,64,17),
                 self.local_AC.inputs_spatial_minimap:np.stack(obs_minimap).reshape(-1,64,64,7),
                 self.local_AC.inputs_nonspatial:np.stack(obs_nonspatial).reshape(-1,12),
                 self.local_AC.actions_base:actions_base,
                 self.local_AC.advantages:advantages}
    for arg_name in actions_arg_stack:
        for dim in actions_arg_stack[arg_name]:
            feed_dict[self.local_AC.actions_arg[arg_name][dim]] = actions_arg_stack[arg_name][dim]
    for arg_name, value in actions_arg_spatial_stack.items():
        feed_dict[self.local_AC.actions_arg_spatial[arg_name]] = value
    v_l,p_l,e_l,g_n,v_n, _ = sess.run([self.local_AC.value_loss,
                                       self.local_AC.policy_loss,
                                       self.local_AC.entropy,
                                       self.local_AC.grad_norms,
                                       self.local_AC.var_norms,
                                       self.local_AC.apply_grads],
                                      feed_dict=feed_dict)
    # Losses are normalized by rollout length to give per-step averages.
    return v_l / len(rollout), p_l / len(rollout), e_l / len(rollout), g_n,v_n
def work(self,max_episode_length,gamma,sess,coord,saver):
episode_count = sess.run(self.global_episodes)
total_steps = 0
print ("Starting worker " + str(self.number))
with sess.as_default(), sess.graph.as_default():
while not coord.should_stop():
# Download copy of parameters from global network
sess.run(self.update_local_ops)
episode_buffer = []
episode_values = []
episode_frames = []
episode_reward = 0
episode_step_count = 0
# Start new episode
obs = self.env.reset()
episode_frames.append(obs[0])
reward, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0], self.action_spec, self.observation_spec)
s_minimap = minimap_stack
s_screen = screen_stack
s_nonspatial = nonspatial_stack
while not episode_end:
# Take an action using distributions from policy networks' outputs.
base_action_dist, arg_spatial_dist, arg_nonspatial_dist, v = sess.run([
self.local_AC.policy_base_actions,
self.local_AC.policy_arg_spatial,
self.local_AC.policy_arg_nonspatial,
self.local_AC.value],
feed_dict={
self.local_AC.inputs_spatial_minimap: minimap_stack,
self.local_AC.inputs_spatial_screen: screen_stack,
self.local_AC.inputs_nonspatial: nonspatial_stack})
# Apply filter to remove unavailable actions and then renormalize
base_action_dist[0] += 1e-20
for action_id, action in enumerate(base_action_dist[0]):
if action_id not in obs[0].observation['available_actions']:
base_action_dist[0][action_id] = 0.
base_action_dist[0] /= np.sum(base_action_dist[0])
action_id = sample_dist(base_action_dist)
arg_sample = dict()
for arg_name in arg_nonspatial_dist:
arg_sample[arg_name] = dict()
for dim in arg_nonspatial_dist[arg_name]:
arg_sample[arg_name][dim] = sample_dist(arg_nonspatial_dist[arg_name][dim])
arg_sample_spatial = dict()
arg_sample_spatial_abs = dict()
for arg in arg_spatial_dist:
arg_sample_spatial_abs[arg] = sample_dist(arg_spatial_dist[arg])
arg_sample_spatial[arg] = [arg_sample_spatial_abs[arg] % 64, arg_sample_spatial_abs[arg] / 64]
arguments = []
spatial_arguments = ['screen', 'minimap', 'screen2']
for argument in self.action_spec.functions[action_id].args:
name = argument.name
if name not in spatial_arguments:
argument_value = []
for dim, size in enumerate(argument.sizes):
argument_value.append(arg_sample[name][dim])
else:
argument_value = arg_sample_spatial[name]
arguments.append(argument_value)
# Set unused arguments to -1 so that they won't be updated in the training
# See documentation for tf.one_hot
for arg_name, argument in arg_sample.items():
if arg_name not in self.action_spec.functions[action_id].args:
for dim in argument:
arg_sample[arg_name][dim] = -1
for arg_name, arg in arg_sample_spatial_abs.items():
if arg_name not in self.action_spec.functions[action_id].args:
arg_sample_spatial_abs[arg_name] = -1
a = actions.FunctionCall(action_id, arguments)
obs = self.env.step(actions=[a])
r, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0], self.action_spec, self.observation_spec)
if not episode_end:
episode_frames.append(obs[0])
s1_minimap = minimap_stack
s1_screen = screen_stack
s1_nonspatial = nonspatial_stack
else:
s1_minimap = s_minimap
s1_screen = s_screen
s1_nonspatial = s_nonspatial
# Append latest state to buffer
episode_buffer.append([s_minimap, s_screen, s_nonspatial,action_id,arg_sample,arg_sample_spatial_abs,r,s1_minimap, s1_screen, s1_nonspatial,episode_end,v[0,0]])
episode_values.append(v[0,0])
episode_reward += r
s_minimap = s1_minimap
s_screen = s1_screen
s_nonspatial = s1_nonspatial
sess.run(self.increment_global_steps)
total_steps += 1
episode_step_count += 1
# If the episode hasn't ended, but the experience buffer is full, then we make an update step using that experience rollout.
if len(episode_buffer) == 40 and not episode_end and episode_step_count != max_episode_length - 1:
# Since we don't know what the true final return is, we "bootstrap" from our current value estimation.
v1 = sess.run(self.local_AC.value,
feed_dict={self.local_AC.inputs_spatial_minimap: minimap_stack, self.local_AC.inputs_spatial_screen: screen_stack,self.local_AC.inputs_nonspatial: nonspatial_stack})[0,0]
v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,v1)
episode_buffer = []
sess.run(self.update_local_ops)
if episode_end:
break
self.episode_rewards.append(episode_reward)
self.episode_lengths.append(episode_step_count)
self.episode_mean_values.append(np.mean(episode_values))
episode_count += 1
episode_reward = obs[0].observation['score_cumulative'][0]
global _max_score, _running_avg_score
if _max_score < episode_reward:
_max_score = episode_reward
_running_avg_score = (2.0 / 101) * (episode_reward - _running_avg_score) + _running_avg_score
print("{} Step #{} Episode #{} Reward: {}".format(self.name, total_steps, episode_count, episode_reward))
print("Total Steps: {}\tTotal Episodes: {}\tMax Score: {}\tAvg Score: {}".format(sess.run(self.global_steps), sess.run(self.global_episodes), _max_score, | |
("Path to file containing set of stop terms, one term "
"per line."),
'required': True
}
}
def __init__(self, session, config, parent):
    """Load the configured stoplist file into an in-memory lookup table."""
    FileAssistedNormalizer.__init__(self, session, config, parent)
    self.stoplist = {}
    for term in self._processPath(session, 'stoplist'):
        self.stoplist[term.strip()] = 1
def process_string(self, session, data):
    """Drop stop terms: return None for stoplisted data, else pass through."""
    if data in self.stoplist:
        return None
    return data
class TokenExpansionNormalizer(FileAssistedNormalizer):
    """Expand acronyms or compound words.

    Only works with tokens NOT exact strings. Expansions are loaded from a
    file where the first token on each line is the term to expand and the
    remaining tokens are its replacement.
    """
    expansions = {}
    _possiblePaths = {
        'expansions': {
            'docs': ("Path to file containing set of expansions, one "
                     "expansion per line. First token in line is taken to be "
                     "the thing to be expanded, remaining tokens are what "
                     "occurences should be replaced with."),
            'required': True
        }
    }
    _possibleSettings = {
        'keepOriginal': {
            'docs': ("Should the original token be kept as well as its "
                     "expansion (e.g. potentialy useful when browsing). "
                     "Defaults to False."),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        FileAssistedNormalizer.__init__(self, session, config, parent)
        self.expansions = {}
        self.keepOriginal = self.get_setting(session, 'keepOriginal', 0)
        for line in self._processPath(session, 'expansions'):
            tokens = unicode(line).split()
            self.expansions[tokens[0]] = tokens[1:]

    def process_string(self, session, data):
        """Return the expansion of data joined by spaces, or data itself."""
        try:
            return ' '.join(self.expansions[data])
        except KeyError:
            return data

    def process_hash(self, session, data):
        """Expand every token entry of a term hash, merging occurrences."""
        expanded = {}
        if not len(data):
            return expanded
        keep_original = self.keepOriginal
        expansion_map = self.expansions
        for info in data.itervalues():
            # Positional/offset bookkeeping is not supported for expansion.
            if 'positions' in info or 'charOffsets' in info:
                raise NotImplementedError
            text = info['text']
            if text in expansion_map:
                for replacement in expansion_map[text]:
                    if replacement in expanded:
                        expanded[replacement]['occurences'] += 1
                    else:
                        new_entry = info.copy()
                        new_entry['text'] = replacement
                        expanded[replacement] = new_entry
                if keep_original:
                    expanded[text] = info
            else:
                expanded[text] = info
        return expanded
class StemNormalizer(SimpleNormalizer):
    """Use a Snowball stemmer to stem the terms.

    Requires the zopyx.txng3.ext Stemmer; the language is chosen via the
    'language' setting (defaults to english).
    """
    stemmer = None
    _possibleSettings = {
        'language': {
            'docs': ("Language to create a stemmer for, defaults to "
                     "english."),
            'options': ("danish|dutch|english|finnish|french|german|"
                        "italian|norwegian|porter|portuguese|russian|"
                        "spanish|swedish")
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        if Stemmer is None:
            raise MissingDependencyException(self.objectType,
                                             "zopyx.txng3.ext"
                                             )
        lang = self.get_setting(session, 'language', 'english')
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate instead of masquerading as a config error.
            raise ConfigFileException("Unknown stemmer language: "
                                      "%s" % (lang))

    def process_string(self, session, data):
        """Stem a single term, coercing byte strings to unicode first."""
        if (type(data) != type(u"")):
            data = unicode(data, 'utf-8')
        return self.stemmer.stem([data])[0]
class PhraseStemNormalizer(SimpleNormalizer):
    """Use a Snowball stemmer to stem multiple words in a phrase.

    Deprecated: Should instead use normalizer after tokenizer and before
    tokenMerger.
    """
    stemmer = None

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        if Stemmer is None:
            raise MissingDependencyException(self.objectType,
                                             "zopyx.txng3.ext"
                                             )
        lang = self.get_setting(session, 'language', 'english')
        # Pattern matching punctuation to blank out before stemming.
        self.punctuationRe = re.compile(
            "((?<!s)'|[-.,]((?=\s)|$)|(^|(?<=\s))[-.,']|"
            "[~`!@+=\#\&\^*()\[\]{}\\\|\":;<>?/])"
        )
        try:
            self.stemmer = Stemmer.Stemmer(lang)
        except Exception:
            # Narrowed from a bare except so interrupts still propagate.
            raise ConfigFileException("Unknown stemmer language: %s" %
                                      (lang))

    def process_string(self, session, data):
        """Stem every word of the phrase and rejoin with single spaces."""
        if (type(data) != type(u"")):
            data = unicode(data, 'utf-8')
        s = self.punctuationRe.sub(' ', data)
        # BUGFIX: split the punctuation-stripped string rather than the raw
        # input; previously `s` was computed and then silently discarded.
        wds = s.split()
        stemmed = self.stemmer.stem(wds)
        return ' '.join(stemmed)
class PhoneticNormalizer(SimpleNormalizer):
    u"""Carries out phonetic normalization.

    Currently fairly simple Soundex-like normalization after "Introduction
    to Information Retrieval" by <NAME>, <NAME> & <NAME>, except that the
    length of the final term is configurable (not hard-coded to 4
    characters).
    """
    _possibleSettings = {
        'termSize': {
            'docs': ("Number of characters to reduce/pad the phonetically "
                     "normalized term to. If not a positive integer no "
                     "reduction/padding applied (default)."),
            'type': int
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.nChars = self.get_setting(session, 'termSize', 0)
        # One regex per Soundex digit class. The '+' quantifier collapses
        # runs of same-class letters, which performs the "remove one of
        # each pair of consecutive identical digits" step during the
        # substitution itself.
        self.re0 = re.compile('[aeiouhwy]+', re.IGNORECASE | re.UNICODE)
        self.re1 = re.compile('[bfpv]+', re.IGNORECASE | re.UNICODE)
        self.re2 = re.compile('[cgjkqsxz]+', re.IGNORECASE | re.UNICODE)
        self.re3 = re.compile('[dt]+', re.IGNORECASE | re.UNICODE)
        self.re4 = re.compile('[l]+', re.IGNORECASE | re.UNICODE)
        self.re5 = re.compile('[mn]+', re.IGNORECASE | re.UNICODE)
        self.re6 = re.compile('[r]+', re.IGNORECASE | re.UNICODE)

    def process_string(self, session, data):
        """Return the phonetic (Soundex-like) normalization of data."""
        # 0. Prepare by stripping leading/trailing whitespace
        data = data.strip()
        # Robustness fix: an empty or whitespace-only term previously
        # raised IndexError on data[0]; return it unchanged instead.
        if not data:
            return data
        # 1. Retain the first letter of the term.
        # 2. Change all occurrences of A, E, I, O, U, H, W, Y to '0'.
        # 3. Change remaining letters to digits:
        #    B,F,P,V -> 1; C,G,J,K,Q,S,X,Z -> 2; D,T -> 3;
        #    L -> 4; M,N -> 5; R -> 6.
        # 4. Consecutive same-class letters collapse via the '+' patterns.
        tail = data[1:]
        for i, regex in enumerate([self.re0, self.re1, self.re2, self.re3,
                                   self.re4, self.re5, self.re6]):
            tail = regex.sub(str(i), tail)
        # 5. Remove all zeros from the resulting string.
        tail = tail.replace('0', '')
        result = data[0] + tail
        if self.nChars:
            # Pad with trailing zeros / truncate to exactly nChars.
            result = '{0:0<{1}}'.format(result[:self.nChars], self.nChars)
        if type(data) == unicode:
            return unicode(result)
        else:
            return result
class DateStringNormalizer(SimpleNormalizer):
    """Serialize a Date object into its ISO8601 string form."""

    def process_string(self, session, data):
        # A date object's default str() representation is already ISO8601.
        iso = str(data)
        return iso
class DateYearNormalizer(SimpleNormalizer):
    """Reduce a date in ISO8601 format to just its year.

    Very crude implementation: simply returns the first 4 characters.
    """

    def process_string(self, session, data):
        # ISO8601 dates lead with the 4-digit year (YYYY-MM-DD...).
        year = data[0:4]
        return year
class IdToFilenameNormalizer(SimpleNormalizer):
    """Turn an id into a filename with the appropriate extension(s).

    The extension to append is a configurable setting; defaults to .xml
    """

    _possibleSettings = {
        'extension': {
            'docs': ("File extension (including leading period / stop) to "
                     "append to given id to produce and appropriate "
                     "filename."),
            'type': str,
            'default': '.xml'
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        # Cache the configured extension once at construction time.
        self.ext = self.get_setting(session, 'extension', '.xml')

    def process_string(self, session, data):
        # Stringify non-string ids before appending the extension.
        filename = str(data) + self.ext
        return filename
class FilenameToIdNormalizer(SimpleNormalizer):
    """Turn a filename into an id by stripping off the filename extension.

    Only strips off the final extension, including the period / stop.
    """

    def process_string(self, session, data):
        # Renamed local from 'id' to 'base' to avoid shadowing the builtin
        # id() function.
        base, ext = os.path.splitext(data)
        return base
class RangeNormalizer(SimpleNormalizer):
    """Merge pairs of adjacent terms into range terms ("low high").

    XXX: This is actually a job for a TokenMerger. Deprecated.
    """

    def process_hash(self, session, data):
        """Pair up terms (ordered by position where available) into ranges."""
        # Need to step through positions in order
        kw = {}
        # list() so the result is indexable under both Python 2 and Python 3
        # (dict.values() is a non-indexable view on Python 3).
        vals = list(data.values())
        if not vals:
            return kw
        prox = 'positions' in vals[0]
        if not prox:
            # Bad. Assume low -> high order
            tmplist = [(d['text'], d) for d in vals]
        else:
            # Need to duplicate across occs, as all in same hash from record
            tmplist = []
            for d in vals:
                for x in range(0, len(d['positions']), 2):
                    tmplist.append(("%s-%s" %
                                    (d['positions'][x], d['positions'][x + 1]),
                                    d))
        tmplist.sort()
        for t in range(0, len(tmplist), 2):
            base = tmplist[t][1]
            try:
                text = base['text'] + " " + tmplist[t + 1][1]['text']
            except (IndexError, KeyError):
                # Odd number of terms: pair the final term with itself.
                # (Was a bare except; IndexError is the expected overflow.)
                text = base['text'] + " " + base['text']
            base['text'] = text
            try:
                del base['positions']
            except KeyError:
                # No proximity information stored for this term.
                pass
            kw[text] = base
        return kw
class UnicodeCollationNormalizer(SimpleNormalizer):
    """Use pyuca to create a collation sort key for a string.

    Only, but Very, useful for sorting.
    """

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        keyPath = self.get_path(session, 'keyFile', 'allkeys.txt')
        # Imported lazily so a missing pyuca only breaks this normalizer.
        from pyuca import Collator
        self.collator = Collator(keyPath)

    def process_string(self, session, data):
        # German eszett collates as 'ss'.
        data = data.replace(u'\u00DF', 'ss')
        weights = self.collator.sort_key(data)
        # Zero-pad each collation weight so plain string comparison of the
        # result matches numeric comparison of the weights.
        padded = ["%04d" % w for w in weights]
        return ''.join(padded)
class DiacriticNormalizer(SimpleNormalizer):
"""Normalizer to turn XML entities into their closes ASCII approximation.
Slow implementation of Unicode 4.0 character decomposition.
Eg that &eacute; -> e
"""
map = {}
def __init__(self, session, config, parent):
SimpleNormalizer.__init__(self, session, config, parent)
# Decomposition as per Unicode 4.0 Data file
self.map = {
u"\u00A7": u"Section",
u"\u00A9": u"(c)",
# Exhaustive accented alphabetical, diacrytics and ligatures
u"\u00C0": u"\u0041",
u"\u00C1": u"\u0041",
u"\u00C2": u"\u0041",
u"\u00C3": u"\u0041",
u"\u00C4": u"\u0041",
u"\u00C5": u"\u0041",
u"\u00C6": u"AE",
u"\u00C7": u"\u0043",
u"\u00C8": u"\u0045",
u"\u00C9": u"\u0045",
u"\u00CA": u"\u0045",
u"\u00CB": u"\u0045",
u"\u00CC": u"\u0049",
u"\u00CD": u"\u0049",
u"\u00CE": u"\u0049",
u"\u00CF": u"\u0049",
u"\u00D0": u"\u0044",
u"\u00D1": u"\u004E",
u"\u00D2": u"\u004F",
u"\u00D3": u"\u004F",
u"\u00D4": u"\u004F",
u"\u00D5": u"\u004F",
u"\u00D6": u"\u004F",
u"\u00D7": u"x",
u"\u00D8": u"O",
u"\u00D9": u"\u0055",
u"\u00DA": u"\u0055",
u"\u00DB": u"\u0055",
u"\u00DC": u"\u0055",
u"\u00DD": u"\u0059",
u"\u00DE": u"TH",
u"\u00DF": u"ss",
u"\u00E0": u"\u0061",
u"\u00E1": u"\u0061",
u"\u00E2": u"\u0061",
u"\u00E3": u"\u0061",
u"\u00E4": u"\u0061",
u"\u00E5": u"\u0061",
u"\u00E6": u"\u0061\u0065",
u"\u00E7": u"\u0063",
u"\u00E8": u"\u0065",
u"\u00E9": u"\u0065",
u"\u00EA": u"\u0065",
u"\u00EB": u"\u0065",
u"\u00EC": u"\u0069",
u"\u00ED": u"\u0069",
u"\u00EE": u"\u0069",
u"\u00EF": u"\u0069",
u"\u00F0": u"\u0064",
u"\u00F1": u"\u006E",
u"\u00F2": u"\u006F",
u"\u00F3": u"\u006F",
u"\u00F4": u"\u006F",
u"\u00F5": u"\u006F",
u"\u00F6": u"\u006F",
u"\u00F7": u"/",
u"\u00F8": u"\u006F",
u"\u00F9": u"\u0075",
u"\u00FA": u"\u0075",
u"\u00FB": u"\u0075",
u"\u00FC": u"\u0075",
u"\u00FD": u"\u0079",
u"\u00FE": u"th",
u"\u00FF": | |
),
delete_after = 5
)
row = column = -1
valid = False
continue
# The direction is valid, set the offsets
if valid:
for offset in range(ship["length"]):
board.set_at(row_value(offset), column, ship["number"])
break
# The ship would exceed the vertical bounds
else:
row = column = -1
await self.member.send(
embed = get_error_message("You cannot make your ship vertical. It's too {}".format(
"low" if str(reaction) == DOWN else "high"
)),
delete_after = 5
)
continue
# Make sure the spots to the right of this row and column are open
# given the size of the current ship and that the ship does not go out of bounds
elif str(reaction) in [LEFT, RIGHT]:
comparison = (
(column + (ship["length"] - 1) < board.width)
if str(reaction) == RIGHT else
(column - (ship["length"] - 1) >= 0)
)
column_value = lambda offset: (column + offset) if str(reaction) == RIGHT else (column - offset)
# Check that the ship would not exceed the horizontal bounds
if comparison:
valid = True
for offset in range(ship["length"]):
if board.get_at(row, column_value(offset)) is not None:
await self.member.send(
embed = get_error_message(
"The spaces to the {} of ({},{}) are not open. Please choose another place.".format(
"right" if str(reaction) == RIGHT else "left",
row, column
)),
delete_after = 5
)
row = column = -1
valid = False
continue
# The direction is valid, set the offsets
if valid:
for offset in range(ship["length"]):
board.set_at(row, column_value(offset), ship["number"])
break
# The ship would exceed the horizontal bounds
else:
row = column = -1
await self.member.send(
embed = get_error_message("You cannot make your ship horizontal. It's too far to the {}".format(
"right" if str(reaction) == RIGHT else "left"
)),
delete_after = 5
)
continue
# The row and column is not open
else:
row = column = -1
await self.member.send(
embed = get_error_message("That row and column is already occupied! Choose a new place."),
delete_after = 5
)
# Let the player know they must wait until the other player
# finishes setting up their board unless this player is the one the other person was waiting on
self.board = board
if self.message: # Only remove the message if it exists currently. We reset this so the direction reactions dont stick
# during gameplay for this player
await self.message.delete()
self.message = None
if not game.did_opponent_submit(self) and not self.is_ai:
await self.member.send(
embed = Embed(
title = "Now you just wait!",
description = "Once {} finishes setting up their board, the game will start!".format(
(game.opponent if game.opponent.id != self.id else game.challenger).get_name()
),
colour = await get_embed_color(self.member)
),
delete_after = 5
)
async def process_turn(self, game):
    """Processes the turn for this player.

    AI players pick a target automatically (smart AIs hunt along the
    direction of their last hit); human players pick a column and then a
    row via message reactions.

    :param game: The game object that this player is connected to
    :rtype: BattleshipBoard.HIT | BattleshipBoard.MISS
        (BattleshipPlayer.QUIT if a human player chooses to quit)
    """
    # Check if the player is an AI
    if self.is_ai:

        # Check if there was a last hit and that the AI is smart
        result = None
        if self.last_hit and self.is_smart:
            row = self.last_hit[0]
            column = self.last_hit[1]

            # Check if a direction exists, continue to go towards that direction
            if self.current_direction:
                row += self.current_direction[0]
                column += self.current_direction[1]

                # Check if the new row and column exceed the board boundaries
                #   if so, reverse the direction.
                # Fix: the column check used "> width" where every other
                #   bounds check uses ">= width" (width itself is already
                #   out of range for a 0-based index).
                if row < 0 or row >= game.get_current_board().height or column < 0 or column >= game.get_current_board().width:
                    self.current_direction = (
                        -self.current_direction[0],
                        -self.current_direction[1]
                    )
                    row += self.current_direction[0]
                    column += self.current_direction[1]

                # Continue generating a new position until the position is not in shots
                while (row, column) in game.get_current_board().shots:
                    row += self.current_direction[0]
                    column += self.current_direction[1]
                result = game.get_current_board().fire(row, column)

                # If there was a hit, save the last hit and check if the ship was sunk
                if result == BattleshipBoard.HIT:
                    self.last_hit = row, column
                    if game.get_current_board().did_ship_sink(game.get_current_board().get_at(*self.last_hit)):
                        self.last_hit = None
                        self.current_direction = None

                # If there was not a hit, check if the last hit was not sunk yet
                #   if not, reverse the direction
                else:
                    if not game.get_current_board().did_ship_sink(game.get_current_board().get_at(*self.last_hit)):
                        self.current_direction = (
                            -self.current_direction[0],
                            -self.current_direction[1]
                        )

            # The current direction does not exist, try finding adjacent spots
            else:

                # Find all adjacent spots around the chosen spot and add the
                #   directions to tried_directions wherever the AI has already gone previously
                for temp_dir in DIRECTIONS:
                    temp_chosen = (
                        1 if str(temp_dir) == DOWN else (-1 if str(temp_dir) == UP else 0),
                        1 if str(temp_dir) == RIGHT else (-1 if str(temp_dir) == LEFT else 0)
                    )
                    temp_row = row + temp_chosen[0]
                    temp_column = column + temp_chosen[1]

                    # Check if the temp_row, temp_column exceed the boards size, add them to the tried directions
                    if temp_row < 0 or temp_row >= game.get_current_board().height or temp_column < 0 or temp_column >= game.get_current_board().width:
                        self.tried_directions.append(temp_dir)

                    # Check if the temp_row, temp_column has already been made
                    if (temp_row, temp_column) in game.get_current_board().shots and temp_dir not in self.tried_directions:
                        self.tried_directions.append(temp_dir)

                # While the AI chose a direction that's already been tried, have them continue choosing
                # NOTE(review): tried_directions receives raw DIRECTIONS items
                #   here but str(direction) at the bottom of this branch --
                #   confirm DIRECTIONS items are plain strings, otherwise
                #   these membership tests mix types and never match.
                direction = choice(DIRECTIONS)
                while direction in self.tried_directions:
                    direction = choice(DIRECTIONS)

                # The direction has been chosen
                chosen_direction = (
                    1 if str(direction) == DOWN else (-1 if str(direction) == UP else 0),
                    1 if str(direction) == RIGHT else (-1 if str(direction) == LEFT else 0)
                )

                # Attempt to make the hit if the spot is open
                #   if the spot is not open, continue moving in the current direction
                #   this will fix itself once an empty spot has been reached
                #   The AI's next turn will move in the opposite direction
                row = self.last_hit[0] + chosen_direction[0]
                column = self.last_hit[1] + chosen_direction[1]
                while (row, column) in game.get_current_board().shots:
                    row += chosen_direction[0]
                    column += chosen_direction[1]
                result = game.get_current_board().fire(row, column)

                # If there was a hit, the current direction has been decided
                if result == BattleshipBoard.HIT:
                    self.current_direction = chosen_direction
                    self.tried_directions = []

                # There was no hit, add the direction to the tried directions
                else:
                    self.tried_directions.append(str(direction))

        # There was no last hit or the AI is not smart, make a random hit
        else:

            # Continue generating a random row and column while the chosen row and column have already been taken
            row, column = randint(0, game.get_current_board().height - 1), randint(0, game.get_current_board().width - 1)
            while (row, column) in game.get_current_board().shots:
                row, column = randint(0, game.get_current_board().height - 1), randint(0, game.get_current_board().width - 1)

            # Check if there was a hit
            result = game.get_current_board().fire(row, column)
            if result == BattleshipBoard.HIT:
                self.last_hit = row, column

        # Sleep for 2 seconds to simulate a decision
        await sleep(2)
        return result

    # The player is not an AI, wait for them to choose a place to make a shot
    else:
        row = column = -1
        while True:

            # Wait for the player to react with the column and row they want to go
            def check_reaction(reaction, user):
                return (
                    reaction.message.id == self.message.id and
                    user.id == self.id and
                    str(reaction) in (BATTLESHIP_REACTIONS + [QUIT])
                )
            done, pending = await wait([
                game.bot.wait_for("reaction_add", check=check_reaction),
                game.bot.wait_for("reaction_remove", check=check_reaction)
            ], return_when=FIRST_COMPLETED)
            reaction, user = done.pop().result()
            for future in pending:
                future.cancel()

            # Check if the player wants to QUIT the BattleshipGame
            if str(reaction) == QUIT:
                return BattleshipPlayer.QUIT

            # The player does not want to quit, let them choose their row and column
            else:

                # Check if the player needs to choose a column
                if column == -1:
                    column = BATTLESHIP_REACTIONS.index(str(reaction))
                else:
                    row = BATTLESHIP_REACTIONS.index(str(reaction))

                    # Make sure this move is legal
                    if (row, column) not in game.get_current_board().shots:
                        break

                    # The move is not legal
                    else:
                        row = column = -1
                        await self.member.send(
                            embed=get_error_message("You've already gone there! Choose a new place."),
                            delete_after=5
                        )

        # Make the players requested shot
        return game.get_current_board().fire(row, column)
async def show_board(self, game):
"""Shows the opposite player's board depending on who is the current player.
If the current player is this player, it will show the opponents board
If the current player is the opposite player, it will show this player's board
:param game: The game object that this | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# skip
from fastai.text.all import *
! [-e / content] & & pip install - Uqq fastai # upgrade fastai on colab
# +
# all_slow
# -
# # Transfer learning in text
#
# > How to fine-tune a language model and train a classifier
# In this tutorial, we will see how we can train a model to classify text (here based on their sentiment). First we will see how to do this quickly in a few lines of code, then how to get state-of-the art results using the approach of the [ULMFit paper](https://arxiv.org/abs/1801.06146).
#
# We will use the IMDb dataset from the paper [Learning Word Vectors for Sentiment Analysis](https://ai.stanford.edu/~amaas/data/sentiment/), containing a few thousand movie reviews.
# ## Train a text classifier from a pretrained model
# We will try to train a classifier using a pretrained model, a bit like we do in the [vision tutorial](http://docs.fast.ai/tutorial.vision). To get our data ready, we will first use the high-level API:
# ## Using the high-level API
# We can download the data and decompress it with the following command:
path = untar_data(URLs.IMDB)
path.ls()
(path / 'train').ls()
# The data follows an ImageNet-style organization, in the train folder, we have two subfolders, `pos` and `neg` (for positive reviews and negative reviews). We can gather it by using the `TextDataLoaders.from_folder` method. The only thing we need to specify is the name of the validation folder, which is "test" (and not the default "valid").
dls = TextDataLoaders.from_folder(untar_data(URLs.IMDB), valid='test')
# We can then have a look at the data with the `show_batch` method:
dls.show_batch()
# We can see that the library automatically processed all the texts to split then in *tokens*, adding some special tokens like:
#
# - `xxbos` to indicate the beginning of a text
# - `xxmaj` to indicate the next word was capitalized
#
# Then, we can define a `Learner` suitable for text classification in one line:
learn = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy)
# We use the [AWD LSTM](https://arxiv.org/abs/1708.02182) architecture, `drop_mult` is a parameter that controls the magnitude of all dropouts in that model, and we use `accuracy` to track down how well we are doing. We can then fine-tune our pretrained model:
learn.fine_tune(4, 1e-2)
learn.fine_tune(4, 1e-2)
# Not too bad! To see how well our model is doing, we can use the `show_results` method:
learn.show_results()
# And we can predict on new texts quite easily:
learn.predict("I really liked that movie!")
# Here we can see the model has considered the review to be positive. The second part of the result is the index of "pos" in our data vocabulary and the last part is the probabilities attributed to each class (99.1% for "pos" and 0.9% for "neg").
#
# Now it's your turn! Write your own mini movie review, or copy one from the Internet, and we can see what this model thinks about it.
# ### Using the data block API
# We can also use the data block API to get our data in a `DataLoaders`. This is a bit more advanced, so fell free to skip this part if you are not comfortable with learning new APIs just yet.
#
# A datablock is built by giving the fastai library a bunch of information:
#
# - the types used, through an argument called `blocks`: here we have images and categories, so we pass `TextBlock` and `CategoryBlock`. To inform the library our texts are files in a folder, we use the `from_folder` class method.
# - how to get the raw items, here our function `get_text_files`.
# - how to label those items, here with the parent folder.
# - how to split those items, here with the grandparent folder.
imdb = DataBlock(blocks=(TextBlock.from_folder(path), CategoryBlock),
get_items=get_text_files,
get_y=parent_label,
splitter=GrandparentSplitter(valid_name='test'))
# This only gives a blueprint on how to assemble the data. To actually create it, we need to use the `dataloaders` method:
dls = imdb.dataloaders(path)
# ## The ULMFiT approach
# The pretrained model we used in the previous section is called a language model. It was pretrained on Wikipedia on the task of guessing the next word, after reading all the words before. We got great results by directly fine-tuning this language model to a movie review classifier, but with one extra step, we can do even better: the Wikipedia English is slightly different from the IMDb English. So instead of jumping directly to the classifier, we could fine-tune our pretrained language model to the IMDb corpus and *then* use that as the base for our classifier.
#
# One reason, of course, is that it is helpful to understand the foundations of the models that you are using. But there is another very practical reason, which is that you get even better results if you fine tune the (sequence-based) language model prior to fine tuning the classification model. For instance, in the IMDb sentiment analysis task, the dataset includes 50,000 additional movie reviews that do not have any positive or negative labels attached in the unsup folder. We can use all of these reviews to fine tune the pretrained language model — this will result in a language model that is particularly good at predicting the next word of a movie review. In contrast, the pretrained model was trained only on Wikipedia articles.
#
# The whole process is summarized by this picture:
#
# 
# ### Fine-tuning a language model on IMDb
# We can get our texts in a `DataLoaders` suitable for language modeling very easily:
dls_lm = TextDataLoaders.from_folder(path, is_lm=True, valid_pct=0.1)
# We need to pass something for `valid_pct` otherwise this method will try to split the data by using the grandparent folder names. By passing `valid_pct=0.1`, we tell it to get a random 10% of those reviews for the validation set.
#
# We can have a look at our data using `show_batch`. Here the task is to guess the next word, so we can see the targets have all shifted one word to the right.
dls_lm.show_batch(max_n=5)
# Then we have a convenience method to directly grab a `Learner` from it, using the `AWD_LSTM` architecture like before. We use accuracy and perplexity as metrics (the later is the exponential of the loss) and we set a default weight decay of 0.1. `to_fp16` puts the `Learner` in mixed precision, which is going to help speed up training on GPUs that have Tensor Cores.
learn = language_model_learner(dls_lm, AWD_LSTM, metrics=[accuracy, Perplexity()], path=path, wd=0.1).to_fp16()
# By default, a pretrained `Learner` is in a frozen state, meaning that only the head of the model will train while the body stays frozen. We show you what is behind the fine_tune method here and use a fit_one_cycle method to fit the model:
learn.fit_one_cycle(1, 1e-2)
# This model takes a while to train, so it's a good opportunity to talk about saving intermediary results.
# You can easily save the state of your model like so:
learn.save('1epoch')
# It will create a file in `learn.path/models/` named "1epoch.pth". If you want to load your model on another machine after creating your `Learner` the same way, or resume training later, you can load the content of this file with:
learn = learn.load('1epoch')
# We can them fine-tune the model after unfreezing:
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3)
# Once this is done, we save all of our model except the final layer that converts activations to probabilities of picking each token in our vocabulary. The model not including the final layer is called the *encoder*. We can save it with `save_encoder`:
learn.save_encoder('finetuned')
# > Jargon: Encoder: The model not including the task-specific final layer(s). It means much the same thing as *body* when applied to vision CNNs, but tends to be more used for NLP and generative models.
# Before using this to fine-tune a classifier on the reviews, we can use our model to generate random reviews: since it's trained to guess what the next word of the sentence is, we can use it to write new reviews:
TEXT = "I liked this movie | |
<reponame>SwellMai/Terminator-800
#!/usr/bin/env python
import rospy
import os
import message_filters
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped, Twist
import numpy as np
from tf.transformations import euler_from_quaternion
import math
class node():
    """A single cell in the A* search graph (array coordinates)."""

    def __init__(self, x, y, g_cost=0.0, h_cost=0.0, f_cost=0.0):
        self.x = x
        self.y = y
        self.g_cost = g_cost  # cost accumulated from the start node
        self.h_cost = h_cost  # heuristic estimate to the goal
        self.f_cost = f_cost  # g + weighted h; ordering key for the open list
        self.parent = None    # back-pointer used to reconstruct the route

    def __str__(self):
        # Fix: the ':' separators after the g_cost/f_cost labels were missing,
        # making the debug output inconsistent with the x:/y: labels.
        return ("x:" + str(self.x) + ", y:" + str(self.y) +
                ", g_cost:" + str(self.g_cost) +
                ", f_cost:" + str(self.f_cost))


class A_Star():
    """Weighted A* planner over a 2D occupancy grid (0 = free, 1 = occupied).

    Map coordinates are centred on the grid; array coordinates index the
    numpy grid directly. route_plan() returns a list of [x, y] map
    coordinates from start to goal.
    """

    def __init__(self, startx, starty, goalx, goaly, grid_map):
        self.open = []    # frontier, kept sorted by f_cost
        self.closed = []  # already-expanded nodes
        self.grid_map = grid_map
        # String copy of the grid, used only by print_result_grid debugging.
        # Fix: np.str was removed from NumPy; builtin str is equivalent here.
        self.step = grid_map.astype(str)
        self.start_node = node(startx, starty)
        self.end_node = None
        startx_trans, starty_trans = self.transfer_to_array_coordinate(
            self.start_node)
        # Nudge the start diagonally until it lies on a free cell.
        while self.grid_map[startx_trans, starty_trans] != 0:
            startx_trans = startx_trans - 1
            starty_trans = starty_trans - 1
        assert self.grid_map[startx_trans, starty_trans] == 0, [startx_trans, starty_trans]
        self.start_node = node(startx_trans, starty_trans)
        self.goal_node = node(goalx, goaly)
        goalx_trans, goaly_trans = self.transfer_to_array_coordinate(
            self.goal_node)
        self.goal_node = node(goalx_trans, goaly_trans)
        self.open.append(self.start_node)
        # Heuristic weight: > 1 makes the search greedy (weighted A*).
        self.dis_epsilon = 100
        # 4-connected moves cost 1, diagonal moves cost sqrt(2).
        self.motion = [[1, 0, 1],
                       [0, 1, 1],
                       [-1, 0, 1],
                       [0, -1, 1],
                       [-1, -1, np.sqrt(2)],
                       [-1, 1, np.sqrt(2)],
                       [1, -1, np.sqrt(2)],
                       [1, 1, np.sqrt(2)]]

    def euclidean_distance(self, node_1, node_2):
        """Straight-line distance between two nodes."""
        return np.sqrt((node_1.x - node_2.x) ** 2 + (node_1.y - node_2.y) ** 2)

    def h_cost(self, node):
        """Heuristic: Euclidean distance from `node` to the goal."""
        h_cost = self.euclidean_distance(self.goal_node, node)
        return h_cost

    def f_cost(self, node):
        """Combined cost g + dis_epsilon * h used to order the open list."""
        g_cost = node.g_cost
        h_cost = self.h_cost(node)
        return g_cost + self.dis_epsilon * h_cost

    def transfer_to_array_coordinate(self, node):
        """Convert centred map coordinates to (row, col) array indices."""
        i = int(self.grid_map.shape[0] / 2) - node.y if self.grid_map.shape[0] % 2 != 0 else int(
            self.grid_map.shape[0] / 2) - node.y - 1
        j = int(self.grid_map.shape[1] / 2) + node.x if self.grid_map.shape[1] % 2 != 0 else int(
            self.grid_map.shape[1] / 2) + node.x - 1
        # Fix: np.ceil returns floats, which are rejected as numpy indices;
        # cast back to int so callers can index the grid directly.
        return int(np.ceil(i)), int(np.ceil(j))

    def transfer_to_map_coordinates(self, node):
        """Convert (row, col) array indices back to centred map coordinates."""
        # Fix: use floor division so the result stays integral under
        # Python 3 (the original relied on Python 2's integer '/').
        y = (self.grid_map.shape[0] // 2) - node.x if self.grid_map.shape[0] % 2 != 0 else (
            self.grid_map.shape[0] // 2) - 1 - node.x
        x = -(self.grid_map.shape[1] // 2) + node.y if self.grid_map.shape[1] % 2 != 0 else -(
            self.grid_map.shape[1] // 2) + node.y + 1
        return [x, y]

    def traversable(self, i, j):
        """True if (i, j) is in bounds, free, and not a blocked diagonal gap."""
        if 0 <= i < self.grid_map.shape[0] and 0 <= j < self.grid_map.shape[1] and self.grid_map[i, j] == 0:
            # Forbid squeezing diagonally between two occupied cells.
            if (0 <= i - 1 < self.grid_map.shape[0] and 0 <= j - 1 < self.grid_map.shape[1] and self.grid_map[i - 1, j] == 1 and self.grid_map[i, j - 1] == 1):
                return False
            elif (0 <= i - 1 < self.grid_map.shape[0] and 0 <= j + 1 < self.grid_map.shape[1] and self.grid_map[i - 1, j] == 1 and self.grid_map[i, j + 1] == 1):
                return False
            elif (0 <= i + 1 < self.grid_map.shape[0] and 0 <= j - 1 < self.grid_map.shape[1] and self.grid_map[i + 1, j] == 1 and self.grid_map[i, j - 1] == 1):
                return False
            elif (0 <= i + 1 < self.grid_map.shape[0] and 0 <= j + 1 < self.grid_map.shape[1] and self.grid_map[i + 1, j] == 1 and self.grid_map[i, j + 1] == 1):
                return False
            else:
                return True
        else:
            return False

    def inside(self, node, node_list):
        """True if a node with the same (x, y) is already in node_list."""
        x, y = node.x, node.y
        coordinate_list = [[node.x, node.y] for node in node_list]
        if [x, y] in coordinate_list:
            return True
        else:
            return False

    def route_plan(self):
        """Run the search and return the route as [x, y] map coordinates."""
        while True:
            # Fix: sort(cmp=None, ...) is invalid on Python 3; the key-only
            # call is equivalent to the original behaviour.
            self.open.sort(key=lambda x: x.f_cost, reverse=False)
            current = self.open[0]
            if current.x == self.goal_node.x and current.y == self.goal_node.y:
                self.end_node = current
                return self.output_route()
            self.closed.append(current)
            self.open.pop(0)
            for _, j in enumerate(self.motion):
                neighbour = node(
                    x=current.x + j[0], y=current.y + j[1], g_cost=current.g_cost + j[2])
                if not self.traversable(neighbour.x, neighbour.y) or self.inside(neighbour, self.closed):
                    continue
                if neighbour.g_cost < current.g_cost or not self.inside(neighbour, self.open):
                    neighbour.f_cost = self.f_cost(neighbour)
                    neighbour.parent = current
                    if not self.inside(neighbour, self.open):
                        self.open.append(neighbour)

    def print_result_grid(self, nodelist):
        """Debug helper: overlay the route onto the string grid and print it."""
        cunt = -1
        self.step[self.start_node.x, self.start_node.y] = "$"
        self.step[self.goal_node.x, self.goal_node.y] = "%"
        for i in nodelist:
            # NOTE(review): the "*" marker is immediately overwritten by the
            # step counter below -- confirm which marker was intended.
            self.step[i[0], i[1]] = "*"
            self.step[i[0], i[1]] = (cunt)
            cunt = cunt - 1
        # Fix: Python 2 print statement -> function call (same output).
        print(self.step)

    def output_route(self):
        """Walk parent pointers from the goal back to the start."""
        last_point = self.end_node
        result = []
        while last_point is not None:
            result.append(self.transfer_to_map_coordinates(
                node(last_point.x, last_point.y)))
            last_point = last_point.parent
        result.reverse()
        return result
class Move:
def __init__(self, planned_route, startx, starty, goalx, goaly, grid_map):
self.grid_map = grid_map
self.planned_route = planned_route
self.goalx = goalx
self.goaly = goaly
self.pos_x = 0
self.pos_y = 0
self.pos_z = 0
self.ori_x = 0
self.ori_y = 0
self.ori_z = 0
self.ori_w = 0
self.eul_x = 0
self.eul_y = 0
self.eul_z = 0
self.start_x = startx
self.start_y = starty
self.startpoint_recorded = False
self.ranges = []
self.now_node = 0
self.rotation = True
self.adjust = False
self.point_judge_thre = 0.5
self.collision_thre = 0.45
self.can_run = True
self.can_routefollow = True
self.route = self.planned_route[1:-1]
self.route.append([self.goalx, self.goaly])
self.adj_angle = 0
self.can_set_adj_angle = True
self.can_adj_rotation = True
self.adj_end_point_cunt = 25
self.ranges_min_val = 0
self.ranges_min_idx = 0
self.need_ob = False
def callback(self, odom, laser):
self.pos_x = odom.pose.pose.position.x
self.pos_y = odom.pose.pose.position.y
self.pos_z = odom.pose.pose.position.z
self.ori_x = odom.pose.pose.orientation.x
self.ori_y = odom.pose.pose.orientation.y
self.ori_z = odom.pose.pose.orientation.z
self.ori_w = odom.pose.pose.orientation.w
eul = euler_from_quaternion(
(self.ori_x, self.ori_y, self.ori_z, self.ori_w))
self.eul_x = eul[0]
self.eul_y = eul[1]
self.eul_z = eul[2]
self.ranges = laser.ranges
self.ranges_min_val = min(self.ranges)
self.ranges_min_idx = self.ranges.index(self.ranges_min_val)
if self.startpoint_recorded == False:
self.start_x = self.pos_x
self.start_y = self.pos_y
self.startpoint_recorded = True
# print self.pos_x, self.ranges[:6]
# print self.target_x
def collect_data(self):
rate = rospy.Rate(20)
laser_sub = message_filters.Subscriber('/base_scan', LaserScan)
odom_sub = message_filters.Subscriber(
'/base_pose_ground_truth', Odometry)
ts = message_filters.TimeSynchronizer(
[odom_sub, laser_sub], 100)
ts.registerCallback(self.callback)
rate.sleep()
def move(self, angle, speed):
# rospy.init_node('robot_move', anonymous=True)
vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
# rate = rospy.Rate(10)
vel_msg = Twist()
vel_msg.linear.x = speed
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = angle
vel_pub.publish(vel_msg)
def distance_to_point(self, node_1, node_2):
return np.sqrt((node_1[0]-node_2[0])**2+(node_1[1]-node_2[1])**2)
def go(self):
self.collect_data()
min_val = self.ranges_min_val
min_idx = self.ranges_min_idx
if min_val < self.collision_thre or self.need_ob:
# rospy.loginfo(str(min_val)+","+str(min_idx))
self.obstacle_avoidance(min_idx, min_val)
if self.can_routefollow:
self.route_follow()
#
def obstacle_avoidance(self, min_index, min_value):
self.need_ob = True
self.rotation = True
self.now_node = 0
rospy.loginfo("0")
self.can_routefollow = False
if self.distance_to_point([self.pos_x, self.pos_y], [self.goalx,self.goaly]) < self.point_judge_thre:
rospy.signal_shutdown("Done!")
if self.can_set_adj_angle:
rospy.loginfo("-1")
if 0 <= min_index <= 90:
self.can_set_adj_angle = False
self.adj_angle = math.radians(25)+self.eul_z
rospy.loginfo("-1.1")
elif 90 < min_index <= 180:
self.can_set_adj_angle = False
self.adj_angle = math.radians(50)+self.eul_z
rospy.loginfo("-1.2")
elif 180 < min_index <= 270:
self.can_set_adj_angle = False
self.adj_angle = -math.radians(25)+self.eul_z
rospy.loginfo("-1.3")
elif 270 < min_index <= 380:
self.can_set_adj_angle = False
self.adj_angle = -math.radians(50)+self.eul_z
rospy.loginfo("-1.4")
now_ori = self.eul_z
target_ori = self.adj_angle
ori_diff = target_ori-now_ori
# rospy.loginfo(str(now_ori)+","+str(target_ori)+","+str(ori_diff))
positive_clock = False if ori_diff >= 0 else True
if self.can_adj_rotation:
if math.fabs(ori_diff) > math.radians(5):
if positive_clock:
self.move(-0.45, 0)
rospy.loginfo("1")
else:
self.move(0.45, 0)
rospy.loginfo("2")
else:
self.can_adj_rotation = False
rospy.loginfo("3")
else:
rospy.loginfo(str(min_value)+","+str(self.collision_thre+1.3))
if min_value < self.collision_thre+1:
rospy.loginfo("4")
self.move(0, 0.55)
else:
rospy.loginfo("RARARARARARRARA")
while self.grid_map[self.pos_x,self.pos_y] == 1:
self.pos_x = self.pos_x-1
self.pos_y = self.pos_y +1
a_star = A_Star(self.pos_x,self.pos_y, self.goalx,
self.goaly, self.grid_map)
self.planned_route = a_star.route_plan()
self.route = self.planned_route[1:-1]
self.route.append([self.goalx, self.goaly])
self.can_routefollow = True
self.can_adj_rotation = True
self.can_set_adj_angle = True
self.need_ob = False
# rospy.signal_shutdown("trans")
    def route_follow(self):
        """Follow the planned route one node at a time.

        Alternates between two phases per node, selected by ``self.rotation``:
        rotate in place until the heading error to the current target node is
        under 4 degrees, then drive forward until within ``point_judge_thre``
        of the node. Advances ``now_node`` after each node is reached and
        shuts the node down once the route is exhausted or the goal reached.
        """
        # Stop the node outright when already within goal tolerance.
        if self.distance_to_point([self.pos_x, self.pos_y], [self.goalx,self.goaly]) < self.point_judge_thre:
            rospy.signal_shutdown("Done!")
        if self.can_run:
            target_node = self.route[self.now_node]
            rospy.loginfo("route follow to "+str(target_node))
            # rospy.loginfo(
            #     "move from" + str([self.pos_x, self.pos_y])+" to " + str(target_node))
            target_x = target_node[0]
            target_y = target_node[1]
            # self.collect_data()
            now_ori = self.eul_z
            # Heading from the current position to the target node.
            target_ori = math.atan2(
                target_y - self.pos_y, target_x - self.pos_x)
            ori_diff = target_ori-now_ori
            # rospy.loginfo(str(now_ori)+","+str(target_ori)+","+str(ori_diff))
            # Sign of the heading error selects rotation direction.
            positive_clock = False if ori_diff >= 0 else True
            if self.rotation:
                # Phase 1: rotate in place toward the target node.
                rospy.loginfo("rotating")
                if math.fabs(ori_diff) > math.radians(4):
                    if positive_clock:
                        self.move(-0.5, 0)
                    else:
                        self.move(0.5, 0)
                else:
                    self.rotation = False
            else:
                # Phase 2: drive straight until the node is reached.
                if self.distance_to_point([self.pos_x, self.pos_y], target_node) > self.point_judge_thre:
                    rospy.loginfo("going")
                    self.move(0, 0.45)
                    # rospy.loginfo()
                    # rospy.loginfo(str(minn))
                else:
                    # Node reached: re-enter the rotation phase for the next one.
                    self.rotation = True
                    self.now_node = self.now_node+1
                    if self.now_node > len(self.route)-1:
                        # rospy.loginfo(str(self.now_node)+","+str(len(route)-1)+","+str(len(route)))
                        self.can_run = False
        else:
            rospy.loginfo("Done!")
            rospy.signal_shutdown("Done!")
class PA2():
    """Entry-point node: plans a route with A_Star, then drives it via Move."""

    def __init__(self, gird):
        rospy.init_node('pa2_main', anonymous=True)
        # NOTE(review): the goal is hard-coded via set_param and read straight
        # back — the get_param round trip only matters if another process
        # changes the parameters in between.
        rospy.set_param("/goalx", 4.5)
        rospy.set_param("/goaly", 9.0)
        self.map_grid = gird
        self.startx, self.starty = -8.0, -2.0
        self.goalx = rospy.get_param("/goalx")
        self.goaly = rospy.get_param("/goaly")
        self.planned_route = None
        planner = A_Star(self.startx, self.starty, self.goalx,
                         self.goaly, self.map_grid)
        self.a_star = planner
        self.planned_route = planner.route_plan()
        rospy.loginfo("route planned!")
        self.move = Move(self.planned_route, self.startx,
                         self.starty, self.goalx, self.goaly, self.map_grid)

    def main(self):
        """Step the mover until ROS shuts the node down."""
        # print self.planned_route
        while not rospy.is_shutdown():
            self.move.go()
if __name__ == "__main__":
# address need be changed when run in terminal
print os.getcwd()
grid = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, | |
values(self, value):
"""
Setter for **self.values** attribute.
Parameters
----------
value : object
Attribute value.
"""
raise AttributeError('"{0}" attribute is read only!'.format('values'))
@property
def items(self):
"""
Property for **self.items** attribute. This is a convenient attribute
used to iterate over the tri-spectral power distribution.
Returns
-------
generator
Tri-spectral power distribution data generator.
"""
return self.__iter__()
@items.setter
def items(self, value):
"""
Setter for **self.items** attribute.
Parameters
----------
value : object
Attribute value.
"""
raise AttributeError('"{0}" attribute is read only!'.format('items'))
@property
def shape(self):
"""
Property for **self.shape** attribute.
Returns the shape of the tri-spectral power distribution in the form of
a :class:`SpectralShape` class instance.
Returns
-------
SpectralShape
Tri-spectral power distribution shape.
See Also
--------
SpectralPowerDistribution.is_uniform,
TriSpectralPowerDistribution.is_uniform
Warning
-------
:attr:`TriSpectralPowerDistribution.shape` is read only.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> tri_spd.shape
SpectralShape(510, 540, 10)
"""
return self.x.shape
@shape.setter
def shape(self, value):
"""
Setter for **self.shape** attribute.
Parameters
----------
value : object
Attribute value.
"""
raise AttributeError('"{0}" attribute is read only!'.format('shape'))
def __hash__(self):
"""
Returns the spectral power distribution hash value.
Returns
-------
int
Object hash.
Notes
-----
- Reimplements the :meth:`object.__hash__` method.
Warning
-------
See :meth:`SpectralPowerDistribution.__hash__` method warning section.
References
----------
.. [5] http://stackoverflow.com/a/16162138/931625
(Last accessed 8 August 2014)
"""
return hash((frozenset(self.__data.get('x')),
frozenset(self.__data.get('y')),
frozenset(self.__data.get('z'))))
def __getitem__(self, wavelength):
"""
Returns the values for given wavelength :math:`\lambda`.
Parameters
----------
wavelength: numeric
Wavelength :math:`\lambda` to retrieve the values.
Returns
-------
ndarray, (3,)
Wavelength :math:`\lambda` values.
See Also
--------
TriSpectralPowerDistribution.get
Notes
-----
- Reimplements the :meth:`object.__getitem__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> tri_spd[510]
array([ 49.67, 90.56, 12.43])
"""
return np.array((self.x[wavelength],
self.y[wavelength],
self.z[wavelength]))
def __setitem__(self, wavelength, value):
"""
Sets the wavelength :math:`\lambda` with given value.
Parameters
----------
wavelength : numeric
Wavelength :math:`\lambda` to set.
value : array_like
Value for wavelength :math:`\lambda`.
Notes
-----
- Reimplements the :meth:`object.__setitem__` method.
Examples
--------
>>> x_bar = {}
>>> y_bar = {}
>>> z_bar = {}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> tri_spd[510] = (49.6700, 49.6700, 49.6700)
>>> tri_spd.values
array([[ 49.67, 49.67, 49.67]])
"""
x, y, z = np.ravel(value)
self.x.__setitem__(wavelength, x)
self.y.__setitem__(wavelength, y)
self.z.__setitem__(wavelength, z)
def __iter__(self):
"""
Returns a generator for the tri-spectral power distribution data.
Returns
-------
generator
Tri-spectral power distribution data generator.
Notes
-----
- Reimplements the :meth:`object.__iter__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> for wavelength, value in tri_spd: print((wavelength, value))
(510, array([ 49.67, 90.56, 12.43]))
(520, array([ 69.59, 87.34, 23.15]))
(530, array([ 81.73, 45.76, 67.98]))
(540, array([ 88.19, 23.45, 90.28]))
"""
return itertools.izip(self.wavelengths, self.values)
def __contains__(self, wavelength):
"""
Returns if the tri-spectral power distribution contains the given
wavelength :math:`\lambda`.
Parameters
----------
wavelength : numeric
Wavelength :math:`\lambda`.
Returns
-------
bool
Is wavelength :math:`\lambda` in the tri-spectral power
distribution.
Notes
-----
- Reimplements the :meth:`object.__contains__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> 510 in tri_spd
True
"""
return wavelength in self.x
def __len__(self):
"""
Returns the tri-spectral power distribution wavelengths
:math:`\lambda_n` count.
Returns
-------
int
Tri-Spectral power distribution wavelengths :math:`\lambda_n`
count.
Notes
-----
- Reimplements the :meth:`object.__len__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mpg, lbl)
>>> len(tri_spd)
4
"""
return len(self.x)
def __eq__(self, tri_spd):
"""
Returns the tri-spectral power distribution equality with given other
tri-spectral power distribution.
Parameters
----------
spd : TriSpectralPowerDistribution
Tri-spectral power distribution to compare for equality.
Returns
-------
bool
Tri-spectral power distribution equality.
Notes
-----
- Reimplements the :meth:`object.__eq__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data1 = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> data2 = {'x_bar': y_bar, 'y_bar': x_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd1 = TriSpectralPowerDistribution('Tri Spd', data1, mpg, lbl)
>>> tri_spd2 = TriSpectralPowerDistribution('Tri Spd', data2, mpg, lbl)
>>> tri_spd3 = TriSpectralPowerDistribution('Tri Spd', data1, mpg, lbl)
>>> tri_spd1 == tri_spd2
False
>>> tri_spd1 == tri_spd3
True
"""
if not isinstance(tri_spd, self.__class__):
return False
equality = True
for axis in self.__mapping:
equality *= getattr(self, axis) == getattr(tri_spd, axis)
return bool(equality)
def __ne__(self, tri_spd):
"""
Returns the tri-spectral power distribution inequality with given other
tri-spectral power distribution.
Parameters
----------
spd : TriSpectralPowerDistribution
Tri-spectral power distribution to compare for inequality.
Returns
-------
bool
Tri-spectral power distribution inequality.
Notes
-----
- Reimplements the :meth:`object.__eq__` method.
Examples
--------
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data1 = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> data2 = {'x_bar': y_bar, 'y_bar': x_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd1 = TriSpectralPowerDistribution('Tri Spd', data1, mpg, lbl)
>>> tri_spd2 = TriSpectralPowerDistribution('Tri Spd', data2, mpg, lbl)
>>> tri_spd3 = TriSpectralPowerDistribution('Tri Spd', data1, mpg, lbl)
>>> tri_spd1 != tri_spd2
True
>>> tri_spd1 != tri_spd3
False
"""
return not (self == tri_spd)
def __format_operand(self, x):
"""
Formats given :math:`x` variable operand to *numeric* or *ndarray*.
This method is a convenient method to prepare the given :math:`x`
variable for the arithmetic operations below.
Parameters
----------
x : numeric or ndarray or TriSpectralPowerDistribution
Variable to format.
Returns
-------
numeric or ndarray
Formatted operand.
"""
if issubclass(type(x), TriSpectralPowerDistribution):
x = x.values
elif is_iterable(x):
x = to_ndarray(x)
return x
def __add__(self, x):
"""
Implements support for tri-spectral power distribution addition.
Parameters
----------
x : numeric or array_like or TriSpectralPowerDistribution
Variable to add.
Returns
-------
TriSpectralPowerDistribution
Variable added tri-spectral power distribution.
See Also
--------
TriSpectralPowerDistribution.__sub__,
TriSpectralPowerDistribution.__mul__,
TriSpectralPowerDistribution.__div__
Notes
-----
- Reimplements the :meth:`object.__add__` method.
Warning
-------
The addition operation happens in place.
Examples
--------
Adding a single *numeric* variable:
>>> x_bar = {510: 49.67, 520: 69.59, 530: 81.73, 540: 88.19}
>>> y_bar = {510: 90.56, 520: 87.34, 530: 45.76, 540: 23.45}
>>> z_bar = {510: 12.43, 520: 23.15, 530: 67.98, 540: 90.28}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mpg = lbl = {'x': | |
<reponame>kigero/rules_haxe<gh_stars>0
"""
Haxe utility functions.
"""
load(":providers.bzl", "HaxeLibraryInfo", "HaxeProjectInfo")
def determine_source_root(path):
    """
    Determine the source root for a given path, based on whether the path is in the external directory.

    Args:
        path: The path to check.

    Returns:
        The source root for the path: "external/<workspace>/" for each "external"
        segment found in the path, otherwise the empty string.
    """
    parts = path.split("/")
    root = ""
    for idx, part in enumerate(parts):
        if part == "external":
            # The segment after "external" is the workspace name.
            root += "external/{}/".format(parts[idx + 1])
    return root
def _determine_classpath(classpaths, path):
    # Prefer the external-workspace source root when the path points into an
    # external repository.
    classpath = determine_source_root(path)
    if classpath != "":
        return classpath

    # Otherwise take the prefix of the path before a matching classpath entry;
    # the last entry that matches past position 0 wins, "" if none match.
    for cp in classpaths:
        cp_idx = path.find(cp)
        if cp_idx > 0:
            classpath = path[0:cp_idx]
    return classpath
def find_direct_sources(ctx):
    """
    Finds the direct sources of the given context.

    Sources come from the rule's own "srcs" plus any sources advertised by the
    HaxeProjectInfo providers of its direct dependencies.

    Args:
        ctx: The bazel context.

    Returns:
        An array of source files.
    """
    sources = []

    if hasattr(ctx.files, "srcs"):
        sources.extend(ctx.files.srcs)

    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            info = dep[HaxeProjectInfo]
            if info == None or not hasattr(info, "srcs"):
                continue
            sources.extend(info.srcs)

    return sources
def find_direct_docsources(ctx):
    """
    Finds the direct document sources of the given context.

    Document sources come from the rule's own "doc_srcs" plus any advertised by
    the HaxeProjectInfo providers of its direct dependencies.

    Args:
        ctx: The bazel context.

    Returns:
        An array of document source files.
    """
    rtrn = []
    if hasattr(ctx.files, "doc_srcs"):
        rtrn += ctx.files.doc_srcs

    # Guard the "deps" access like the other find_direct_* helpers do, so this
    # helper also works for rules that do not define a "deps" attribute.
    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            haxe_dep = dep[HaxeProjectInfo]
            if haxe_dep == None:
                continue
            if hasattr(haxe_dep, "doc_srcs"):
                rtrn += haxe_dep.doc_srcs
    return rtrn
def find_direct_resources(ctx):
    """
    Finds the direct resources of the given context.

    Resources come from the rule's own "resources" plus any resources
    advertised by the HaxeProjectInfo providers of its direct dependencies.

    Args:
        ctx: The bazel context.

    Returns:
        An array of resource files.
    """
    collected = []

    if hasattr(ctx.files, "resources"):
        collected.extend(ctx.files.resources)

    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            info = dep[HaxeProjectInfo]
            if info == None or not hasattr(info, "resources"):
                continue
            collected.extend(info.resources)

    return collected
def find_library_name(ctx):
    """
    Determines the library name, taking into account any dependent HaxeProjectInfos.

    Resolution order: the rule's own "library_name", then "executable_name",
    then the first non-empty library name advertised by a dependency's
    HaxeProjectInfo, and finally the rule's own name.

    Args:
        ctx: The bazel context.

    Returns:
        The specified library name.
    """
    name = getattr(ctx.attr, "library_name", "")
    if name != "":
        return name

    name = getattr(ctx.attr, "executable_name", "")
    if name != "":
        return name

    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            info = dep[HaxeProjectInfo]
            if info == None:
                continue
            if info.library_name != None and info.library_name != "":
                return info.library_name

    return ctx.attr.name
def find_main_class(ctx):
    """
    Determines the main class, taking into account any dependant HaxeProjectInfos.

    Args:
        ctx: The bazel context.

    Returns:
        The specified main class, or None if no main class is defined.
    """
    if hasattr(ctx.attr, "main_class") and ctx.attr.main_class != "":
        return ctx.attr.main_class

    # Guard the "deps" access like find_library_name does, so this helper also
    # works for rules that do not define a "deps" attribute.
    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            haxe_dep = dep[HaxeProjectInfo]
            if haxe_dep == None:
                continue
            if hasattr(haxe_dep, "main_class") and haxe_dep.main_class != "":
                return haxe_dep.main_class
    return None
def create_hxml_map(ctx, toolchain, for_test = False, for_std_build = False):
    """
    Create a dict containing haxe build parameters based on the input attributes from the calling rule.

    The dict aggregates the rule's own attributes with the classpaths, libs,
    resources and args contributed by direct HaxeLibraryInfo dependencies.

    Args:
        ctx: Bazel context.
        toolchain: The Haxe toolchain instance.
        for_test: True if build parameters for unit testing should be added, False otherwise.
        for_std_build: True if build parameters for the standard build should be added, False otherwise.

    Returns:
        A dict containing the HXML properties.
    """
    hxml = {}
    # Trailing slash simplifies later concatenation; empty for the root package.
    package = ctx.label.package + "/" if ctx.label.package != "" else ""
    hxml["package"] = package
    hxml["for_test"] = for_test
    hxml["target"] = ctx.attr.target if hasattr(ctx.attr, "target") else None
    hxml["debug"] = ctx.attr.debug if hasattr(ctx.attr, "debug") else False
    # Standard-library builds get a fixed per-target name and main class.
    hxml["name"] = "std-{}".format(hxml["target"]) if for_std_build else find_library_name(ctx)
    if for_test:
        hxml["main_class"] = "MainTest"
    elif for_std_build:
        hxml["main_class"] = "StdBuild"
    else:
        hxml["main_class"] = find_main_class(ctx)
    # Extra command-line arguments, deduplicated.
    hxml["args"] = list()
    if hasattr(ctx.attr, "extra_args"):
        for arg in ctx.attr.extra_args:
            if not arg in hxml["args"]:
                hxml["args"].append(arg)
    # Target-specific support libraries use the toolchain's pinned versions.
    hxml["libs"] = dict()
    if hxml["target"] == "java":
        hxml["libs"]["hxjava"] = toolchain.haxelib_language_versions["hxjava"]
    elif hxml["target"] == "cpp":
        hxml["libs"]["hxcpp"] = toolchain.haxelib_language_versions["hxcpp"]
        if ctx.var["TARGET_CPU"].startswith("x64") and not "-D HXCPP_M64" in hxml["args"]:
            hxml["args"].append("-D HXCPP_M64")
    # User-specified haxelibs must be explicitly versioned; http(s) URLs are
    # treated as git sources.
    if hasattr(ctx.attr, "haxelibs"):
        for lib in ctx.attr.haxelibs:
            version = ctx.attr.haxelibs[lib]
            if version != None and version != "":
                if version.lower().find("http") == 0:
                    version = "git:{}".format(version)
                hxml["libs"][lib] = version
            else:
                fail("Explicit versioning is required for haxelibs.")
    # Classpaths: conventional main root, plus test roots and the bazel output
    # tree when building tests, plus any explicitly declared classpaths.
    hxml["classpaths"] = list()
    hxml["classpaths"].append("src/main/haxe")
    if for_test:
        hxml["classpaths"].append(ctx.var["BINDIR"])
        hxml["classpaths"].append("{}/{}".format(ctx.var["BINDIR"], package))
        hxml["classpaths"].append("src/test/haxe")
        hxml["classpaths"].append("{}src/test/haxe".format(package))
    if hasattr(ctx.attr, "classpaths"):
        for p in ctx.attr.classpaths:
            hxml["classpaths"].append(p)
    hxml["source_files"] = list()
    for src in find_direct_sources(ctx):
        hxml["source_files"].append(src.path)
    # Map each resource file to a name relative to the conventional resource
    # roots, restarting the name after any "external/<workspace>" prefix.
    hxml["resources"] = dict()
    for resource in find_direct_resources(ctx):
        name = resource.path
        name = name.replace("src/main/resources/", "")
        name = name.replace("src/test/resources/", "")
        parts = name.split("/")
        new_name = ""
        skip = False
        for idx in range(len(parts)):
            if skip:
                # Skip the workspace-name segment that follows "external".
                skip = False
                continue
            elif parts[idx] == "external":
                new_name = ""
                skip = True
            elif parts[idx] != "":
                if new_name != "":
                    new_name += "/"
                new_name += parts[idx]
        hxml["resources"][resource.path] = new_name
    # Arguments passed through to the target language compiler.
    hxml["c-args"] = list()
    if hxml["target"] == "java":
        if "haxe_java_target_version" in ctx.var:
            hxml["c-args"] += ["-source", ctx.var["haxe_java_target_version"], "-target", ctx.var["haxe_java_target_version"]]
    # Handle Dependencies: merge classpaths, libs, resources and args from
    # direct HaxeLibraryInfo providers, deduplicating as we go.
    if hasattr(ctx.attr, "deps"):
        for dep in ctx.attr.deps:
            haxe_dep = dep[HaxeLibraryInfo]
            if haxe_dep == None or haxe_dep.hxml == None:
                continue
            dep_hxml = haxe_dep.hxml
            for classpath in dep_hxml["classpaths"]:
                if classpath.startswith("external"):
                    # Normalize the external classpath, collapsing empty
                    # segments and restarting at the last "external" marker.
                    parts = classpath.split("/")
                    new_classpath = ""
                    for idx in range(len(parts)):
                        if parts[idx] == "external":
                            new_classpath = "external"
                        elif parts[idx] != "":
                            new_classpath += "/" + parts[idx]
                    if not new_classpath in hxml["classpaths"]:
                        hxml["classpaths"].append(new_classpath)
                else:
                    # Derive the dependency's classpath prefix from its first
                    # source file; drop it when it just repeats the package.
                    calculated_classpath = _determine_classpath(dep_hxml["classpaths"], dep_hxml["source_files"][0]) if len(dep_hxml["source_files"]) != 0 else ""
                    if calculated_classpath == dep_hxml["package"]:
                        calculated_classpath = ""
                    hxml["classpaths"].append("{}{}{}".format(calculated_classpath, dep_hxml["package"], classpath))
            for lib in dep_hxml["libs"]:
                if not lib in hxml["libs"]:
                    hxml["libs"][lib] = dep_hxml["libs"][lib]
            for resource in dep_hxml["resources"]:
                if not resource in hxml["resources"]:
                    hxml["resources"][resource] = dep_hxml["resources"][resource]
            for arg in dep_hxml["args"]:
                if not arg in hxml["args"]:
                    hxml["args"].append(arg)
    # Builds running inside an external workspace emit into a matching subdir.
    is_external = ctx.label.workspace_root.startswith("external")
    hxml["external_dir"] = "external/{}/".format(hxml["name"]) if is_external else ""
    return hxml
def create_build_hxml(ctx, toolchain, hxml, out_file, suffix = "", for_exec = False):
"""
Create the build.hxml file based on the input hxml dict.
Any Haxelibs that are specified in the hxml will be installed at this time.
Args:
ctx: Bazel context.
toolchain: The Haxe toolchain instance.
hxml: A dict containing HXML parameters; should be generated from `_create_hxml_map`.
out_file: The output file that the build.hxml should be written to.
suffix: Optional suffix to append to the build parameters.
for_exec: Whether this build HXML is intended for executing the result of the build; this can ignore some errors
that aren't an issue during execution.
"""
# Determine if we're in a dependant build.
if for_exec or len(hxml["source_files"]) == 0 or len(ctx.files.srcs) == 0:
is_dependent_build = True
else:
is_dependent_build = hxml["source_files"][0].startswith("external")
# An empty source root seems to cover the use cases that are currently in use; this may need to be revisited, but
# will require unit test cases!
source_root = ""
content = ""
package = ctx.label.package + "/" if ctx.label.package != "" else ""
# Target
hxml["output_dir"] = "{}{}".format(ctx.attr.name, suffix)
hxml["build_file"] = "{}/{}{}{}/{}".format(ctx.var["BINDIR"], hxml["external_dir"], package, hxml["output_dir"], hxml["name"])
ext = ""
if hxml["target"] != "":
if hxml["target"] == "neko":
ext = ".n"
hxml["output_file"] = "{}.n".format(hxml["name"], suffix)
elif hxml["target"] == "python":
ext = ".py"
hxml["output_file"] = "{}.py".format(hxml["name"], suffix)
elif hxml["target"] == "php":
hxml["output_file"] = "{}".format(hxml["name"], suffix)
elif hxml["target"] == "cpp":
output = "{}/".format(hxml["name"])
output_file = ""
if not for_exec:
output_file += "lib"
if hxml["main_class"] != None:
mc = hxml["main_class"]
if "." in mc:
mc = mc[mc.rindex(".") + 1:]
output_file += "{}".format(mc)
else:
output_file += "{}".format(hxml["name"])
if hxml["debug"] != None:
output_file += "-debug"
found_output_file = False
for arg in hxml["args"]:
if arg.lower().startswith("-d haxe_output_file"):
found_output_file = True
if not found_output_file:
hxml["args"].append("-D HAXE_OUTPUT_FILE={}".format(output_file))
if for_exec:
output_file += ".exe"
elif "-D dll_link" in hxml["args"]:
output_file += ".dll"
else:
output_file += ".lib"
hxml["output_file"] = output + output_file
elif hxml["target"] == "java":
output = "{}".format(hxml["name"])
if hxml["main_class"] != None:
mc = hxml["main_class"]
if "." in mc:
mc = mc[mc.rindex(".") + 1:]
output += "/{}".format(mc)
else:
output += "/{}".format(hxml["name"])
if hxml["debug"] != None:
output += "-Debug"
hxml["output_file"] = output + ".jar"
elif hxml["target"] == "js":
ext = ".js"
hxml["output_file"] = "{}.js".format(hxml["name"], suffix)
content += "--{} {}{}\n".format(hxml["target"], hxml["build_file"], ext)
# Debug
if hxml["debug"] != None:
content += "-debug\n"
# Classpaths
for classpath in hxml["classpaths"]:
if not classpath.startswith("external"):
classpath = "{}{}".format(source_root, classpath)
content += "-p {}\n".format(classpath)
# Compiler Args
for c_arg in hxml["c-args"]:
content += "--c-arg {}\n".format(c_arg)
# User Args
for arg in hxml["args"]:
content += "{}\n".format(arg)
# Resources
for path in hxml["resources"]:
content += "--resource {}@{}\n".format(path, hxml["resources"][path])
# Source or Main files
if hxml["main_class"] != None:
content += "-m {}\n".format(hxml["main_class"])
else:
for path in hxml["source_files"]:
if is_dependent_build:
path = path[len(source_root):]
for classpath in hxml["classpaths"]:
if path.startswith(classpath):
path = path[len(classpath) + 1:]
break
content += path.replace(".hx", "").replace("/", ".") + "\n"
count | |
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
"""
from nemo import logging
__all__ = ['InputExample', 'STR_DONTCARE', 'STATUS_OFF', 'STATUS_ACTIVE', 'STATUS_DONTCARE', 'truncate_seq_pair']
STR_DONTCARE = "dontcare"
# These are used to represent the status of slots (off, active, dontcare) and
# intents (off, active) in dialogue state tracking.
STATUS_OFF = 0
STATUS_ACTIVE = 1
STATUS_DONTCARE = 2
# NOTE(review): presumably marks a slot value carried over from elsewhere in
# the dialogue; unlike the three statuses above it is not listed in __all__ —
# confirm whether that omission is intentional.
STATUS_CARRY = 3
class InputExample(object):
"""An example for training/inference."""
def __init__(
self,
schema_config,
service_schema,
example_id="NONE",
example_id_num=[],
is_real_example=False,
tokenizer=None,
):
"""Constructs an InputExample.
Args:
max_seq_length: The maximum length of the sequence. Sequences longer than
this value will be truncated.
service_schema: A ServiceSchema object wrapping the schema for the service
corresponding to this example.
example_id: Unique identifier for the example, like: 'train-1_00000-00-Restaurants_1'
example_id_num: dialogue_id and turn_id combined and service id combined into a list of ints,
like: [1, 0, 0, 18]
is_real_example: Indicates if an example is real or used for padding in a
minibatch.
tokenizer (Tokenizer): such as NemoBertTokenizer
"""
self.schema_config = schema_config
self.service_schema = service_schema
self.example_id = example_id
self.example_id_num = example_id_num
self._add_carry_value = service_schema._add_carry_value
self._add_carry_status = service_schema._add_carry_status
self.is_real_example = is_real_example
self._max_seq_length = schema_config["MAX_SEQ_LENGTH"]
self._tokenizer = tokenizer
if self.is_real_example and self._tokenizer is None:
raise ValueError("Must specify tokenizer when input is a real example.")
self.user_utterance = ''
self.system_utterance = ''
# The id of each subword in the vocabulary for BERT.
self.utterance_ids = [0] * self._max_seq_length
# Denotes the identity of the sequence. Takes values 0 (system utterance) and 1 (user utterance).
self.utterance_segment = [0] * self._max_seq_length
# Mask which takes the value 0 for padded tokens and 1 otherwise.
self.utterance_mask = [0] * self._max_seq_length
# Start and inclusive end character indices in the original utterance
# corresponding to the tokens. This is used to obtain the character indices
# from the predicted subword indices during inference.
# NOTE: A positive value indicates the character indices in the user
# utterance whereas a negative value indicates the character indices in the
# system utterance. The indices are offset by 1 to prevent ambiguity in the
# 0 index, which could be in either the user or system utterance by the
# above convention. Now the 0 index corresponds to padded tokens.
self.start_char_idx = [0] * self._max_seq_length
self.end_char_idx = [0] * self._max_seq_length
# Number of categorical slots present in the service.
self.num_categorical_slots = 0
# The status of each categorical slot in the service.
self.categorical_slot_status = [STATUS_OFF] * schema_config["MAX_NUM_CAT_SLOT"]
# Masks out categorical status for padded cat slots
self.cat_slot_status_mask = [0] * len(self.categorical_slot_status)
# Number of values taken by each categorical slot.
self.num_categorical_slot_values = [0] * schema_config["MAX_NUM_CAT_SLOT"]
# The index of the correct value for each categorical slot.
self.categorical_slot_values = [0] * schema_config["MAX_NUM_CAT_SLOT"]
# Masks out categorical slots values for slots not used in the service
self.cat_slot_values_mask = [
[0] * schema_config["MAX_NUM_VALUE_PER_CAT_SLOT"] for _ in range(schema_config["MAX_NUM_CAT_SLOT"])
]
# Number of non-categorical slots present in the service.
self.num_noncategorical_slots = 0
# The status of each non-categorical slot in the service.
self.noncategorical_slot_status = [STATUS_OFF] * schema_config["MAX_NUM_NONCAT_SLOT"]
# Masks out non-categorical status for padded cat slots
self.noncat_slot_status_mask = [0] * len(self.noncategorical_slot_status)
# The index of the starting subword corresponding to the slot span for a
# non-categorical slot value.
self.noncategorical_slot_value_start = [0] * schema_config["MAX_NUM_NONCAT_SLOT"]
# The index of the ending (inclusive) subword corresponding to the slot span
# for a non-categorical slot value.
self.noncategorical_slot_value_end = [0] * schema_config["MAX_NUM_NONCAT_SLOT"]
# Total number of slots present in the service. All slots are included here
# since every slot can be requested.
self.num_slots = 0
# Takes value 1 if the corresponding slot is requested, 0 otherwise.
self.requested_slot_status = [STATUS_OFF] * (
schema_config["MAX_NUM_CAT_SLOT"] + schema_config["MAX_NUM_NONCAT_SLOT"]
)
# Masks out requested slots that are not used for the service
self.requested_slot_mask = [0] * len(self.requested_slot_status)
# Total number of intents present in the service.
self.num_intents = 0
# Takes value 1 if the intent is active, 0 otherwise.
self.intent_status = [STATUS_OFF] * schema_config["MAX_NUM_INTENT"]
# Masks out intents that are not used for the service, [1] for none intent
self.intent_status_mask = [1] + [0] * len(self.intent_status)
# Label for active intent in the turn
self.intent_status_labels = 0
    @property
    def readable_summary(self):
        """Get a readable dict that summarizes the attributes of an InputExample.

        Decodes the encoded features back into human-readable form: token/mask
        pairs, the single active intent (if any) and the slot values currently
        in the dialogue state.

        Raises:
            ValueError: if more than one intent is marked STATUS_ACTIVE.
        """
        # Only count real (unmasked) tokens.
        seq_length = sum(self.utterance_mask)
        utt_toks = self._tokenizer.ids_to_tokens(self.utterance_ids[:seq_length])
        utt_tok_mask_pairs = list(zip(utt_toks, self.utterance_segment[:seq_length]))
        active_intents = [
            self.service_schema.get_intent_from_id(idx)
            for idx, s in enumerate(self.intent_status)
            if s == STATUS_ACTIVE
        ]
        if len(active_intents) > 1:
            raise ValueError("Should not have multiple active intents in a single service.")
        active_intent = active_intents[0] if active_intents else ""
        slot_values_in_state = {}
        # Categorical slots: resolve the value index back to its string value.
        for idx, s in enumerate(self.categorical_slot_status):
            if s == STATUS_ACTIVE:
                value_id = self.categorical_slot_values[idx]
                slot_values_in_state[
                    self.service_schema.get_categorical_slot_from_id(idx)
                ] = self.service_schema.get_categorical_slot_value_from_id(idx, value_id)
            elif s == STATUS_DONTCARE:
                slot_values_in_state[self.service_schema.get_categorical_slot_from_id(idx)] = STR_DONTCARE
        # Non-categorical slots: reconstruct the value from the token span.
        for idx, s in enumerate(self.noncategorical_slot_status):
            if s == STATUS_ACTIVE:
                slot = self.service_schema.get_non_categorical_slot_from_id(idx)
                start_id = self.noncategorical_slot_value_start[idx]
                end_id = self.noncategorical_slot_value_end[idx]
                # Token list is consisted of the subwords that may start with "##". We
                # remove "##" to reconstruct the original value. Note that it's not a
                # strict restoration of the original string. It's primarily used for
                # debugging.
                # ex. ["san", "j", "##ose"] --> "san jose"
                readable_value = " ".join(utt_toks[start_id : end_id + 1]).replace(" ##", "")
                slot_values_in_state[slot] = readable_value
            elif s == STATUS_DONTCARE:
                slot = self.service_schema.get_non_categorical_slot_from_id(idx)
                slot_values_in_state[slot] = STR_DONTCARE
        summary_dict = {
            "utt_tok_mask_pairs": utt_tok_mask_pairs,
            "utt_len": seq_length,
            "num_categorical_slots": self.num_categorical_slots,
            "num_categorical_slot_values": self.num_categorical_slot_values,
            "num_noncategorical_slots": self.num_noncategorical_slots,
            "service_name": self.service_schema.service_name,
            "active_intent": active_intent,
            "slot_values_in_state": slot_values_in_state,
        }
        return summary_dict
def add_utterance_features(
    self, system_tokens, system_inv_alignments, user_tokens, user_inv_alignments, system_utterance, user_utterance
):
    """Build the BERT-style utterance input and store it on this example.

    The system utterance becomes sequence A (segment id 0) and the user
    utterance sequence B (segment id 1), laid out as
    ``[CLS] sys... [SEP] user... [SEP]`` and zero-padded to the maximum
    sequence length.

    Note: this method truncates ``system_tokens`` and ``user_tokens`` in
    place so their combined length fits the BERT input limit.

    Args:
        system_tokens: subword tokens of the system utterance.
        system_inv_alignments: (start, end) character offsets, in the
            original system utterance, of the token each subword came from.
        user_tokens: subword tokens of the user utterance.
        user_inv_alignments: (start, end) character offsets, in the
            original user utterance, of the token each subword came from.
        system_utterance: raw system utterance string.
        user_utterance: raw user utterance string.
    """
    max_utt_len = self._max_seq_length
    # Reserve three positions for [CLS] and the two [SEP] markers.
    if truncate_seq_pair(system_tokens, user_tokens, max_utt_len - 3):
        logging.debug(f'Utterance sequence truncated in example id - {self.example_id}.')

    tokens = []
    segment_ids = []
    token_mask = []
    char_starts = []
    char_ends = []

    def _push(token, segment, start, end):
        # Append one position across all five parallel feature lists
        # (mask 1 marks a real, non-padding token).
        tokens.append(token)
        segment_ids.append(segment)
        token_mask.append(1)
        char_starts.append(start)
        char_ends.append(end)

    _push(self._tokenizer.cls_token, 0, 0, 0)
    # System utterance: char offsets are stored negated (shifted by one) so
    # they can be told apart from user-utterance offsets downstream.
    for idx, subword in enumerate(system_tokens):
        st, en = system_inv_alignments[idx]
        _push(subword, 0, -(st + 1), -(en + 1))
    _push(self._tokenizer.sep_token, 0, 0, 0)
    # User utterance: char offsets stored shifted up by one.
    for idx, subword in enumerate(user_tokens):
        st, en = user_inv_alignments[idx]
        _push(subword, 1, st + 1, en + 1)
    _push(self._tokenizer.sep_token, 1, 0, 0)

    utterance_ids = self._tokenizer.tokens_to_ids(tokens)
    # Zero-pad every feature list up to the BERT input sequence length.
    while len(utterance_ids) < max_utt_len:
        utterance_ids.append(0)
        segment_ids.append(0)
        token_mask.append(0)
        char_starts.append(0)
        char_ends.append(0)

    self.utterance_ids = utterance_ids
    self.utterance_segment = segment_ids
    self.utterance_mask = token_mask
    self.start_char_idx = char_starts
    self.end_char_idx = char_ends
    self.user_utterances = user_utterance
    self.system_utterance = system_utterance
def make_copy_with_utterance_features(self):
"""Make a copy of the current example with utterance features."""
new_example = InputExample(
schema_config=self.schema_config,
| |
import datetime
import pytz
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from schedule.models import Calendar, Event, EventRelation, Rule
class TestEvent(TestCase):
def __create_event(self, title, start, end, cal):
    """Create and persist a plain (non-recurring) event on *cal*."""
    return Event.objects.create(calendar=cal, title=title, start=start, end=end)
def __create_recurring_event(self, title, start, end, end_recurring, rule, cal):
    """Create and persist a recurring event governed by *rule* until *end_recurring*."""
    return Event.objects.create(
        calendar=cal,
        rule=rule,
        title=title,
        start=start,
        end=end,
        end_recurring_period=end_recurring,
    )
def test_edge_case_events(self):
    """Back-to-back events: the window matches event two but only touches event one's end."""
    dt, utc = datetime.datetime, pytz.utc
    my_cal = Calendar.objects.create(name="MyCal")
    first = Event.objects.create(
        title="Edge case event test one",
        start=dt(2013, 1, 5, 8, 0, tzinfo=utc),
        end=dt(2013, 1, 5, 9, 0, tzinfo=utc),
        calendar=my_cal,
    )
    second = Event.objects.create(
        title="Edge case event test two",
        start=dt(2013, 1, 5, 9, 0, tzinfo=utc),
        end=dt(2013, 1, 5, 12, 0, tzinfo=utc),
        calendar=my_cal,
    )
    window = (dt(2013, 1, 5, 9, 0, tzinfo=utc), dt(2013, 1, 5, 12, 0, tzinfo=utc))
    # The second event overlaps the window; the first merely ends at its start.
    self.assertEqual(1, len(second.get_occurrences(*window)))
    self.assertEqual(0, len(first.get_occurrences(*window)))
def test_recurring_event_get_occurrences(self):
    """A weekly event expands to one occurrence per week inside the query window."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    recurring = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    occs = recurring.get_occurrences(
        start=dt(2008, 1, 12, 0, 0, tzinfo=utc),
        end=dt(2008, 1, 20, 0, 0, tzinfo=utc),
    )
    self.assertEqual(
        ["{} to {}".format(o.start, o.end) for o in occs],
        [
            "2008-01-12 08:00:00+00:00 to 2008-01-12 09:00:00+00:00",
            "2008-01-19 08:00:00+00:00 to 2008-01-19 09:00:00+00:00",
        ],
    )
def test_event_get_occurrences_after(self):
    """Plain-event occurrences are unaffected by an unrelated recurring event."""
    dt, utc = datetime.datetime, pytz.utc
    my_cal = Calendar.objects.create(name="MyCal")
    weekly = Rule.objects.create(frequency="WEEKLY")
    # An unrelated recurring event that must not leak into the results below.
    self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    first = self.__create_event(
        "Edge case event test one",
        dt(2013, 1, 5, 8, 0, tzinfo=utc),
        dt(2013, 1, 5, 9, 0, tzinfo=utc),
        my_cal,
    )
    second = self.__create_event(
        "Edge case event test two",
        dt(2013, 1, 5, 9, 0, tzinfo=utc),
        dt(2013, 1, 5, 12, 0, tzinfo=utc),
        my_cal,
    )
    window = (dt(2013, 1, 5, 9, 0, tzinfo=utc), dt(2013, 1, 5, 12, 0, tzinfo=utc))
    self.assertEqual(1, len(second.get_occurrences(*window)))
    self.assertEqual(0, len(first.get_occurrences(*window)))
def test_recurring_event_get_occurrences_2(self):
    """Weekly expansion over a one-week window (duplicate scenario, kept for regression)."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    recurring = self.__create_recurring_event(
        "Recurring event test",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    occs = recurring.get_occurrences(
        start=dt(2008, 1, 12, 0, 0, tzinfo=utc),
        end=dt(2008, 1, 20, 0, 0, tzinfo=utc),
    )
    self.assertEqual(
        ["{} to {}".format(o.start, o.end) for o in occs],
        [
            "2008-01-12 08:00:00+00:00 to 2008-01-12 09:00:00+00:00",
            "2008-01-19 08:00:00+00:00 to 2008-01-19 09:00:00+00:00",
        ],
    )
def test_recurring_event_get_occurrences_after(self):
    """occurrences_after and get_occurrences agree on the first occurrence."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    recurring = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    first_via_range = recurring.get_occurrences(
        start=dt(2008, 1, 5, tzinfo=utc),
        end=dt(2008, 1, 6, tzinfo=utc),
    )[0]
    first_via_iter = next(
        recurring.occurrences_after(dt(2008, 1, 5, tzinfo=utc))
    )
    self.assertEqual(first_via_range, first_via_iter)
def test_recurring_event_with_moved_get_occurrences_after(self):
    """A persisted (moved) occurrence is still found by occurrences_after."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    recurring = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    moved = recurring.get_occurrence(dt(2008, 1, 12, 8, 0, tzinfo=utc))
    # Shift the Jan 12 occurrence three days forward (this persists it).
    moved.move(
        dt(2008, 1, 15, 8, 0, tzinfo=utc),
        dt(2008, 1, 15, 9, 0, tzinfo=utc),
    )
    found = next(
        recurring.occurrences_after(dt(2008, 1, 14, 8, 0, tzinfo=utc))
    )
    self.assertEqual(moved, found)
def test_recurring_event_get_occurrence(self):
    """get_occurrence resolves a generated occurrence and, once saved, the persisted one."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    when = dt(2008, 1, 5, 8, 0, tzinfo=utc)
    occurrence = event.get_occurrence(when)
    self.assertEqual(occurrence.start, dt(2008, 1, 5, 8, tzinfo=utc))
    occurrence.save()
    # After saving, the same lookup returns the persisted occurrence.
    self.assertTrue(event.get_occurrence(when).pk is not None)
def test_prevent_type_error_when_comparing_naive_and_aware_dates(self):
    """Looking up a naive datetime against an aware event must not raise TypeError."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0, tzinfo=utc),
        dt(2008, 1, 5, 9, 0, tzinfo=utc),
        dt(2008, 5, 5, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    # A naive date must simply find nothing, not blow up on comparison.
    self.assertIsNone(event.get_occurrence(dt(2008, 1, 20, 0, 0)))
@override_settings(USE_TZ=False)
def test_prevent_type_error_when_comparing_dates_when_tz_off(self):
    """With USE_TZ disabled, naive datetime lookups must not raise TypeError."""
    dt = datetime.datetime
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2008, 1, 5, 8, 0),
        dt(2008, 1, 5, 9, 0),
        dt(2008, 5, 5, 0, 0),
        weekly,
        my_cal,
    )
    self.assertIsNone(event.get_occurrence(dt(2008, 1, 20, 0, 0)))
@override_settings(USE_TZ=False)
def test_get_occurrences_when_tz_off(self):
    """With USE_TZ disabled, occurrences are expanded using naive datetimes."""
    dt = datetime.datetime
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    recurring = self.__create_recurring_event(
        "Recurring event test",
        dt(2008, 1, 5, 8, 0),
        dt(2008, 1, 5, 9, 0),
        dt(2008, 5, 5, 0, 0),
        weekly,
        my_cal,
    )
    occs = recurring.get_occurrences(
        start=dt(2008, 1, 12, 0, 0),
        end=dt(2008, 1, 20, 0, 0),
    )
    self.assertEqual(
        ["{} to {}".format(o.start, o.end) for o in occs],
        [
            "2008-01-12 08:00:00 to 2008-01-12 09:00:00",
            "2008-01-19 08:00:00 to 2008-01-19 09:00:00",
        ],
    )
def test_event_get_ocurrence(self):
    """A non-recurring event resolves an occurrence at its own start time."""
    # NOTE(review): "ocurrence" typo kept in the name -- renaming would change
    # test discovery and any references to it.
    begin = timezone.now() + datetime.timedelta(days=1)
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        my_cal,
    )
    self.assertEqual(event.get_occurrence(begin).start, begin)
def test_occurrences_after_with_no_params(self):
    """occurrences_after() without arguments yields the single future occurrence."""
    begin = timezone.now() + datetime.timedelta(days=1)
    finish = begin + datetime.timedelta(hours=1)
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_event(
        "Non recurring event test get_occurrence", begin, finish, my_cal
    )
    occs = list(event.occurrences_after())
    self.assertEqual(len(occs), 1)
    self.assertEqual(occs[0].start, begin)
    self.assertEqual(occs[0].end, finish)
def test_occurrences_with_recurrent_event_end_recurring_period_edge_case(self):
    """A daily event over a 10-day recurring period yields 11 occurrences (both ends count)."""
    begin = timezone.now() + datetime.timedelta(days=1)
    daily = Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        begin + datetime.timedelta(days=10),
        daily,
        my_cal,
    )
    self.assertEqual(len(list(event.occurrences_after())), 11)
def test_occurrences_with_recurrent_event_end_recurring_period_edge_case_max_loop_lower(
    self,
):
    """max_occurrences caps the generator below the natural occurrence count."""
    begin = timezone.now() + datetime.timedelta(days=1)
    daily = Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        begin + datetime.timedelta(days=10),
        daily,
        my_cal,
    )
    self.assertEqual(len(list(event.occurrences_after(max_occurrences=4))), 4)
def test_occurrences_with_recurrent_event_end_recurring_period_edge_case_max_loop_greater(
    self,
):
    """A max_occurrences above the natural count does not pad the results."""
    begin = timezone.now() + datetime.timedelta(days=1)
    daily = Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        begin + datetime.timedelta(days=10),
        daily,
        my_cal,
    )
    self.assertEqual(len(list(event.occurrences_after(max_occurrences=20))), 11)
def test_occurrences_with_recurrent_event_no_end_recurring_period_max_loop(self):
    """max_occurrences=1 stops the daily generator after a single occurrence."""
    begin = timezone.now() + datetime.timedelta(days=1)
    daily = Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        begin + datetime.timedelta(hours=10),
        daily,
        my_cal,
    )
    self.assertEqual(len(list(event.occurrences_after(max_occurrences=1))), 1)
def test_get_for_object(self):
    """get_for_object only returns events linked to the object via EventRelation."""
    user = User.objects.create_user("john", "<EMAIL>", "johnpassword")
    self.assertEqual(
        len(list(Event.objects.get_for_object(user, "owner", inherit=False))), 0
    )
    Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_event(
        "event test",
        datetime.datetime(2013, 1, 5, 8, 0, tzinfo=pytz.utc),
        datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
        my_cal,
    )
    # Still nothing: the event exists but is not yet related to the user.
    self.assertEqual(
        len(list(Event.objects.get_for_object(user, "owner", inherit=False))), 0
    )
    EventRelation.objects.create_relation(event, user, "owner")
    related = list(Event.objects.get_for_object(user, "owner", inherit=False))
    self.assertEqual(len(related), 1)
    self.assertEqual(event, related[0])
def test_get_absolute(self):
    """get_absolute_url reverses the 'event' route with the event id."""
    begin = timezone.now() + datetime.timedelta(days=1)
    daily = Rule.objects.create(frequency="DAILY")
    my_cal = Calendar.objects.create(name="MyCal")
    event = self.__create_recurring_event(
        "Non recurring event test get_occurrence",
        begin,
        begin + datetime.timedelta(hours=1),
        begin + datetime.timedelta(days=10),
        daily,
        my_cal,
    )
    self.assertEqual(
        reverse("event", kwargs={"event_id": event.id}), event.get_absolute_url()
    )
@override_settings(TIME_ZONE="Europe/Helsinki")
def test_recurring_event_get_occurrence_in_timezone(self):
    """Occurrences resolve correctly across the Helsinki +2 -> +3 DST switch."""
    dt, utc = datetime.datetime, pytz.utc
    weekly = Rule.objects.create(frequency="WEEKLY")
    my_cal = Calendar.objects.create(name="MyCal")
    # Event boundaries are UTC because that is what comes from the database.
    event = self.__create_recurring_event(
        "Recurrent event test get_occurrence",
        dt(2014, 3, 21, 6, 0, tzinfo=utc),
        dt(2014, 3, 21, 8, 0, tzinfo=utc),
        dt(2014, 4, 11, 0, 0, tzinfo=utc),
        weekly,
        my_cal,
    )
    helsinki = pytz.timezone("Europe/Helsinki")
    before_dst = helsinki.localize(dt(2014, 3, 28, 8, 0))  # UTC+2
    occurrence = event.get_occurrence(before_dst)
    self.assertEqual(occurrence.start, before_dst)
    occurrence.save()
    # DST changes on March 30th from +2 to +3.
    after_dst = helsinki.localize(dt(2014, 4, 4, 8, 0))  # UTC+3
    self.assertEqual(event.get_occurrence(after_dst).start, after_dst)
def test_recurring_event_get_occurrence_different_end_timezone(self):
    """The last occurrence honours end_recurring_period when queried from another timezone."""
    dt, utc = datetime.datetime, pytz.utc
    end_recurring = dt(2016, 7, 30, 11, 0, tzinfo=utc)
    event = self.__create_recurring_event(
        "Recurring event with end_reccurring_date in different TZ",
        dt(2016, 7, 25, 10, 0, tzinfo=utc),
        dt(2016, 7, 25, 11, 0, tzinfo=utc),
        end_recurring,
        Rule.objects.create(frequency="DAILY"),
        Calendar.objects.create(name="MyCal"),
    )
    athens = pytz.timezone("Europe/Athens")
    occs = event.get_occurrences(
        athens.localize(dt(2016, 1, 1, 0, 0)),
        athens.localize(dt(2016, 12, 31, 23, 59)),
    )
    self.assertEqual(occs[-1].end, end_recurring)
def test_recurring_event_get_occurrence_across_dst(self):
pacific = pytz.timezone("US/Pacific")
e_start = pacific.localize(datetime.datetime(2015, 3, 4, 9, 0))
e_end = e_start
recc_end = pacific.localize(datetime.datetime(2015, 3, 13, 9, 0))
event = self.__create_recurring_event(
"Recurring event with end_recurring_date that crosses a DST",
e_start,
e_end,
recc_end,
Rule.objects.create(frequency="WEEKLY"),
Calendar.objects.create(name="MyCal"),
)
occs = event.get_occurrences(
e_start, pacific.localize(datetime.datetime(2015, 3, 11, 10, 0))
)
self.assertEqual(
["{} | |
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2():
    """fillna(method='ffill') matches pandas on a frame with NaN runs at both ends."""
    fixture = TestData()
    fixture.tsframe["A"][:5] = np.nan
    fixture.tsframe["A"][-5:] = np.nan
    md_df = pd.DataFrame(fixture.tsframe)
    df_equals(md_df.fillna(method="ffill"), fixture.tsframe.fillna(method="ffill"))
def test_bfill2():
    """fillna(method='bfill') matches pandas on a frame with NaN runs at both ends."""
    fixture = TestData()
    fixture.tsframe["A"][:5] = np.nan
    fixture.tsframe["A"][-5:] = np.nan
    md_df = pd.DataFrame(fixture.tsframe)
    df_equals(md_df.fillna(method="bfill"), fixture.tsframe.fillna(method="bfill"))
def test_fillna_inplace():
    """In-place fillna mutates the modin frame and returns None, matching pandas."""
    frame_data = random_state.randn(10, 4)
    df = pandas.DataFrame(frame_data)
    df[1][:4] = np.nan
    df[3][-4:] = np.nan
    modin_df = pd.DataFrame(df)
    # Mutate only the pandas frame: the modin copy must now differ ...
    df.fillna(value=0, inplace=True)
    try:
        df_equals(modin_df, df)
    except AssertionError:
        pass
    else:
        assert False
    # ... until the same in-place fill is applied to the modin frame.
    modin_df.fillna(value=0, inplace=True)
    df_equals(modin_df, df)
    # inplace=True must return None, exactly like pandas.
    modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
    assert modin_df is None
    # Repeat the diverge-then-converge check with method-based filling.
    df[1][:4] = np.nan
    df[3][-4:] = np.nan
    modin_df = pd.DataFrame(df)
    df.fillna(method="ffill", inplace=True)
    try:
        df_equals(modin_df, df)
    except AssertionError:
        pass
    else:
        assert False
    modin_df.fillna(method="ffill", inplace=True)
    df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(data):
    """fillna with a limit matches pandas for both pad and backfill."""
    pandas_df = pandas.DataFrame(data)
    full_index = pandas_df.index
    # Keep only the first two rows, then pad-forward at most two rows.
    head = pandas_df[:2].reindex(full_index)
    df_equals(
        pd.DataFrame(head).fillna(method="pad", limit=2),
        head.fillna(method="pad", limit=2),
    )
    # Keep only the last two rows, then backfill at most two rows.
    tail = pandas_df[-2:].reindex(full_index)
    df_equals(
        pd.DataFrame(tail).fillna(method="backfill", limit=2),
        tail.fillna(method="backfill", limit=2),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(data):
    """Limited pad/backfill filling matches pandas (mirror of test_frame_fillna_limit)."""
    pandas_df = pandas.DataFrame(data)
    full_index = pandas_df.index
    head = pandas_df[:2].reindex(full_index)
    df_equals(
        pd.DataFrame(head).fillna(method="pad", limit=2),
        head.fillna(method="pad", limit=2),
    )
    tail = pandas_df[-2:].reindex(full_index)
    df_equals(
        pd.DataFrame(tail).fillna(method="backfill", limit=2),
        tail.fillna(method="backfill", limit=2),
    )
def test_fillna_dtype_conversion():
    """fillna on empty and mixed-dtype frames matches pandas."""
    # An all-NaN float frame: make sure filling an "empty" frame works at all.
    pandas_df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
    modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
    df_equals(modin_df.fillna("nan"), pandas_df.fillna("nan"))
    frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
    pandas_df = pandas.DataFrame(frame_data)
    modin_df = pd.DataFrame(frame_data)
    for fill_value in ["", 1, np.nan, 1.0]:
        df_equals(modin_df.fillna(fill_value), pandas_df.fillna(fill_value))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(data):
    """Filling with NaN is a no-op and must not touch bool/int blocks."""
    pandas_df = pandas.DataFrame(data)
    modin_df = pd.DataFrame(data)
    df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series():
    """Per-column fill values given as a dict (or Series) match pandas."""
    frame_data = {
        "a": [np.nan, 1, 2, np.nan, np.nan],
        "b": [1, 2, 3, np.nan, np.nan],
        "c": [np.nan, 1, 2, 3, 4],
    }
    pandas_df = pandas.DataFrame(frame_data)
    modin_df = pd.DataFrame(frame_data)
    df_equals(modin_df.fillna({"a": 0, "b": 5}), pandas_df.fillna({"a": 0, "b": 5}))
    # Keys naming no existing column ("d") are silently ignored.
    df_equals(
        modin_df.fillna({"a": 0, "b": 5, "d": 7}),
        pandas_df.fillna({"a": 0, "b": 5, "d": 7}),
    )
    # A Series is treated the same way as a dict.
    df_equals(modin_df.fillna(pandas_df.max()), pandas_df.fillna(pandas_df.max()))
def test_fillna_dataframe():
    """Filling from another DataFrame only affects shared row/column labels."""
    frame_data = {
        "a": [np.nan, 1, 2, np.nan, np.nan],
        "b": [1, 2, 3, np.nan, np.nan],
        "c": [np.nan, 1, 2, 3, 4],
    }
    pandas_df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
    modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
    # The filler frame deliberately has a partly different index and columns;
    # only the shared labels may receive values.
    filler = pandas.DataFrame(
        {"a": [np.nan, 10, 20, 30, 40], "b": [50, 60, 70, 80, 90], "foo": ["bar"] * 5},
        index=list("VWXuZ"),
    )
    df_equals(modin_df.fillna(filler), pandas_df.fillna(filler))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(data):
    """Row-wise forward fill (axis=1) matches pandas.

    Fix: the original body ran the exact same ffill/axis=1 comparison twice;
    the duplicated call added no coverage and is removed.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    df_equals(
        modin_df.fillna(method="ffill", axis=1),
        pandas_df.fillna(method="ffill", axis=1),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(data):
    """A misspelled fill method raises ValueError mentioning the bad name.

    Fix: ``tm.assert_raises_regex`` was deprecated and later removed from
    ``pandas.util.testing``; ``pytest.raises(..., match=...)`` is the
    supported equivalent.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841
    with pytest.raises(ValueError, match="ffil"):
        modin_df.fillna(method="ffil")
def test_fillna_invalid_value():
    """Unsupported fill values raise TypeError, matching pandas."""
    modin_df = pd.DataFrame(TestData().frame)
    # list
    with pytest.raises(TypeError):
        modin_df.fillna([1, 2])
    # tuple
    with pytest.raises(TypeError):
        modin_df.fillna((1, 2))
    # frame with series
    with pytest.raises(TypeError):
        modin_df.iloc[:, 0].fillna(modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(data):
    """Forward fill preserves column order relative to pandas."""
    pandas_df = pandas.DataFrame(data)
    modin_df = pd.DataFrame(data)
    df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns():
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(data):
    """DataFrame.filter by items, regex (both axes) and like matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    items = ["col1", "col5"]
    regex = "4$|3$"
    like = "col"
    df_equals(modin_df.filter(items=items), pandas_df.filter(items=items))
    for axis in (0, 1):
        df_equals(
            modin_df.filter(regex=regex, axis=axis),
            pandas_df.filter(regex=regex, axis=axis),
        )
    df_equals(modin_df.filter(like=like), pandas_df.filter(like=like))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first(data):
    """``first`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.first(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
    """first_valid_index agrees with pandas."""
    pandas_df = pandas.DataFrame(data)
    modin_df = pd.DataFrame(data)
    assert modin_df.first_valid_index() == pandas_df.first_valid_index()
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_csv(data):
    """``from_csv`` constructor is not implemented in modin yet."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)  # noqa F841
    with pytest.raises(NotImplementedError):
        pd.DataFrame.from_csv(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(data):
    """``from_dict`` constructor is not implemented in modin yet."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)  # noqa F841
    with pytest.raises(NotImplementedError):
        pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(data):
    """``from_items`` constructor is not implemented in modin yet."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)  # noqa F841
    with pytest.raises(NotImplementedError):
        pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(data):
    """``from_records`` constructor is not implemented in modin yet."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)  # noqa F841
    with pytest.raises(NotImplementedError):
        pd.DataFrame.from_records(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_value(data):
    """``get_value`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.get_value(None, None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_values(data):
    """``get_values`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
    """head(n) agrees with pandas across the parametrized n values."""
    pandas_df = pandas.DataFrame(data)
    modin_df = pd.DataFrame(data)
    df_equals(modin_df.head(n), pandas_df.head(n))
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_hist(data):
    """``hist`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
    """``iat`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(data, axis, skipna):
    """idxmax agrees with pandas across axes and skipna settings.

    Fix: the body previously exercised ``DataFrame.all`` (a copy/paste from
    the reduction tests) and never called ``idxmax``; it now tests what its
    name claims.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
    modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(data, axis, skipna):
    """idxmin agrees with pandas across axes and skipna settings.

    Fix: the body previously exercised ``DataFrame.all`` (a copy/paste from
    the reduction tests) and never called ``idxmin``; it now tests what its
    name claims.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
    modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
    df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_infer_objects(data):
    """``infer_objects`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
    """Positional indexing (scalar, Series, frame, slices, assignment) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    if not name_contains(request.node.name, ["empty_data"]):
        # Scalar read
        np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
        # Series reads: whole row, column tail, single-row column slice
        df_equals(modin_df.iloc[0], pandas_df.iloc[0])
        df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
        df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
        # DataFrame reads
        df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
        # See issue #80
        # df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
        df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
        # Issue #43: a full-width row slice must not crash
        modin_df.iloc[0:3, :]
        # Write item: scalar broadcast assignment via iloc
        modin_df.iloc[[1, 2]] = 42
        pandas_df.iloc[[1, 2]] = 42
        df_equals(modin_df, pandas_df)
    else:
        # An empty frame has no position (0, 1) to read.
        with pytest.raises(IndexError):
            modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
    """The index matches pandas and survives reassignment to string labels."""
    pandas_df = pandas.DataFrame(data)
    modin_df = pd.DataFrame(data)
    df_equals(modin_df.index, pandas_df.index)
    modin_copy, pandas_copy = modin_df.copy(), pandas_df.copy()
    modin_copy.index = [str(i) for i in modin_copy.index]
    pandas_copy.index = [str(i) for i in pandas_copy.index]
    df_equals(modin_copy.index, pandas_copy.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_info(request, data):
    """info() runs without crashing and writes the expected summary to a buffer."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841
    # Test to make sure that it does not crash
    modin_df.info(memory_usage="deep")
    if not name_contains(request.node.name, ["empty_data"]):
        # Verbose output must name the modin class, report memory usage,
        # and list the column count.
        with io.StringIO() as buf:
            modin_df.info(buf=buf)
            info_string = buf.getvalue()
            assert "<class 'modin.pandas.dataframe.DataFrame'>\n" in info_string
            assert "memory usage: " in info_string
            assert (
                "Data columns (total {} columns):".format(modin_df.shape[1])
                in info_string
            )
        # Non-verbose output with memory_usage=False must omit memory usage
        # and use the compact "Columns: ..." summary line instead.
        with io.StringIO() as buf:
            modin_df.info(buf=buf, verbose=False, memory_usage=False)
            info_string = buf.getvalue()
            assert "memory usage: " not in info_string
            assert (
                "Columns: {0} entries, {1} to {2}".format(
                    modin_df.shape[1], modin_df.columns[0], modin_df.columns[-1]
                )
                in info_string
            )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(data, loc):
    """insert matches pandas, including raising for invalid locations."""
    modin_df = pd.DataFrame(data).copy()
    pandas_df = pandas.DataFrame(data).copy()
    col_name = "New Column"
    new_values = modin_df.iloc[:, 0]
    # pandas is the oracle: whatever it raises, modin must raise too.
    try:
        pandas_df.insert(loc, col_name, new_values)
    except Exception as err:
        with pytest.raises(type(err)):
            modin_df.insert(loc, col_name, new_values)
    else:
        modin_df.insert(loc, col_name, new_values)
        df_equals(modin_df, pandas_df)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_interpolate(data):
    """``interpolate`` is not implemented in modin yet; expected to raise."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.interpolate()
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_copy(data):
    """``is_copy`` is not implemented in modin yet; attribute access raises."""
    pandas_df = pandas.DataFrame(data)  # noqa F841
    modin_df = pd.DataFrame(data)
    with pytest.raises(NotImplementedError):
        modin_df.is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(data):
    """items() yields (label, column Series) pairs identical to pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    for (modin_label, modin_col), (pandas_label, pandas_col) in zip(
        modin_df.items(), pandas_df.items()
    ):
        df_equals(pandas_col, modin_col)
        assert pandas_label == modin_label
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(data):
    """Each (label, column) pair yielded by iteritems() must match pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    for (modin_index, modin_series), (pandas_index, pandas_series) in zip(
        modin_df.iteritems(), pandas_df.iteritems()
    ):
        df_equals(pandas_series, modin_series)
        assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(data):
    """Each (index, row) pair yielded by iterrows() must match pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    for (modin_index, modin_series), (pandas_index, pandas_series) in zip(
        modin_df.iterrows(), pandas_df.iterrows()
    ):
        df_equals(pandas_series, modin_series)
        assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(data):
    """itertuples() must agree with pandas for defaults and all index/name combos."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    # Default parameters.
    for modin_row, pandas_row in zip(modin_df.itertuples(), pandas_df.itertuples()):
        np.testing.assert_equal(modin_row, pandas_row)

    # Every combination of the index flag and the namedtuple name.
    for index in (True, False):
        for name in (None, "NotPandas", "Pandas"):
            modin_rows = modin_df.itertuples(index=index, name=name)
            pandas_rows = pandas_df.itertuples(index=index, name=name)
            for modin_row, pandas_row in zip(modin_rows, pandas_rows):
                np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(data):
    # The long-deprecated .ix accessor is intentionally unsupported.
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841
    with pytest.raises(NotImplementedError):
        modin_df.ix()
def test_join():
frame_data | |
belong to the group, and their access rights. This value will be empty. It will be removed from the payload response in an upcoming release. To retrieve user information on an artifact, please consider using the Get Group User APIs, or the PostWorkspaceInfo API with the getArtifactUser parameter.
workbooks: str[]
List of the workbooks ids that belong to the group. Available only for admin API calls.
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/groups/{}".format(workspace_id)
body = {
"id": workspace_id
}
if capacityId != None:
body["capacityId"]=capacityId
if dashboards != None:
body["dashboards"] = dashboards
if dataflowStorageId != None:
body["dataflowStorageId"] = dataflowStorageId
if dataflows != None:
body["dataflows"] = dataflows
if datasets != None:
body["datasets"]=datasets
if description != None:
body["description"] = description
if isOnDedicatedCapacity != None:
body["isOnDedicatedCapacity"] = isOnDedicatedCapacity
if isReadOnly != None:
body["isReadOnly"] = isReadOnly
if name != None:
body["name"]=name
if pipelineId != None:
body["pipelineId"] = pipelineId
if reports != None:
body["reports"] = reports
if state != None:
body["state"] = state
if typee != None:
body["type"] = typee
if users != None:
body["users"] = users
if workbooks != None:
body["workbooks"] = workbooks
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.patch(url, json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def update_user_in_pipeline(self, pipeline_id, identifier, principalType, accessRight):
"""Grants user permissions to a specified deployment pipeline.
### Parameters
----
pipeline_id:
The Power Bi Deployment Pipeline id. You can take it from PBI Service URL
### Request Body
----
identifier: str
For Principal type 'User' provide UPN , otherwise provide Object ID of the principal. This is mandatory
principalType: principalType
The principal type (App, Group, User or None). This is mandatory.
accessRight: GroupUserAccessRight
accessRequired - Access rights a user has for the deployment pipeline. (Permission level: Admin). This is mandatory
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/pipelines/{}/users".format(pipeline_id)
body = {
"identifier": identifier ,
"principalType": principalType ,
"accessRight": accessRight
}
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def delete_user_from_pipeline(self, pipeline_id, identifier):
"""Removes user permissions from a specified deployment pipeline.
### Parameters
----
pipeline_id: str uuid
The deployment pipeline ID
identifier: str
For Principal type 'User' provide UPN , otherwise provide Object ID of the principal
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/pipelines/{}/users/{}".format(pipeline_id, identifier)
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.delete(url, headers=headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def assign_workspaces_to_capacity_preview(self, tagetCapacityObjectId, workspacesToAssign):
"""Assigns the specified workspaces to the specified Premium capacity.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
### Parameters
----
### Request Body
----
capacityMigrationAssignments: Assignment contract for migrating workspaces to premium capacity as tenant admin
targetCapacityObjectId: str
The premium capacity ID
workspacesToAssign: str[]
List of the workspace IDs to be migrated to premium capacity
### Returns
----
Response object from requests library. 200 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/capacities/AssignWorkspaces"
body ={
"tagetCapacityObjectId":tagetCapacityObjectId,
"workspacesToAssign":workspacesToAssign
}
body_option2 ={
"capacityMigrationAssignments":[{
"tagetCapacityObjectId":tagetCapacityObjectId,
"workspacesToAssign":workspacesToAssign
}]
}
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
    def unassign_workspaces_from_capacity_preview(self, workspacesToAssign):
        """Unassigns the specified workspaces from capacity.
        *** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
        ### Parameters
        ----
        ### Request Body
        ----
        workspacesToAssign: str[]
            List of the workspace IDs to be migrated to premium capacity
        ### Returns
        ----
        Response object from requests library. 200 OK
        """
        try:
            url= "https://api.powerbi.com/v1.0/myorg/admin/capacities/UnassignWorkspaces"
            # NOTE(review): the official UnassignWorkspacesFromCapacity API
            # documents the request-body key as "workspacesToUnassign" — this
            # sends "workspacesToAssign" instead; confirm against the docs
            # before relying on this preview method.
            body ={
                "workspacesToAssign":workspacesToAssign
                }
            headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
            res = requests.post(url, data = json.dumps(body), headers = headers)
            res.raise_for_status()
            return res
        except requests.exceptions.HTTPError as ex:
            print("HTTP Error: ", ex, "\nText: ", ex.response.text)
        except requests.exceptions.RequestException as e:
            print("Request exception: ", e)
def get_activity_events_preview(self, activity_date=None, return_pandas=False):
'''Returns a dict of pandas dataframe of audit activity events for a tenant.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
The continuation token is automtaically used to get all the results in the date.
### Parameters
----
activity_date: str "yyyy-mm-dd"
The Single date to get events from the whole day.
If the date is not specify it will return yesterday events by default.
return_pandas: bool
Flag to specify if you want to return a dict response or a pandas dataframe of events.
### Returns
----
If return_pandas = True returns a Pandas dataframe concatenating iterations otherwise it returns a dict of the response
### Limitations
----
Maximum 200 requests per hour.
'''
columnas = ['Id', 'RecordType', 'CreationTime', 'Operation', 'OrganizationId',
'UserType', 'UserKey', 'Workload', 'UserId', 'ClientIP', 'UserAgent',
'Activity', 'ItemName', 'WorkSpaceName', 'DatasetName', 'WorkspaceId',
'ObjectId', 'DatasetId', 'DataConnectivityMode', 'IsSuccess',
'RequestId', 'ActivityId', 'TableName', 'LastRefreshTime']
df_total = pd.DataFrame(columns=columnas)
dict_total = {'activityEventEntities': [] }
list_total = []
if activity_date == None:
activity_date = date.today()- timedelta(days=1)
else:
activity_date = date(int(activity_date.split("-")[0]),int(activity_date.split("-")[1]),int(activity_date.split("-")[2]))
start = activity_date.strftime("'%Y-%m-%dT%H:%M:00.000Z'")
end = activity_date.strftime("'%Y-%m-%dT23:59:59.000Z'")
url = "https://api.powerbi.com/v1.0/myorg/admin/activityevents?startDateTime={}&endDateTime={}".format(start, end)
ban = True
contar = 0
try:
while(ban):
res = requests.get(url,
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
)
if return_pandas:
js = json.dumps(res.json()["activityEventEntities"])
df = pd.read_json(js)
#print(df.head())
df_total = df_total.append(df, sort=True, ignore_index=True)
print("Building dataframe iteration: ", str(contar))
#print(df_total.head())
else:
if res.json()["activityEventEntities"]:
#append_value(dict_total, "activityEventEntities", res.json()["activityEventEntities"][0])
list_total.extend(res.json()["activityEventEntities"])
print("Building dict iteration: ", str(contar))
print(res.status_code)
contar = contar +1
print(res.json()["continuationUri"])
if res.json()["continuationUri"] == None:
ban=False
url = res.json()["continuationUri"]
if return_pandas:
return df_total
else:
dict_total = {'activityEventEntities': list_total }
return dict_total
except requests.exceptions.Timeout:
print("ERROR: The request method has exceeded the Timeout")
except requests.exceptions.TooManyRedirects:
print("ERROR: Bad URL try a different one")
except requests.exceptions.RequestException as e:
print("Catastrophic error.")
raise SystemExit(e)
except Exception as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
def get_modified_workspaces_preview(self, excludePersonalWorkspaces=True, modifiedSince=None):
"""Gets a list of workspace IDs in the organization. This is a preview API call.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
### Parameters
----
excludePersonalWorkspaces: bool
Whether to exclude personal workspaces
modifiedSince: str-datetime
format %Y-%m-%dT%H:%M:00.000Z
### Returns
----
Dict:
Returns a list of list that contains groups of maximum 100 workspaces.
"""
lista_total = []
contador = 100
#datetime.strptime("2021-01-01 01:55:19", "%Y-%m-%d %H:%M:%S")
#modify_date = modifiedSince.strftime("'%Y-%m-%dT%H:%M:00.000Z'")
try:
url = "https://api.powerbi.com/v1.0/myorg/admin/workspaces/modified?excludePersonalWorkspaces={}".format(excludePersonalWorkspaces)
if modifiedSince != None:
url = url + "&modifiedSince={}".format(modifiedSince)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res = res.json()
lista = [res[i]['id'] for i in range(len(res))]
for item in range(len(lista)):
if lista[item*100:item*100+100] != []:
lista_total.append(lista[item*100:item*100+100])
else:
break
return lista_total
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def post_workspace_info(self, workspaces, lineage=True, datasourceDetails=True, datasetSchema=True, datasetExpressions=True, getArtifactUsers=True):
"""Initiates a call to receive metadata for the requested list of workspaces. This is a preview API call.
*** THIS REQUEST IS IN PREVIEW IN SIMPLEPBI ***
### Parameters
----
### Request Body
----
workspaces: str[]
List of the workspace IDs to ask for scan (it can't contain more than 100 workspaces)
### Returns
----
Scan id in uuid format. 202 OK
"""
try:
url= "https://api.powerbi.com/v1.0/myorg/admin/workspaces/getInfo?lineage={}&datasourceDetails={}&datasetSchema={}&datasetExpressions={}&getArtifactUsers={}".format(lineage, datasourceDetails, datasetSchema, datasetExpressions, getArtifactUsers)
body ={
"workspaces":workspaces
}
headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
res = requests.post(url, data = json.dumps(body), headers = headers)
res.raise_for_status()
return res.json()["id"]
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_scan_status_preview(self, scan_id):
"""Gets the scan status for the specified scan. This is a preview API call.
*** THIS REQUEST IS IN | |
from django.db import models
from django.contrib.gis.db import models
from stdimage.models import StdImageField
from django.utils.text import slugify
from markdown import markdown
from django.utils.safestring import mark_safe
from django.conf import settings
import bleach
from unidecode import unidecode
from django.urls import reverse
class Page(models.Model):
    # A CMS-style page. The body ("content") can be written in HTML and/or
    # Markdown; the rendered result is cached in content_html by save().
    name = models.CharField(max_length=255, db_index=True)
    content = models.TextField(null=True, blank=True)
    content_html = models.TextField(null=True, blank=True, help_text="Auto-generated - do NOT edit")
    image = StdImageField(upload_to="pages", variations={"thumbnail": (350, 350), "medium": (800, 600), "large": (1280, 1024)}, null=True, blank=True)
    # position drives the default ordering (see Meta below).
    position = models.PositiveSmallIntegerField(db_index=True)
    # slug is auto-generated from name on first save() if left empty.
    slug = models.SlugField(max_length=255)
    FORMATS = (
        ("HTML", "HTML"),
        ("MARK", "Markdown"),
        ("MARK_HTML", "Markdown and HTML"),
    )
    format = models.CharField(max_length=9, choices=FORMATS)
    def __str__(self):
        return self.name
    def get_content(self):
        """Return the pre-rendered HTML body, marked safe for templates."""
        # The content field is already sanitized, according to the settings (see the save() function below)
        # So when we retrieve the html content we can trust this is safe, and will mark it as such
        # We avoid using |safe in templates -- to centralize the effort to sanitize input
        if self.content:
            return mark_safe(self.content_html)
        else:
            return ""
    class Meta:
        ordering = ["position"]
    def save(self, *args, **kwargs):
        # Render content -> content_html according to the chosen format.
        # Note: only the "MARK" branch runs bleach; the other two trust input.
        if not self.content:
            self.content_html = None
        elif self.format == "HTML":
            # Here it wouldn't hurt to apply bleach and take out unnecessary tags
            self.content_html = self.content
        elif self.format == "MARK_HTML":
            # Here it wouldn't hurt to apply bleach and take out unnecessary tags
            self.content_html = markdown(self.content)
        elif self.format == "MARK":
            self.content_html = markdown(bleach.clean(self.content))
        if not self.slug:
            # unidecode transliterates non-ASCII names before slugifying.
            self.slug = slugify(unidecode(self.name))
        super().save(*args, **kwargs)
class Organization(models.Model):
    # A partner organization shown on the site.
    # NOTE(review): part_of_fcc presumably flags membership of the umbrella
    # project this site belongs to — confirm with maintainers.
    name = models.CharField(max_length=255, db_index=True)
    description = models.TextField(null=True, blank=True)
    logo = StdImageField(upload_to="pages", variations={"thumbnail": (350, 350), "medium": (800, 600), "large": (1280, 1024)}, delete_orphans=True)
    url = models.CharField(max_length=255, null=True, blank=True)
    part_of_fcc = models.BooleanField(default=True, db_index=True)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
class Document(models.Model):
    """A document/dataset record — typically a map layer or a file upload.

    meta_data is a free-form JSON blob; get_dataviz/get_opacity read
    well-known keys out of it with safe fallbacks.
    """
    name = models.CharField(max_length=255, db_index=True)
    class Type(models.IntegerChoices):
        UNKNOWN = 0, "Unknown"
        NATURE = 1, "Existing stepping-stones"
        CONNECTOR = 2, "Corridor connectors"
        TRANSPORT = 3, "Transport"
        POTENTIAL = 4, "Possible stepping-stones"
        CONTEXT = 5, "Context"
        TEACHING = 6, "Teaching resources"
        GENERAL = 7, "General document repository"
    type = models.IntegerField(choices=Type.choices, db_index=True, default=0)
    author = models.CharField(max_length=255, null=True, blank=True)
    url = models.URLField(max_length=255, null=True, blank=True)
    content = models.TextField("Description", null=True, blank=True)
    color = models.CharField(max_length=50, null=True, blank=True, help_text="See https://htmlcolors.com/color-names for an overview of possible color names")
    meta_data = models.JSONField(null=True, blank=True, help_text="Only to be edited if you know what this does - otherwise, please do not change")
    active = models.BooleanField(default=True, db_index=True)
    include_in_site_analysis = models.BooleanField(default=False, db_index=True)
    file = models.FileField(null=True, blank=True, upload_to="files")
    def __str__(self):
        return self.name
    def get_file_size(self):
        """Return the attached file's size in MiB, or None when no file is set."""
        if self.file:
            return self.file.size/1024/1024
        else:
            return None
    @property
    def get_dataviz(self):
        """Return meta_data["dataviz"] or an empty dict when absent."""
        if self.meta_data and "dataviz" in self.meta_data:
            return self.meta_data["dataviz"]
        else:
            return {}
    @property
    def get_absolute_url(self):
        return "/maps/" + str(self.id)
    # Returns the opacity used for the background color in maps
    # Some layers, such as the boundary layer, should be fully
    # transparent so we only see a border.
    @property
    def get_opacity(self):
        try:
            return self.meta_data["opacity"]
        # Narrowed from a bare except: meta_data can be None (TypeError) or
        # lack the key (KeyError); anything else should surface as a bug.
        except (TypeError, KeyError):
            return 0.4  # Default background color opacity in the maps
class ReferenceSpace(models.Model):
    """A named geographic feature (polygon/point) tied to a source Document."""
    name = models.CharField(max_length=255, db_index=True)
    content = models.TextField(null=True, blank=True)
    geometry = models.GeometryField(null=True, blank=True)
    photo = models.ForeignKey("Photo", on_delete=models.CASCADE, null=True, blank=True, related_name="referencespace")
    source = models.ForeignKey(Document, on_delete=models.CASCADE, null=True, blank=True, related_name="spaces")
    temp_source_id = models.IntegerField(null=True, blank=True, help_text="Only used when importing data")
    meta_data = models.JSONField(null=True, blank=True)
    # FIX: __str__ was defined twice in this class; only the second definition
    # was effective, so the dead first one (name + source name) was removed.
    def __str__(self):
        return self.name if self.name else "Unnamed garden"
    @property
    def get_absolute_url(self):
        # Gardens (a multi-table-inheritance child) get their own URL space.
        if hasattr(self, "garden"):
            return f"/gardens/{self.id}/"
        else:
            return f"/space/{self.id}/"
    @property
    def get_lat(self):
        """Latitude of the geometry centroid, or None when unavailable."""
        try:
            return self.geometry.centroid[1]
        # Narrowed from a bare except: geometry may be None (AttributeError)
        # or not index like a point; other errors should surface as bugs.
        except (AttributeError, TypeError, IndexError):
            return None
    @property
    def get_lng(self):
        """Longitude of the geometry centroid, or None when unavailable."""
        try:
            return self.geometry.centroid[0]
        except (AttributeError, TypeError, IndexError):
            return None
    def get_vegetation_type(self):
        """Return the first VegetationType covering this space, or None."""
        v = VegetationType.objects.filter(spaces=self)
        return v[0] if v else None
    @property
    def get_photo_medium(self):
        if self.photo:
            return self.photo.image.medium.url
        else:
            return settings.MEDIA_URL + "/placeholder.png"
    @property
    def suburb(self):
        """Title-cased name of the first suburb intersecting this space.

        NOTE(review): source_id=334434 is assumed to identify the suburbs
        layer — confirm against the imported Document records.
        """
        if not self.geometry:
            return None
        suburb = ReferenceSpace.objects.filter(source_id=334434, geometry__intersects=self.geometry)
        if suburb:
            return suburb[0].name.title()
        else:
            return None
    def get_popup(self):
        """HTML snippet used for map marker popups (name, photo, details link)."""
        content = f"<h4>{self.name}</h4>"
        if self.photo:
            content = content + f"<a class='d-block' href='{self.get_absolute_url}'><img alt='{self.name}' src='{self.photo.image.thumbnail.url}' /></a><hr>"
        content = content + f"<a href='{self.get_absolute_url}'>View details</a>"
        return mark_safe(content)
    class Meta:
        ordering = ["name"]
class Garden(ReferenceSpace):
    # Multi-table-inheritance child of ReferenceSpace representing a garden
    # that moves through restoration phases.
    active = models.BooleanField(default=True, db_index=True)
    original = models.JSONField(null=True, blank=True)
    class PhaseStatus(models.IntegerChoices):
        PENDING = 1, "Pending"
        IN_PROGRESS = 2, "In progress"
        COMPLETED = 3, "Completed"
    phase_assessment = models.IntegerField("Initial ecological and social assessment", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_alienremoval = models.IntegerField("Alien removal", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_landscaping = models.IntegerField("Landscaping", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_pioneers = models.IntegerField("Planting of pioneer species", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_birdsinsects = models.IntegerField("Planting of bird and insect species", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_specialists = models.IntegerField("Planting of specialist species", choices=PhaseStatus.choices, db_index=True, null=True)
    phase_placemaking = models.IntegerField("Placemaking", choices=PhaseStatus.choices, db_index=True, null=True)
    organizations = models.ManyToManyField(Organization, blank=True)
    vegetation_type = models.ForeignKey("VegetationType", on_delete=models.CASCADE, null=True, blank=True, related_name="gardens")
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Auto-derive vegetation_type from the garden's location on every save.
        # NOTE(review): pk=983172 is assumed to be the vegetation-map Document
        # loaded by the import pipeline — confirm; a missing record would raise
        # Document.DoesNotExist here before the try block protects anything.
        vegetation = Document.objects.get(pk=983172)
        veg = None
        if self.geometry:
            try:
                veg = vegetation.spaces.get(geometry__intersects=self.geometry.centroid)
                veg = veg.get_vegetation_type()
            # Broad except: any lookup failure (no/multiple matches) clears the
            # vegetation type rather than blocking the save.
            except:
                veg = None
        self.vegetation_type = veg
        super().save(*args, **kwargs)
class Event(models.Model):
    # A calendar event with an optional cover photo and start/end times.
    name = models.CharField(max_length=255, db_index=True)
    content = models.TextField(null=True, blank=True)
    photo = models.ForeignKey("Photo", on_delete=models.CASCADE, null=True, blank=True, related_name="events")
    start = models.DateTimeField(null=True, blank=True)
    end = models.DateTimeField(null=True, blank=True)
    def __str__(self):
        return self.name
class Genus(models.Model):
    # Botanical genus; Species records link here via a ForeignKey.
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
        verbose_name_plural = "Genera"
    def get_absolute_url(self):
        return reverse("genus", args=[self.id])
class Family(models.Model):
    # Botanical family; Species records link here via a ForeignKey.
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
    def get_absolute_url(self):
        return reverse("family", args=[self.id])
class Redlist(models.Model):
    # A conservation red-list status (e.g. a two-letter code plus a name);
    # css holds a Bootstrap badge color class for display.
    name = models.CharField(max_length=255)
    code = models.CharField(max_length=2)
    css = models.CharField(max_length=10, null=True, blank=True)
    def __str__(self):
        return f"{self.name} ({self.code})"
    @property
    def get_code(self):
        """Badge-styled HTML for the code, marked safe for templates."""
        return mark_safe(f"<span class='badge bg-{self.css}'>{self.code}</span>")
    @property
    def formatted(self):
        """Badge plus full name, marked safe via get_code."""
        return mark_safe(f"{self.get_code} {self.name}")
    class Meta:
        verbose_name_plural = "Redlist"
class VegetationType(models.Model):
    # A vegetation classification with coverage statistics and the map spaces
    # (ReferenceSpace rows from the vegetation layer) it applies to.
    name = models.CharField(max_length=255, db_index=True)
    description = models.TextField(null=True, blank=True)
    historical_cover = models.PositiveSmallIntegerField(help_text="Cover in km2")
    cape_town_cover = models.FloatField(help_text="In %")
    current_cape_town_area = models.FloatField(help_text="In km2")
    conserved_cape_town = models.PositiveSmallIntegerField(help_text="Conserved or managed, in km2")
    redlist = models.ForeignKey(Redlist, on_delete=models.CASCADE)
    slug = models.SlugField(max_length=255)
    # NOTE(review): 983172 is assumed to be the vegetation-map Document id,
    # matching Garden.save() — confirm against the import pipeline.
    spaces = models.ManyToManyField(ReferenceSpace, blank=True, limit_choices_to={"source_id": 983172})
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse("vegetation_type", args=[self.slug])
    class Meta:
        ordering = ["name"]
class SpeciesFeatures(models.Model):
    # A tag-like feature that can be attached to Species (M2M), grouped by type.
    name = models.CharField(max_length=255, db_index=True)
    class Type(models.IntegerChoices):
        ANIMALS = 1, "Animal-friendly"
        SITE = 2, "Tolerances/sites"
        GROWTH = 3, "Growth features"
        OTHER = 4, "Other"
    # default=0 is not a declared choice; it effectively means "unclassified".
    type = models.IntegerField(choices=Type.choices, db_index=True, default=0)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
class Species(models.Model):
    """A plant species with taxonomy links, red-list status and imported metadata.

    meta_data["original"] holds the raw record from the legacy import; old()
    and get_links() read from it defensively because meta_data can be null.
    """
    name = models.CharField(max_length=255, db_index=True)
    common_name = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    common_name_xh = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    common_name_af = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    redlist = models.ForeignKey(Redlist, on_delete=models.CASCADE, null=True, blank=True)
    links = models.JSONField(null=True, blank=True)
    animals = models.JSONField(null=True, blank=True)
    soils = models.JSONField(null=True, blank=True)
    properties = models.JSONField(null=True, blank=True)
    propagation_seed = models.TextField(null=True, blank=True)
    propagation_cutting = models.TextField(null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    genus = models.ForeignKey(Genus, on_delete=models.CASCADE, related_name="species")
    family = models.ForeignKey(Family, on_delete=models.CASCADE, null=True, blank=True, related_name="species")
    features = models.ManyToManyField(SpeciesFeatures, blank=True, related_name="species")
    vegetation_types = models.ManyToManyField(VegetationType, blank=True, related_name="species")
    photo = models.ForeignKey("Photo", on_delete=models.CASCADE, null=True, blank=True, related_name="main_species")
    meta_data = models.JSONField(null=True, blank=True)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
        verbose_name_plural = "Species"
    @property
    def get_photo_medium(self):
        if self.photo:
            return self.photo.image.medium.url
        else:
            return settings.MEDIA_URL + "/placeholder.png"
    @property
    def get_photo_thumbnail(self):
        if self.photo:
            return self.photo.image.thumbnail.url
        else:
            return settings.MEDIA_URL + "/placeholder.png"
    def old(self):
        """Return the raw imported record, or None when there is none."""
        # FIX: meta_data is nullable — previously this raised AttributeError
        # for species created outside the import pipeline.
        return (self.meta_data or {}).get("original")
    def get_links(self):
        """Build a {label: url} dict from the imported record's link fields."""
        links = {}
        # FIX: guard against meta_data being None and against a missing
        # "original" key — both crashed before with AttributeError.
        original = (self.meta_data or {}).get("original") or {}
        if original.get("link"):
            link = original.get("link")
            # Classify the generic link by its host when recognisable.
            if "wikipedia" in link:
                links["Wikipedia"] = link
            elif "pza" in link:
                links["PlantZA"] = link
            elif "redlist" in link:
                links["Redlist"] = link
            else:
                links[link] = link
        if original.get("link_plantza"):
            links["PlantZA"] = original.get("link_plantza")
        if original.get("link_wikipedia"):
            links["Wikipedia"] = original.get("link_wikipedia")
        if original.get("link_extra"):
            links["More information"] = original.get("link_extra")
        if original.get("link_redlist"):
            links["Redlist"] = original.get("link_redlist")
        return links
class Photo(models.Model):
    # An uploaded photo, attachable to a garden, event or species.
    name = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    image = StdImageField(upload_to="photos", variations={"thumbnail": (350, 350), "medium": (800, 600), "large": (1280, 1024)}, delete_orphans=True)
    # position drives the default ordering (see Meta below).
    position = models.PositiveSmallIntegerField(db_index=True, default=1)
    date = models.DateField(auto_now_add=True)
    upload_date = models.DateTimeField(auto_now_add=True)
    author = models.CharField(max_length=255, db_index=True, null=True, blank=True)
    garden = models.ForeignKey(Garden, on_delete=models.CASCADE, null=True, blank=True, related_name="photos")
    event = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True, related_name="photos")
    species = models.ForeignKey(Species, on_delete=models.CASCADE, null=True, blank=True, related_name="photos")
    old_id = models.IntegerField(db_index=True, null=True, blank=True) # Delete after migration is complete
    original = models.JSONField(null=True, blank=True)
    def __str__(self):
        if self.name:
            return self.name
        else:
            return f"Photo {self.id}"
    @property
    def get_photo_medium(self):
        return self.image.medium.url
    @property
    def get_photo_thumbnail(self):
        return self.image.thumbnail.url
    class Meta:
        ordering = ["position", "date"]
class Corridor(models.Model):
    # An ecological corridor (e.g. a river corridor) with descriptive text,
    # a cover image and the municipal wards it crosses (JSON list).
    name = models.CharField(max_length=255)
    general_description = models.TextField(null=True, blank=True)
    social_description = models.TextField(null=True, blank=True)
    image = StdImageField(upload_to="corridors", variations={"thumbnail": (350, 350), "medium": (800, 600), "large": (1280, 1024)}, delete_orphans=True)
    wards = models.JSONField(null=True, blank=True)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ["name"]
    @property
    def get_absolute_url(self):
        return f"/corridors/rivers/{self.id}/"
    def get_image_size(self):
        """Return the cover image size in KiB."""
        return self.image.size/1024
class Newsletter(models.Model):
email = models.CharField(max_length=255)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return | |
<reponame>annapowellsmith/openpresc
from contextlib import contextmanager
import subprocess
import sys
import tempfile
import warnings
from google.cloud import bigquery as gcbq
from google.cloud.exceptions import Conflict, NotFound
from six import reraise
import pandas as pd
from django.conf import settings
from django.db.models import fields as model_fields
from django.db.models.fields import related as related_fields
from gcutils.storage import Client as StorageClient
from gcutils.table_dumper import TableDumper
# Map of short dataset keys used throughout the codebase to the actual
# BigQuery dataset names configured in Django settings.
DATASETS = {
    "hscic": settings.BQ_HSCIC_DATASET,
    "measures": settings.BQ_MEASURES_DATASET,
    "tmp_eu": settings.BQ_TMP_EU_DATASET,
    "dmd": settings.BQ_DMD_DATASET,
    "archive": settings.BQ_ARCHIVE_DATASET,
    "prescribing_export": settings.BQ_PRESCRIBING_EXPORT_DATASET,
    "public": settings.BQ_PUBLIC_DATASET,
    "scmd": settings.BQ_SCMD_DATASET,
}
# The test dataset is only configured in some environments; skip it quietly
# wherever the setting is absent.
try:
    DATASETS["test"] = settings.BQ_TEST_DATASET
except AttributeError:
    pass
class BigQueryExportError(Exception):
    """Error raised for BigQuery export failures (see call sites)."""
    pass
@contextmanager
def exception_sql_printer(sql):
    """If there is an exception, prepend line-numbered SQL to the
    exception message, preserving the exception type and traceback.
    """
    try:
        yield
    except Exception as e:
        numbered_sql = "\n".join(
            "{:>4}: {}".format(n, line)
            for n, line in enumerate(sql.splitlines(), start=1)
        )
        msg = str(e) + "\n\n" + numbered_sql
        # Native py3 re-raise with the original traceback; replaces the
        # py2-compat six.reraise() call (six is only needed for Python 2).
        raise type(e)(msg).with_traceback(sys.exc_info()[2])
class Client(object):
    def __init__(self, dataset_key=None):
        """Wrap a google-cloud-bigquery client, optionally bound to one dataset.

        dataset_key: a key of DATASETS, or None for a dataset-less client.
        """
        self.project = settings.BQ_PROJECT
        # If this raises a DefaultCredentialsError:
        # * on a developer's machine, run `gcloud auth application-default login`
        #   to use OAuth
        # * elsewhere, ensure that GOOGLE_APPLICATION_CREDENTIALS is set and
        #   points to a valid set of credentials for a service account
        #
        # A warning is raised when authenticating with OAuth, recommending that
        # server applications use a service account. We can ignore this.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.gcbq_client = gcbq.Client(project=self.project)
        self.dataset_key = dataset_key
        if dataset_key is None:
            # Client is not bound to a dataset; dataset operations will fail.
            self.dataset_id = None
            self.dataset = None
        else:
            self.dataset_id = DATASETS[dataset_key]
            dataset_ref = self.gcbq_client.dataset(self.dataset_id)
            self.dataset = gcbq.Dataset(dataset_ref)
def run_job(self, method_name, args, config_opts, config_default_opts):
job_config = {
"copy_table": gcbq.CopyJobConfig,
"extract_table": gcbq.ExtractJobConfig,
"load_table_from_file": gcbq.LoadJobConfig,
"load_table_from_uri": gcbq.LoadJobConfig,
"query": gcbq.QueryJobConfig,
}[method_name]()
for k, v in config_default_opts.items():
setattr(job_config, k, v)
for k, v in config_opts.items():
setattr(job_config, k, v)
method = getattr(self.gcbq_client, method_name)
job = method(*args, job_config=job_config)
if getattr(job_config, "dry_run", False):
return []
else:
return job.result()
    def list_jobs(self):
        """Return an iterator over the project's BigQuery jobs."""
        return self.gcbq_client.list_jobs()
    def list_tables(self):
        """Return the dataset's tables as a list ([] if the dataset is missing)."""
        try:
            # We need to consume the iterator here in order to trigger any errors
            return list(self.gcbq_client.list_tables(self.dataset))
        except NotFound as e:
            # Treat a missing dataset as having no tables. This is consistent
            # with our approach of implicitly creating datasets on write
            # operations
            if dataset_is_missing(e):
                return []
            else:
                raise
    def create_dataset(self):
        """Create this client's dataset with the configured location/expiry."""
        self.dataset.location = settings.BQ_LOCATION
        self.dataset.default_table_expiration_ms = (
            settings.BQ_DEFAULT_TABLE_EXPIRATION_MS
        )
        self.gcbq_client.create_dataset(self.dataset)
    def delete_dataset(self):
        """Delete every table in the dataset, then the dataset itself."""
        # The dataset must be emptied before it can be deleted.
        for table_list_item in self.gcbq_client.list_tables(self.dataset):
            self.gcbq_client.delete_table(table_list_item.reference)
        self.gcbq_client.delete_dataset(self.dataset)
    def create_table(self, table_id, schema):
        """Create ``table_id`` with ``schema`` in this client's dataset.

        If the dataset does not exist yet it is created first and the
        table creation retried once.  Returns a :class:`Table` wrapper.
        """
        table_ref = self.dataset.table(table_id)
        table = gcbq.Table(table_ref, schema=schema)
        try:
            self.gcbq_client.create_table(table)
        except NotFound as e:
            if not dataset_is_missing(e):
                raise
            # Dataset missing: create it implicitly, then retry once.
            self.create_dataset()
            self.gcbq_client.create_table(table)
        return Table(table_ref, self)
def delete_table(self, table_id):
table_ref = self.dataset.table(table_id)
self.gcbq_client.delete_table(table_ref)
def get_table(self, table_id):
table_ref = self.dataset.table(table_id)
return Table(table_ref, self)
def get_or_create_table(self, table_id, schema):
try:
table = self.create_table(table_id, schema)
except Conflict:
table = self.get_table(table_id)
return table
    def create_storage_backed_table(self, table_id, schema, gcs_path):
        """Create an external table whose data lives in Cloud Storage.

        The table reads directly from the CSV at
        ``gs://<project>/<gcs_path>`` (skipping one header row) rather
        than from BigQuery-managed storage.  If the dataset is missing it
        is created and the request retried once.
        """
        gcs_uri = "gs://{}/{}".format(self.project, gcs_path)
        # The raw REST resource wants the schema as plain dicts, not
        # SchemaField objects.
        schema_as_dict = [
            {"name": s.name, "type": s.field_type.lower()} for s in schema
        ]
        resource = {
            "tableReference": {"tableId": table_id},
            "externalDataConfiguration": {
                "csvOptions": {"skipLeadingRows": "1"},
                "sourceFormat": "CSV",
                "sourceUris": [gcs_uri],
                "schema": {"fields": schema_as_dict},
            },
        }
        path = "/projects/{}/datasets/{}/tables".format(self.project, self.dataset_id)
        # NOTE(review): this goes through the client's private
        # `_connection` attribute; presumably no public API for creating
        # externally-backed tables existed at this library version --
        # confirm on upgrade.
        try:
            self.gcbq_client._connection.api_request(
                method="POST", path=path, data=resource
            )
        except NotFound as e:
            if not dataset_is_missing(e):
                raise
            # Dataset missing: create it implicitly, then retry once.
            self.create_dataset()
            self.gcbq_client._connection.api_request(
                method="POST", path=path, data=resource
            )
        return self.get_table(table_id)
    def create_table_with_view(self, table_id, sql, legacy):
        """Create a view named ``table_id`` defined by ``sql``.

        ``sql`` must contain a ``{project}`` placeholder, which is
        interpolated with this client's project.  ``legacy`` selects
        legacy SQL rather than standard SQL for the view.  If the dataset
        is missing it is created and the creation retried once.
        """
        assert "{project}" in sql
        sql = interpolate_sql(sql, project=self.project)
        table_ref = self.dataset.table(table_id)
        table = gcbq.Table(table_ref)
        table.view_query = sql
        table.view_use_legacy_sql = legacy
        try:
            self.gcbq_client.create_table(table)
        except NotFound as e:
            if not dataset_is_missing(e):
                raise
            # Dataset missing: create it implicitly, then retry once.
            self.create_dataset()
            self.gcbq_client.create_table(table)
        return Table(table_ref, self)
def query(self, sql, substitutions=None, legacy=False, **options):
default_options = {"use_legacy_sql": legacy}
substitutions = substitutions or {}
sql = interpolate_sql(sql, **substitutions)
args = [sql]
with exception_sql_printer(sql):
iterator = self.run_job("query", args, options, default_options)
return Results(iterator)
def query_into_dataframe(self, sql, legacy=False):
sql = interpolate_sql(sql)
kwargs = {
"project_id": self.project,
"dialect": "legacy" if legacy else "standard",
}
with exception_sql_printer(sql):
return pd.read_gbq(sql, **kwargs)
    def upload_model(self, model, table_id=None):
        """Upload all rows of a Django ``model`` into BigQuery.

        ``table_id`` defaults to the model's database table name; for the
        "dmd" dataset the leading "<app>_" prefix is stripped from it.
        """
        if table_id is None:
            table_id = model._meta.db_table
        if self.dataset_key == "dmd":
            table_id = table_id.split("_", 1)[1]
        schema = build_schema_from_model(model)
        table = self.get_or_create_table(table_id, schema)
        # We reload the schema here, as when older BQ tables were created,
        # Django DateFields were mapped to BQ TIMESTAMP fields. However, we
        # now map them to DATE fields.
        schema = table.gcbq_table.schema
        columns = [
            f.db_column or f.attname for f in model._meta.fields if not f.auto_created
        ]
        timestamp_ixs = [
            ix for ix, field in enumerate(schema) if field.field_type == "TIMESTAMP"
        ]
        # Legacy TIMESTAMP columns receive date values; append a midnight
        # time so they parse as timestamps (assumes the dumped values are
        # date strings -- see TableDumper).
        def transformer(record):
            for ix in timestamp_ixs:
                record[ix] = record[ix] + " 00:00:00"
            return record
        table.insert_rows_from_pg(model, schema, columns, transformer)
class Table(object):
    """Wrapper around one BigQuery table, bound to the owning :class:`Client`."""
    def __init__(self, gcbq_table_ref, client):
        self.gcbq_table_ref = gcbq_table_ref
        self.client = client
        self.gcbq_client = client.gcbq_client
        try:
            self.get_gcbq_table()
        except NotFound:
            # The table may not exist yet; row readers fetch it lazily.
            self.gcbq_table = None
        self.table_id = gcbq_table_ref.table_id
        self.dataset_id = gcbq_table_ref.dataset_id
        self.project = gcbq_table_ref.project
    @property
    def qualified_name(self):
        """The "<dataset>.<table>" name as used in SQL."""
        return "{}.{}".format(self.dataset_id, self.table_id)
    def run_job(self, *args):
        """Delegate job execution to the owning client."""
        return self.client.run_job(*args)
    def get_gcbq_table(self):
        """Fetch and cache the underlying google-cloud-bigquery table object."""
        self.gcbq_table = self.gcbq_client.get_table(self.gcbq_table_ref)
    def get_rows(self):
        """Yield every row of the table as a tuple of values."""
        if self.gcbq_table is None:
            self.get_gcbq_table()
        for row in self.gcbq_client.list_rows(self.gcbq_table):
            yield row.values()
    def get_rows_as_dicts(self):
        """Yield every row of the table as a field-name -> value dict,
        with NaN values converted to None (see ``row_to_dict``)."""
        if self.gcbq_table is None:
            self.get_gcbq_table()
        field_names = [field.name for field in self.gcbq_table.schema]
        for row in self.get_rows():
            yield row_to_dict(row, field_names)
    def insert_rows_from_query(
        self, sql, substitutions=None, legacy=False, dry_run=False, **options
    ):
        """Overwrite this table with the results of ``sql``.

        The dataset is created (and the query retried once) if missing.
        """
        default_options = {
            "use_legacy_sql": legacy,
            "dry_run": dry_run,
            "allow_large_results": True,
            "write_disposition": "WRITE_TRUNCATE",
            "destination": self.gcbq_table_ref,
        }
        substitutions = substitutions or {}
        sql = interpolate_sql(sql, **substitutions)
        args = [sql]
        with exception_sql_printer(sql):
            try:
                self.run_job("query", args, options, default_options)
            except NotFound as e:
                if not dataset_is_missing(e):
                    raise
                self.client.create_dataset()
                self.run_job("query", args, options, default_options)
    def insert_rows_from_csv(self, csv_path, schema, **options):
        """Overwrite this table with the contents of the CSV at ``csv_path``."""
        default_options = {
            "source_format": "text/csv",
            "write_disposition": "WRITE_TRUNCATE",
            "schema": schema,
        }
        # When we send a schema with a load_table_from_file job, our copy
        # of the table metadata doesn't get updated, so we need to do this
        # ourselves.
        self.gcbq_table.schema = schema
        with open(csv_path, "rb") as f:
            args = [f, self.gcbq_table_ref]
            self.run_job("load_table_from_file", args, options, default_options)
    def insert_rows_from_pg(self, model, schema, columns=None, transformer=None):
        """Dump a Django model's rows to a temporary CSV and load them here.

        ``transformer`` (if given) is applied per record by TableDumper.
        """
        if columns is None:
            columns = [field.name for field in schema]
        table_dumper = TableDumper(model, columns, transformer)
        with tempfile.NamedTemporaryFile("w+t") as f:
            table_dumper.dump_to_file(f)
            f.seek(0)
            self.insert_rows_from_csv(f.name, schema)
    def insert_rows_from_storage(self, gcs_path, **options):
        """Overwrite this table from a file already in Cloud Storage."""
        default_options = {"write_disposition": "WRITE_TRUNCATE"}
        gcs_uri = "gs://{}/{}".format(self.project, gcs_path)
        args = [gcs_uri, self.gcbq_table_ref]
        self.run_job("load_table_from_uri", args, options, default_options)
    def export_to_storage(self, storage_prefix, **options):
        """Export this table to gzipped CSV shard(s) under ``storage_prefix``.

        Raises ``BigQueryExportError`` if the extract job does not finish
        in state DONE without errors.
        """
        self.get_gcbq_table()
        default_options = {"compression": "GZIP"}
        destination_uri = "gs://{}/{}*.csv.gz".format(self.project, storage_prefix)
        args = [self.gcbq_table, destination_uri]
        result = self.run_job("extract_table", args, options, default_options)
        if result.state != "DONE" or result.error_result:
            raise BigQueryExportError(
                "Export job failed with state {state}: {error}".format(
                    state=result.state, error=result.error_result
                )
            )
    def delete_all_rows(self, **options):
        """Delete every row, leaving the table and its schema in place."""
        default_options = {"use_legacy_sql": False}
        sql = "DELETE FROM {} WHERE true".format(self.qualified_name)
        args = [sql]
        self.run_job("query", args, options, default_options)
    def copy_to_new_dataset(self, new_dataset_key, **options):
        """Copy this table (keeping its table id) into another configured dataset."""
        default_options = {"location": settings.BQ_LOCATION}
        dataset_ref = self.gcbq_client.dataset(DATASETS[new_dataset_key])
        new_table_ref = dataset_ref.table(self.table_id)
        args = [self.gcbq_table_ref, new_table_ref]
        self.run_job("copy_table", args, options, default_options)
    def move_to_new_dataset(self, new_dataset_id):
        """Copy this table to another dataset, then delete the original."""
        self.copy_to_new_dataset(new_dataset_id)
        self.client.delete_table(self.table_id)
class Results(object):
    """In-memory wrapper around a BigQuery row iterator."""

    def __init__(self, gcbq_row_iterator):
        self._gcbq_row_iterator = gcbq_row_iterator
        # Materialise up front so the rows can be re-read any number of times.
        self._rows = list(gcbq_row_iterator)

    @property
    def rows(self):
        """The result rows, each as a tuple of values."""
        return [row.values() for row in self._rows]

    @property
    def rows_as_dicts(self):
        """The result rows, each as a field-name -> value mapping."""
        return [dict(row) for row in self._rows]

    @property
    def field_names(self):
        """
        Returns names of fields in the same order as they will be in
        `row.values()`
        """
        index_of = self._gcbq_row_iterator._field_to_index
        # Order the field names by their column position.
        return sorted(index_of, key=index_of.get)
class TableExporter(object):
    """Helpers for exporting a BigQuery table via Cloud Storage."""
    def __init__(self, table, storage_prefix):
        self.table = table
        self.storage_prefix = storage_prefix
        storage_client = StorageClient()
        self.bucket = storage_client.bucket()
    def export_to_storage(self, **options):
        """Export the table to Cloud Storage under this exporter's prefix."""
        self.table.export_to_storage(self.storage_prefix, **options)
    def storage_blobs(self):
        """Yield every blob in the bucket under the storage prefix."""
        for blob in self.bucket.list_blobs(prefix=self.storage_prefix):
            yield blob
    def download_from_storage(self):
        """Yield each exported blob as an open local temporary file.

        Each yielded file is deleted when the generator advances past it.
        """
        for blob in self.storage_blobs():
            with tempfile.NamedTemporaryFile(mode="rb+") as f:
                blob.download_to_file(f)
                f.flush()
                f.seek(0)
                yield f
    def download_from_storage_and_unzip(self, f_out):
        """Download all exported shards and concatenate them, gunzipped,
        into ``f_out``."""
        for i, f_zipped in enumerate(self.download_from_storage()):
            # Unzip
            if i == 0:
                cmd = "gunzip -c -f %s >> %s"
            else:
                # When the file is split into several shards in GCS, it
                # puts a header on every file, so we have to skip that
                # header on all except the first shard.
                cmd = "gunzip -c -f %s | tail -n +2 >> %s"
            # NOTE(review): shell=True with interpolated filenames; both
            # names come from tempfile here so this is not attacker
            # controlled, but keep it that way.
            subprocess.check_call(cmd % (f_zipped.name, f_out.name), shell=True)
    def delete_from_storage(self):
        """Delete every blob under the storage prefix."""
        for blob in self.storage_blobs():
            blob.delete()
def row_to_dict(row, field_names):
    """Convert a row from bigquery into a dictionary, and convert NaN to
    None
    """
    def _nan_to_none(value):
        # Catches float NaN as well as the strings "NaN"/"nan"; falsy
        # values (0, "", None) are passed through untouched.
        if value and str(value).lower() == "nan":
            return None
        return value

    return {
        field_name: _nan_to_none(value)
        for value, field_name in zip(row, field_names)
    }
def results_to_dicts(results):
    """Return the rows of a :class:`Results` object as a list of dicts."""
    return results.rows_as_dicts
def build_schema(*fields):
    """Build a list of SchemaField objects, one per (name, type, ...) tuple."""
    schema = []
    for field_spec in fields:
        schema.append(gcbq.SchemaField(*field_spec))
    return schema
def build_schema_from_model(model):
field_mappings = {
model_fields.BigIntegerField: "INTEGER",
model_fields.CharField: "STRING",
model_fields.DateField: "DATE",
model_fields.FloatField: "FLOAT",
model_fields.DecimalField: "NUMERIC",
model_fields.IntegerField: "INTEGER",
model_fields.BooleanField: "BOOLEAN",
model_fields.NullBooleanField: "BOOLEAN",
model_fields.TextField: "STRING",
related_fields.ForeignKey: "INTEGER",
related_fields.OneToOneField: "INTEGER",
}
fields = [
(f.name, field_mappings[type(f)])
for f | |
different lengths to confirm that attention masks are operational in XLA
sentences = [
"Translate English to German: Today is a beautiful day.",
"Translate English to German: I have four cats, three dogs, two birds, and a horse.",
]
input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids
xla_generate = tf.function(model.generate, jit_compile=True)
output_ids = model.generate(input_ids)
output_ids_xla = xla_generate(input_ids)
output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)
expected_output_string = [
"Heute ist ein schöner Tag.",
"Ich habe vier Katzen, drei Hunde, zwei Vögel und ein Pferd.",
]
self.assertListEqual(expected_output_string, output_strings)
self.assertListEqual(expected_output_string, output_strings_xla)
    @slow
    def test_greedy_generate(self):
        # Greedy search (do_sample=False) with bad-word filtering, n-gram
        # blocking and a repetition penalty; pins the exact generations
        # for two prompts (one translation prompt, one free-form).
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        sentences = ["Yesterday, my name was", "Today is a beautiful day and"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids
        generation_kwargs = {
            # Ban the tokenised forms of "my" and "ein schöner" from the output.
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "do_sample": False,
            "repetition_penalty": 2.2,
        }
        output_ids = model.generate(input_ids, **generation_kwargs)
        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"]
        self.assertListEqual(expected_output_string, output_strings)
    @slow
    def test_sample_xla_generate_simple(self):
        # NOTE: due to the small numerical differences that are natural when we compile to XLA, sampling the same
        # output out of the same seed is far from guaranteed. We can, however, confirm that the results are sensible
        # and that we can seed both versions.
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
            tokenizer = T5Tokenizer.from_pretrained("t5-small")
            sentence = "Translate English to German: I have two bananas"
            input_ids = tokenizer(sentence, return_tensors="tf", padding=True).input_ids
            # Eager and XLA paths are pinned to different (but both
            # sensible) outputs -- see the NOTE above.
            expected_output_string = ["Ich habe zwei Bananen"]
            expected_output_string_xla = ["Ich habe 2 Bananen"]
            # seed set -> deterministic sampling sequence -> deterministic generation
            output_ids = model.generate(input_ids, do_sample=True, seed=[42, 0])
            output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
            self.assertListEqual(expected_output_string, output_strings)
            xla_generate = tf.function(model.generate, jit_compile=True)
            # seed set -> deterministic sampling sequence -> deterministic generation
            output_ids_xla = xla_generate(input_ids, do_sample=True, seed=[42, 0])
            output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)
            self.assertListEqual(expected_output_string_xla, output_strings_xla)
    @slow
    def test_sample_generate(self):
        # Seeded sampling with warping (temperature/top-k/top-p) plus
        # bad-word filtering, n-gram blocking and repetition penalty;
        # the fixed seed makes the sampled outputs reproducible.
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids
        generation_kwargs = {
            "do_sample": True,
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "repetition_penalty": 2.2,
            "temperature": 0.8,
            "top_k": 500,
            "top_p": 0.9,
            "seed": [20, 0],  # seed set -> deterministic sampling sequence -> deterministic generation
        }
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, **generation_kwargs)
        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        expected_output_string = ["- I really love my way of this.", "die Transformatoren sind wirklich erstaunlich"]
        self.assertListEqual(expected_output_string, output_strings)
    @slow
    def test_beam_search_generate(self):
        # Beam search (num_beams=4, do_sample=False) with bad-word
        # filtering, n-gram blocking and repetition penalty; pins the
        # exact generations for two prompts.
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids
        generation_kwargs = {
            # Ban the tokenised forms of "my" and "ein schöner" from the output.
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "do_sample": False,
            "repetition_penalty": 2.2,
            "num_beams": 4,
        }
        output_ids = model.generate(input_ids, **generation_kwargs)
        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        expected_output_string = ["Ich liebe es so sehr!", "die Transformatoren sind wirklich erstaunlich"]
        self.assertListEqual(expected_output_string, output_strings)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFT5ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def model(self):
        # Cached so the (large) t5-base checkpoint is only loaded once
        # across the tests on this class.
        return TFT5ForConditionalGeneration.from_pretrained("t5-base")
    @slow
    def test_small_integration_test(self):
        """
        For comparison run:
        >>> import t5  # pip install t5==0.7.1
        >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary

        >>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
        >>> path_to_mtf_small_spm_model_path = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
        >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
        >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
        """
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        # Negated summed loss matches the sign convention of the mesh-tf
        # reference score above.
        mtf_score = -tf.math.reduce_sum(loss).numpy()
        EXPECTED_SCORE = -19.0845
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
    @slow
    def test_small_v1_1_integration_test(self):
        """
        For comparison run:
        >>> import t5  # pip install t5==0.7.1
        >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary

        >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
        >>> path_to_mtf_small_spm_model_path = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
        >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
        >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
        """
        model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small")
        tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        # Negated summed loss matches the sign convention of the mesh-tf
        # reference score above.
        mtf_score = -tf.math.reduce_sum(loss).numpy()
        EXPECTED_SCORE = -59.0293
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
    @slow
    def test_small_byt5_integration_test(self):
        """
        For comparison run:
        >>> import t5  # pip install t5==0.9.1

        >>> path_to_byt5_small_checkpoint = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
        >>> vocab = t5.data.ByteVocabulary()
        >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
        """
        model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small")
        tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        # Negated summed loss matches the sign convention of the mesh-tf
        # reference score above.
        mtf_score = -tf.math.reduce_sum(loss).numpy()
        EXPECTED_SCORE = -60.7397
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_summarization(self):
model = self.model
tok = T5Tokenizer.from_pretrained("t5-base")
FRANCE_ARTICLE = ( # @noqa
"Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings"
" Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane."
' Marseille prosecutor <NAME> told CNN that "so far no videos were used in the crash investigation."'
' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s'
" comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video"
" showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French"
" Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a"
" phone at the wreckage site. The two publications described the supposed video, but did not post it on"
" their websites. The publications said that they watched the video, which was found by a source close to"
" the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported."
' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the'
" cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the"
' screaming intensifies. Then nothing." "It is a very disturbing scene," said <NAME>,'
" editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said"
" the agency is not aware of any such video. Lt. Col. <NAME>, a French Gendarmerie spokesman"
" in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the"
' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,'
' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be'
" sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by"
" specialized technicians working hand-in-hand with investigators. But none of the cell phones found so"
" far have been sent to the institute, Menichini said. Asked whether staff involved in the search could"
' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin'
' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match'
' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered'
' cell phones from the crash site after Bild and Paris Match published their reports. "That is something'
" we did not know before. ... Overall we can say many things of the investigation weren't revealed by the"
' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline'
" Lufthansa confirmed Tuesday that co-pilot <NAME> had battled depression years before he took the"
" controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the"
' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of'
' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school'
" discovered in an internal investigation, Lufthansa said, included medical documents he submitted in"
" connection with resuming his flight training. The announcement indicates that Lufthansa, the parent"
" company of | |
860),
(408, 727), (803, 844), (640, 684), (1, 626), (505, 847), (341, 888), (249, 747), (333, 720), (64, 891),
(195, 939), (227, 581), (244, 822),
(145, 990), (556, 822), (93, 458), (82, 327), (520, 896), (501, 955), (111, 308), (298, 564), (127, 723),
(340, 560), (834, 944), (208, 553),
(818, 986), (560, 617), (294, 601), (93, 455), (610, 817), (324, 394), (247, 589), (188, 297), (193, 841),
(33, 191), (627, 672), (266, 487),
(70, 91), (695, 775), (133, 897), (153, 945), (39, 862), (82, 919), (716, 945), (553, 849), (400, 699),
(722, 857), (282, 537), (534, 831),
(241, 869), (220, 916), (603, 695), (845, 972), (429, 593), (281, 461), (504, 676), (656, 717), (812, 938),
(84, 365), (332, 627), (118, 498),
(601, 645), (343, 865), (194, 248), (16, 749), (119, 277), (225, 722), (380, 813), (174, 340), (436, 835),
(63, 103), (149, 801), (714, 875),
(46, 224), (587, 836), (649, 931), (547, 958), (616, 696), (27, 75), (127, 650), (193, 620), (589, 850),
(122, 400), (93, 379), (118, 853),
(37, 620), (22, 199), (984, 993), (189, 735), (126, 490), (215, 744), (62, 819), (695, 959), (23, 557),
(435, 635), (103, 855), (71, 266), (73, 226),
(308, 662), (358, 446), (62, 184), (478, 515), (40, 610), (103, 716), (204, 400), (266, 367), (749, 926),
(481, 858), (923, 940), (173, 583),
(688, 714), (208, 989), (59, 785), (692, 807), (162, 865), (165, 350), (256, 542), (120, 611), (452, 943),
(179, 681), (13, 482), (419, 697),
(582, 921), (520, 895), (318, 939), (365, 664), (397, 857), (256, 673), (157, 574), (12, 707), (468, 759),
(80, 343), (46, 756), (287, 557),
(138, 245), (780, 976), (360, 493), (294, 624), (367, 689), (604, 969), (648, 913), (635, 874), (135, 732),
(317, 397), (424, 766), (666, 848),
(1, 82), (196, 608), (342, 715), (163, 245), (228, 652), (387, 458), (727, 896), (581, 689), (424, 895),
(32, 411), (718, 892), (428, 581),
(678, 790), (47, 726), (169, 456), (65, 265), (161, 718), (457, 540), (498, 906), (574, 929), (618, 773),
(0, 905), (39, 506), (319, 333),
(478, 857), (51, 828), (842, 896), (831, 997), (192, 425), (561, 986), (85, 648), (742, 857), (15, 133),
(411, 972), (427, 694), (3, 323), (14, 218),
(734, 772), (2, 842), (541, 691), (100, 626), (121, 195), (622, 664), (203, 894), (286, 309), (186, 705),
(102, 487), (874, 944), (406, 642),
(22, 83), (281, 935), (463, 819), (118, 811), (262, 882), (136, 669), (533, 836), (660, 666), (117, 355),
(158, 892), (285, 871), (19, 43),
(41, 210), (265, 697), (322, 571), (375, 969), (581, 960), (869, 931), (43, 866), (767, 984), (622, 718),
(506, 671), (659, 729), (469, 924),
(445, 655), (381, 892), (182, 550), (212, 384), (298, 601), (9, 141), (154, 277), (341, 345), (376, 808),
(95, 735), (346, 798), (36, 635),
(42, 276), (153, 167), (296, 597), (369, 404), (132, 561), (117, 300), (489, 748), (245, 956), (49, 315),
(183, 877), (535, 746), (72, 309),
(412, 855), (306, 336), (111, 424), (101, 574), (492, 930), (345, 485), (817, 861), (831, 999), (127, 351),
(118, 490), (509, 716), (38, 436),
(309, 343), (703, 752), (159, 915), (170, 941), (578, 641), (384, 825), (654, 997), (67, 89), (86, 827),
(202, 767), (62, 226), (8, 394), (100, 403),
(531, 569), (296, 459), (500, 942), (598, 807), (695, 731), (222, 433), (85, 377), (225, 267), (599, 795),
(170, 441), (196, 367), (65, 117),
(841, 884), (718, 873), (28, 924), (462, 538), (693, 770), (121, 206), (407, 509), (212, 262), (43, 656),
(816, 970), (221, 638), (107, 149),
(202, 469), (370, 387), (559, 846), (107, 154), (499, 610), (151, 577), (415, 653), (433, 696), (533, 898),
(507, 695), (909, 939), (330, 853),
(510, 511), (650, 686), (206, 895), (555, 624), (224, 953), (9, 348), (722, 985), (764, 920), (325, 837),
(36, 329), (151, 537), (263, 895),
(617, 802), (159, 862), (388, 596), (301, 735), (723, 826), (67, 481), (86, 819), (528, 889), (40, 937),
(67, 230), (41, 133), (15, 307), (777, 864),
(338, 459), (164, 882), (152, 819), (671, 889), (471, 991), (380, 517), (391, 922), (514, 542), (34, 587),
(92, 694), (813, 824), (530, 776),
(78, 614), (436, 764), (772, 927), (211, 296), (548, 922), (427, 612), (845, 995), (493, 865), (810, 995),
(397, 622), (239, 600), (871, 885),
(20, 817), (672, 906), (0, 758), (186, 309), (519, 583), (260, 340), (67, 505), (268, 880), (844, 965),
(310, 791), (393, 417), (392, 829),
(63, 167), (656, 957), (130, 244), (293, 746), (342, 849), (56, 964), (36, 492), (144, 427), (503, 911),
(616, 884), (83, 734), (689, 715),
(155, 829), (361, 421), (36, 626), (395, 477), (48, 469), (103, 482), (155, 796), (20, 33), (612, 632),
(135, 645), (107, 331), (562, 716),
(354, 664), (199, 392), (795, 802), (502, 796), (113, 902), (61, 624), (478, 717), (629, 647), (345, 956),
(127, 666), (698, 992), (636, 730),
(303, 807), (130, 869), (933, 981), (396, 818), (300, 938), (763, 893), (697, 980), (124, 829), (531, 881),
(193, 804), (39, 800), (401, 455),
(380, 774), (195, 466), (365, 808), (77, 647), (45, 979), (923, 956), (40, 497), (261, 922), (27, 967),
(532, 682), (582, 585), (221, 896),
(95, 235), (794, 839), (905, 910), (642, 798), (514, 715), (430, 536), (312, 519), (116, 968), (149, 436),
(579, 913), (432, 945), (86, 958),
(107, 425), (64, 101), (425, 792), (159, 751), (31, 977), (457, 810), (441, 702), (30, 427), (508, 941),
(884, 985), (332, 739), (80, 258),
(72, 360), (124, 367), (30, 708), (353, 356), (10, 182), (850, 997), (236, 838), (72, 374), (610, 914),
(146, 212), (3, 209), (674, 689), (749, 960),
(126, 922), (7, 765), (300, 377), (25, 706), (619, 955), (238, 879), (145, 191), (115, 464), (352, 488),
(724, 982), (133, 264), (28, 989),
(213, 370), (343, 484), (299, 983), (303, 959), (899, 981), (566, 651), (188, 334), (82, 607), (105, 546),
(315, 594), (160, 385), (150, 919),
(128, 968), (228, 823), (323, 520), (242, 248), (188, 772), (298, 381), (429, 679), (47, 881), (135, 615),
(21, 403), (79, 719), (74, 135),
(306, 430), (426, 563), (758, 948), (145, 605), (305, 432), (363, 652), (86, 254), (455, 647), (378, 652),
(541, 971), (59, 385), (8, 418),
(427, 985), (745, 922), (328, 451), (208, 380), (300, 975), (93, 482), (189, 973), (111, 815), (114, 283),
(571, 620), (157, 704), (719, 814),
(456, 950), (189, 408), (431, 786), (178, 442), (253, 982), (348, 464), (535, 959), (145, 363), (473, 646),
(88, 652), (494, 773), (208, 301),
(1, 850), (459, 715), (473, 633), (7, 223), (117, 305), (644, 787), (308, 558), (159, 623), (434, 723),
(482, 769), (94, 694), (509, 778),
(237, 983), (556, 780), (286, 415), (22, 647), (123, 276), (684, 904), (0, 41), (262, 407), (538, 911),
(595, 727), (405, 455), (104, 764),
(258, 362), (290, 892), (688, 773), (200, 930), (87, 609), (36, 72), (268, 811), (312, 546), (121, 348),
(542, 880), (255, 912), (780, 942),
(69, 167), (424, 881), (289, 296), (137, 532), (535, 587), (215, 642), (107, 544), (420, 978), (556, 649),
(413, 759), (797, 925), (285, 807),
(299, 452), (380, 581), (141, 643), (126, 160), (123, 713), (390, 410), (479, 605), (142, 573), (306, 684),
(362, 647), (484, 760), (223, 425),
(488, 500), (513, 711), (325, 504), (667, 981), (61, 454), (146, 307), (507, 763), (53, 908), (220, 636),
(26, 363), (400, 482), (10, 909),
(539, 866), (68, 703), (83, 887), (702, 972), (759, 946), (404, 685), (6, 369), (42, 118), (3, 635),
(276, 894), (655, 716), (299, 744), (232, 922),
(144, 769), (294, 586), (107, 195), (444, 471), (338, 733), (172, 393), (338, 431), (663, 918), (445, 703),
(151, 458), (725, 955), (151, 536),
(132, 323), (213, 932), (191, 454), (357, 808), (398, 437), (503, 826), (398, 747), (225, 814), (200, 449),
(209, | |
len(message_chain[Image]) != 0:
message += '[图片]'
flash_image = message_chain[FlashImage]
if len(flash_image) != 0:
message += '[闪照]'
# ===================================================================================
# ===================================================================================
# 基本信息获取
# interceptable_need_reply = False # 可被打断的回复
need_reply = False # 是否需要回复
merge_reply = False # 是否合并回复
reply_text = '' # 回复的文本内容
reply_image = '' # 回复的图片
need_complex_reply = False # 是否是复杂回复
complex_at = {
'at_type': -1, # -1:不艾特;0:艾特;1:艾特分组
'at': 0
} # 复杂艾特
complex_reply = None # 复杂回复
need_at = False # 是否需要at
at_qq = 0 # at的qq是谁
# 状态信息
group_right = 2 # 在群里的权限(群主、管理员、成员)
if mode == 0:
group_id = 0 # 发消息的人的群号(如果是群聊消息)
group_name = ''
else:
group_id = event.sender.group.id
group_name = event.sender.group.name
tmp = str(event.sender.permission)
if tmp == 'Permission.Owner':
group_right = 0
elif tmp == 'Permission.Administrator':
group_right = 1
qq = event.sender.id # (发消息人的qq)
name = event.sender.get_name()
right = self.get_right(qq) # 对于小柒的权限(主人、管理员、贡献者)
blacklist = self.get_blacklist(qq, group_id)
if mode == 0 or mode == 2:
be_at = True
self.get_user(qq)
if mode == 1:
self.get_group(group_id)
key_allow = []
if mode == 1:
key_allow = self.groups[group_id]['config']['key']
elif mode == 0 or mode == 2:
key_allow = self.users[qq]['config']['key']
# 获取指令信息
message = message.strip()
tunneling: dict = dataManage.load_obj('data/Function/tunneling')
if tunneling.__contains__(group_id):
if tunneling[group_id].__contains__(message):
print('隧穿指令:%s->%s' % (message, tunneling[group_id][message]))
message = tunneling[group_id][message]
else:
for key, value in tunneling[group_id].items():
if message.startswith(key):
message = message.replace(key, value, 1)
break
message_len = len(message)
message_code = message.lower()
if len(key_allow) == 0:
message_code = message_code
elif message_len > 0 and message_code[0] in key_allow:
message_code = message_code[1:]
else:
message_code = ''
message_code_len = len(message_code)
be_mute = (mode == 1 and self.groups[group_id]['config']['mute'])
master = await bot.get_friend(self.config['master'])
# print('\tmessage:' + message)
# print('\tmessage_code:' + message_code)
# print('\tqq:' + str(qq) + '<' + name + '>')
# if mode == 1:
# print('\tgroup:' + str(group_id) + '<' + event.sender.group.get_name() + '>')
# print('\tmute:' + str(be_mute))
# ===================================================================================
# ===================================================================================
# 消息处理开始
# 禁言消息的处理
if mode == 1 and message[:5] != '删除屏蔽词' and message[
:5] != '添加屏蔽词' and message != '清空屏蔽词' and message != '查看屏蔽词':
revoke = False
for key in self.groups[group_id]['prohibited_word']:
if key in message:
reply_text = '发现屏蔽词“' + key + '”'
revoke = True
break
if revoke:
need_reply = True
need_at = True
if group_right == 2:
if str(event.sender.group.permission) != 'Permission.Member':
await bot.recall(message_chain.message_id)
reply_text += ',予以撤回~'
else:
reply_text += ',但是' + self.bot_name + '没有办法撤回诶~'
else:
reply_text += ',但是对方是管理员/群主,' + self.bot_name + '打不过,嘤嘤嘤~'
if need_reply:
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 基本信息查看
if message == '我的权限':
need_at = True
if blacklist == 1:
reply_text = '你当前在黑名单中~'
elif blacklist == 2:
reply_text = '本群当前在黑名单中'
elif right == 0:
reply_text = '当前权限:主人\n可以输入“主人帮助”来获取指令帮助哦~'
elif right == 1:
reply_text = '当前权限:管理员\n可以输入“管理员帮助”来获取指令帮助哦~'
elif right == 2:
reply_text = '当前权限:贡献者\n可以输入“贡献者帮助”来获取指令帮助哦~'
elif right == 3:
reply_text = '当前权限:普通用户\n可以输入“*help”来获取指令帮助;输入“骰娘”来获取骰娘帮助;输入“游戏帮助”来获取游戏帮助'
if be_mute:
reply_text += '\n在本群中' + self.bot_name + '被禁言了'
self.statistics['help'] += 1
dataManage.save_statistics(self.statistics)
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message.replace('查看', '').replace('查询', '') == '开关列表' or message.replace('查看', '').replace('查询',
'') == '模块列表':
if mode == 1:
reply_image = BaseFunction.generate_module_list(group_id, self.groups[group_id])
else:
reply_text = '用户<' + name + '>模块开关情况如下:'
reply_text += '\n输入“模块管理帮助”获取所有指令的详细说明'
reply_text += '\n格式:”字段(操作指令):状态“\n'
reply_text += '\n是否开启ai(时不时自主回复)【开启/关闭智能回复】:' + bool_string(self.users[qq]['config']['ai'])
self.statistics['help'] += 1
dataManage.save_statistics(self.statistics)
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 如果是黑名单那么不会回复任何消息
if blacklist != 0:
return
if message_len == 0:
if be_at:
reply_text = '我在'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# 如果被限制那么只回复at消息
if mode == 1:
if self.groups[group_id]['config']['limit']:
if not be_at:
return
# ===================================================================================
# 处理上一次的消息
if self.users[qq]['buffer']['id'] != 0:
reset_buffer = True
if self.users[qq]['buffer']['id'] == 1: # 群欢迎语
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['welcome'] = message_chain
reply_text = self.bot_name + '已经记录下了~!'
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
elif self.users[qq]['buffer']['id'] == 2: # 清空屏蔽词
if message == '是' or message == '确定' or message == '确认' or message == '可':
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['prohibited_word'] = []
reply_text = self.bot_name + '已经帮您清空了'
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
else:
reply_text = self.bot_name + '啊嘞?已为您取消清空。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 3: # 覆盖分组
if message == '是' or message == '确定' or message == '确认' or message == '可':
tmp_members = self.users[qq]['buffer']['buffer']['members']
tmp_name = self.users[qq]['buffer']['buffer']['name']
tmp_group_id = self.users[qq]['buffer']['buffer']['group_id']
self.get_group(tmp_group_id)
operator.del_group(tmp_group_id, self.groups[tmp_group_id], tmp_name)
operator.add_group(tmp_group_id, self.groups[tmp_group_id], tmp_name, tmp_members, qq)
reply_text = '已经覆盖~'
need_reply = True
else:
reply_text = self.bot_name + '啊嘞?已为您取消覆盖分组。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 4: # 清空分组
if message == '是' or message == '确定' or message == '确认' or message == '可':
tmp_group_id = self.users[qq]['buffer']['buffer']['group_id']
self.get_group(tmp_group_id)
self.groups[tmp_group_id]['group'] = {}
dataManage.save_group(tmp_group_id, self.groups[tmp_group_id])
reply_text = '清空成功!'
need_reply = True
else:
reply_text = self.bot_name + '啊嘞?已为您取消清空分组。'
need_reply = True
elif self.users[qq]['buffer']['id'] == 5: # 创建复杂回复的触发词
if message != '*取消创建*':
self.users[qq]['buffer']['id'] = 6
self.users[qq]['buffer']['buffer'] = {
'group_id': self.users[qq]['buffer']['buffer'],
'key': message
}
dataManage.save_user(qq, self.users[qq])
reply_text = '触发词:' + message
reply_text += '\n小柒已为您记录下来了,请问你的回复内容是什么?(可以文字+图片,不可以包含艾特)'
reset_buffer = False
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 6: # 创建复杂回复的回复内容
if message != '*取消创建*':
self.users[qq]['buffer']['id'] = 7
self.users[qq]['buffer']['buffer']['reply'] = message_chain
dataManage.save_user(qq, self.users[qq])
reply_text += '小柒记录下来了,请问这条消息需要艾特谁吗(全体成员/分组/触发人/QQ号,这四种都是可以的哦~如果QQ号为0表示不艾特,如果不明白分组可以看“贡献者帮助”)?'
reset_buffer = False
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 7: # 创建复杂回复的艾特对象
message = message.replace('@', '').strip()
if message != '*取消创建*':
if message == '全体成员':
self.users[qq]['buffer']['buffer']['at_type'] = 0 # 0表示艾特
self.users[qq]['buffer']['buffer']['at'] = -1
elif message == '触发人':
self.users[qq]['buffer']['buffer']['at_type'] = 0
self.users[qq]['buffer']['buffer']['at'] = 0
elif message.isdigit():
buffer_at = int(message)
if buffer_at > 0:
self.users[qq]['buffer']['buffer']['at_type'] = 0
self.users[qq]['buffer']['buffer']['at'] = buffer_at
else:
self.users[qq]['buffer']['buffer']['at_type'] = -1 # -1表示不艾特
self.users[qq]['buffer']['buffer']['at'] = 0
else:
self.users[qq]['buffer']['buffer']['at_type'] = 1 # 1表示艾特分组
self.users[qq]['buffer']['buffer']['at'] = message
self.get_group(self.users[qq]['buffer']['buffer']['group_id'])
group = self.groups[self.users[qq]['buffer']['buffer']['group_id']]
if not group['key_reply'].__contains__('complex'):
group['key_reply']['complex'] = {}
group['key_reply']['complex'][self.users[qq]['buffer']['buffer']['key']] = {
'reply': self.users[qq]['buffer']['buffer']['reply'],
'at': self.users[qq]['buffer']['buffer']['at'],
'at_type': self.users[qq]['buffer']['buffer']['at_type']
}
dataManage.save_group(self.users[qq]['buffer']['buffer']['group_id'], group)
reply_text = '创建成功~'
else:
reply_text = '已为您取消创建'
need_reply = True
elif self.users[qq]['buffer']['id'] == 8: # 自动审批暗号
self.get_group(self.users[qq]['buffer']['buffer'])
self.groups[self.users[qq]['buffer']['buffer']]['config']['pass'] = message
reply_text = self.bot_name + '已经记录下了~!当前入群暗号:' + message
need_reply = True
dataManage.save_group(self.users[qq]['buffer']['buffer'],
self.groups[self.users[qq]['buffer']['buffer']])
elif self.users[qq]['buffer']['id'] == 9: # XMU服务条款同意
need_reply = True
if message == '同意':
reply_text = '很高兴您订阅“厦大自动健康打卡”服务,请问您的厦大统一身份认证账号是什么?'
reset_buffer = False
self.users[qq]['buffer']['id'] = 10
self.users[qq]['buffer']['buffer'] = {
'account': '',
'password': ''
}
dataManage.save_user(qq, self.users[qq])
else:
reply_text = '已取消为您取消订阅“厦大自动健康打卡”服务'
elif self.users[qq]['buffer']['id'] == 10: # XMU服务账号
need_reply = True
reply_text = '请问您的厦大统一身份认证密码是什么?(请再次确保您在私聊!)'
reset_buffer = False
self.users[qq]['buffer']['id'] = 11
self.users[qq]['buffer']['buffer'] = {
'account': message,
'password': ''
}
dataManage.save_user(qq, self.users[qq])
elif self.users[qq]['buffer']['id'] == 11: # XMU服务密码
need_reply = True
reply_text = '好的~已为您记录下来了,将会在每天12:05自动打卡,并私聊告诉你打卡的结果,请确保有添加' + self.get_name() + '的好友'
reply_text += '\n你可以通过输入“AsYNARTvgt”来退订此服务'
password_byte = bytes(message, encoding="utf8")
ciphertext = base64.b64encode(password_byte)
xmu = dataManage.load_obj('lib/account')
xmu[qq] = {
'account': self.users[qq]['buffer']['buffer']['account'],
'password': ciphertext
}
dataManage.save_obj(xmu, 'lib/account')
elif self.users[qq]['buffer']['id'] == 12: # 订阅定时全局禁言服务
need_reply = True
get_time = True
value = {
'id': qq,
'hour1': 0,
'minute1': 0,
'hour2': 0,
'minute2': 0
}
list1 = message.replace(':', ':').split(' ')
if len(list1) != 2:
get_time = False
else:
list1_1 = list1[0].split(':')
list1_2 = list1[1].split(':')
if len(list1_1) != 2 or len(list1_2) != 2:
get_time = False
else:
list1_1[0] = time_pretreatment(list1_1[0])
list1_1[1] = time_pretreatment(list1_1[1])
list1_2[0] = time_pretreatment(list1_2[0])
list1_2[1] = time_pretreatment(list1_2[1])
if not list1_1[0].isdigit() or not list1_1[1].isdigit or not list1_2[0].isdigit() or not \
list1_2[1].isdigit:
get_time = False
else:
value['hour1'] = int(list1_1[0])
value['minute1'] = int(list1_1[1])
value['hour2'] = int(list1_2[0])
value['minute2'] = int(list1_2[1])
if not valid_time(value['hour1'], value['minute1']) or not valid_time(value['hour2'],
value['minute2']):
get_time = False
if not get_time:
if message != '取消':
reset_buffer = False
await bot.send(event, '这好像不是一个正确的格式,你可以输入“取消”来取消创建。请再次告诉我时间:')
else:
await bot.send(event, '已为您取消创建')
else:
muteall_schedule = dataManage.load_obj('data/Function/muteall') # 禁言计划
if value['hour1'] == value['hour2'] and value['minute1'] == value['minute2']:
reset_buffer = False
await bot.send(event, '这好像只有一分钟呢,你可以输入“取消”来取消创建。请再次告诉我时间:')
else:
muteall_schedule[group_id] = value
dataManage.save_obj(muteall_schedule, 'data/Function/muteall')
await bot.send(event, '创建成功!你可以输入“模块列表”来查看订阅的服务')
if reset_buffer:
self.users[qq]['buffer']['id'] = 0
self.users[qq]['buffer']['buffer'] = None
dataManage.save_user(qq, self.users[qq])
if need_reply:
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
# ===================================================================================
# 如果是群聊消息,并且具有小柒的操作权限,那么就可以进行退群和禁言的操作
if mode == 1:
if message_code == 'quit' or message_code == 'dismiss':
if group_right < 2 or right < 3:
await bot.send(event, '再见啦~各位!我会想你们的!')
await bot.quit(group_id)
self.statistics['quit'] += 1
dataManage.save_statistics(self.statistics)
logManage.group_log(getNow.toString(), qq, group_id, event.sender.group.get_name(),
message + '; 小柒退群!')
if master is not None:
await bot.send_friend_message(master.id, [
Plain('已退出群聊:' + str(group_id) + '!')
])
else:
reply_text = '权限不足,需要群管理或群主或者小柒的管理员'
await send_message(bot, event, mode, merge_reply, reply_text, reply_image, need_at, at_qq)
return
elif message_code == | |
# Repository: TobiasSchalau/privacy-evaluator
from privacy_evaluator.attacks.attack import Attack
from privacy_evaluator.classifiers.classifier import Classifier
import privacy_evaluator.utils.data_utils as data_utils
from privacy_evaluator.utils.trainer import trainer
from privacy_evaluator.models.tf.conv_net_meta_classifier import ConvNetMetaClassifier
from privacy_evaluator.models.tf.cnn import ConvNet
import numpy as np
import torch
import tensorflow as tf
from sklearn.model_selection import train_test_split
from typing import Tuple, Dict, List
from art.estimators.classification import TensorFlowV2Classifier
import string
class PropertyInferenceAttack(Attack):
def __init__(
    self, target_model: Classifier, dataset: Tuple[np.ndarray, np.ndarray]
):
    """
    Initialize the Property Inference Attack.

    :param target_model: the target model to be attacked
    :param dataset: dataset for training of shadow classifiers, test_data from
        dataset with concatenation [test_features, test_labels]
    """
    self.dataset = dataset
    # Total number of shadow training sets; half follow the property and half
    # follow its negation, so this must stay even.
    self.amount_sets = 2
    # Shape of a single sample, e.g. (32, 32, 3) for CIFAR10.
    self.input_shape = dataset[0][0].shape
    super().__init__(target_model, None, None, None, None)
def create_shadow_training_set(
    self, num_elements_per_class: Dict[int, int],
) -> List[Tuple[np.ndarray, np.ndarray]]:
    """
    Create the shadow training sets with the given ratio.

    Works for the specific binary case where the ratio is a fixed
    distribution specified in the input.

    :param num_elements_per_class: number of elements per class
    :return: shadow training sets for the given ratio
    """
    # self.amount_sets counts *all* shadow training sets; this method only
    # builds the sets for one side (property OR its negation), hence halving.
    half = int(self.amount_sets / 2)
    return [
        data_utils.new_dataset_from_size_dict(self.dataset, num_elements_per_class)
        for _ in range(half)
    ]
def train_shadow_classifiers(
    self,
    shadow_training_sets: List[Tuple[np.ndarray, np.ndarray]],
    num_elements_per_classes: Dict[int, int],
):
    """
    Train one shadow classifier per shadow training set (each set follows
    either the property or its negation).

    :param shadow_training_sets: datasets fulfilling a specific ratio used to
        train the shadow classifiers
    :param num_elements_per_classes: specific class distribution
    :return: list of shadow classifiers (ART estimators)
    """
    shadow_classifiers = []
    num_classes = len(num_elements_per_classes)
    for features, labels in shadow_training_sets:
        # Hold out 30% of each shadow set for evaluation.
        train_X, test_X, train_y, test_y = train_test_split(
            features, labels, test_size=0.3
        )
        model = ConvNet(num_classes, self.input_shape)
        trainer((train_X, train_y), num_elements_per_classes, model)
        # Wrap the trained model as an ART classifier so downstream code can
        # treat PyTorch and TensorFlow models uniformly.
        shadow_classifiers.append(
            Classifier._to_art_classifier(model, num_classes, self.input_shape)
        )
    return shadow_classifiers
def create_shadow_classifier_from_training_set(
    self, num_elements_per_classes: Dict[int, int]
) -> list:
    """Build shadow training sets for the distribution and train classifiers on them."""
    training_sets = self.create_shadow_training_set(num_elements_per_classes)
    return self.train_shadow_classifiers(training_sets, num_elements_per_classes)
@staticmethod
def feature_extraction(model):
    """
    Extract the features (flattened trainable parameters) of a given model.

    :param model: a model from which the features should be extracted
    :type model: :class:`.art.estimators.estimator.BaseEstimator`
    :return: feature extraction
    :rtype: np.ndarray
    :raises TypeError: if the wrapped model is neither PyTorch nor TensorFlow
    """
    inner = model.model
    if isinstance(inner, torch.nn.Module):
        # Keep only parameters that require gradients, then flatten each
        # tensor and concatenate everything into a single 1D numpy array.
        trainable = [p for p in inner.parameters() if p.requires_grad]
        return np.concatenate(
            [p.cpu().detach().numpy().flatten() for p in trainable]
        ).flatten()
    if isinstance(inner, tf.keras.Model):
        return np.concatenate(
            [v.numpy().flatten() for v in inner.trainable_variables]
        ).flatten()
    raise TypeError(
        "Expected model to be an instance of {} or {}, received {} instead.".format(
            str(torch.nn.Module), str(tf.keras.Model), str(type(model.model))
        )
    )
def create_meta_training_set(
    self, classifier_list_with_property, classifier_list_without_property
):
    """
    Create the meta training set out of the shadow classifiers.

    :param classifier_list_with_property: shadow classifiers trained on
        datasets which fulfill the property
    :param classifier_list_without_property: shadow classifiers trained on
        datasets which do NOT fulfill the property
    :return: tuple (meta-training features, labels)
    :rtype: tuple (np.ndarray, np.ndarray)
    """
    # One feature row per shadow classifier.
    with_features = np.array(
        [self.feature_extraction(c) for c in classifier_list_with_property]
    )
    without_features = np.array(
        [self.feature_extraction(c) for c in classifier_list_without_property]
    )
    meta_features = np.concatenate([with_features, without_features])
    # Label 1 marks "property holds", 0 marks its negation.
    meta_labels = np.concatenate(
        [
            np.ones(len(with_features), dtype=int),
            np.zeros(len(without_features), dtype=int),
        ]
    )
    return meta_features, meta_labels
@staticmethod
def train_meta_classifier(
    meta_training_X: np.ndarray, meta_training_y: np.ndarray
) -> TensorFlowV2Classifier:
    """
    Train the meta-classifier with the meta-training set.

    :param meta_training_X: feature representation of each shadow classifier
    :param meta_training_y: label per shadow classifier: property fulfilled
        (1) or not (0)
    :return: ART meta classifier
    """
    # Add a trailing channel axis so the conv net accepts the 1D features.
    meta_training_X = meta_training_X.reshape(
        (meta_training_X.shape[0], meta_training_X[0].shape[0], 1)
    )
    meta_training_y = meta_training_y.reshape((meta_training_y.shape[0], 1))
    meta_input_shape = meta_training_X[0].shape
    nb_classes = 2  # property vs. negation of the property
    inputs = tf.keras.Input(shape=meta_input_shape)
    # Architecture follows the model from https://arxiv.org/pdf/2002.05688.pdf
    cnmc = ConvNetMetaClassifier(inputs=inputs, num_classes=nb_classes)
    cnmc.model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer="adam",
        metrics=["accuracy"],
    )
    # With more shadow classifiers one could also pass validation_data here.
    cnmc.model.fit(
        x=meta_training_X,
        y=meta_training_y,
        epochs=2,
        batch_size=128,
    )
    # Wrap as an ART classifier for a uniform prediction interface.
    return Classifier._to_art_classifier(
        cnmc.model, nb_classes=nb_classes, input_shape=meta_input_shape
    )
@staticmethod
def perform_prediction(
    meta_classifier, feature_extraction_target_model
) -> np.ndarray:
    """
    "Actual" attack: feed the target model's feature extraction into the
    meta classifier and return the property prediction.

    :param meta_classifier: a classifier ("CLASSIFIER_TYPE" from .art.estimators)
    :param feature_extraction_target_model: extracted features of the target
        model, np.ndarray
    :return: probability distribution: property vs. negation of the property
        for the target data set
    :rtype: np.ndarray with shape (1, 2)
    """
    # Add the trailing channel axis the meta classifier was trained with.
    features = feature_extraction_target_model.reshape(
        (feature_extraction_target_model.shape[0], 1)
    )
    assert meta_classifier.input_shape == tuple(features.shape)
    # Wrap in a list to form a batch of size one.
    return meta_classifier.predict(x=[features])
@staticmethod
def output_attack(predictions_ratios: Dict[float, np.ndarray]) -> str:
    """
    Determine the prediction with the highest probability.

    Note: the return annotation was previously the stdlib ``string`` *module*
    (a typo); it is the builtin ``str``.

    :param predictions_ratios: prediction values from the meta-classifier for
        the different sub-attacks (one entry per candidate ratio); each value
        has shape (1, 2): [[P(property), P(negation)]]. Must contain the keys
        0.05 and 0.95.
    :return: output message for the attack
    :raises ValueError: if the probabilities tie, so neither "balanced" nor
        "unbalanced" can be predicted
    """
    # Ratio whose sub-attack assigned the highest probability to "property".
    max_property = max(predictions_ratios.items(), key=lambda item: item[1][0][0])
    # Average of the negation-property probabilities of the most unbalanced
    # ratios (0.05, 0.95) -> highest confidence for "balanced".
    average_unbalanced_cases_neg_property = (
        predictions_ratios[0.95][0][1] + predictions_ratios[0.05][0][1]
    ) / 2
    if max_property[1][0][0] > average_unbalanced_cases_neg_property:
        return "The property inference attack predicts that the target model is unbalanced with a ratio of {}.".format(
            max_property[0]
        )
    elif max_property[1][0][0] < average_unbalanced_cases_neg_property:
        return "The property inference attack predicts that the target model is balanced."
    else:
        raise ValueError(
            "Wrong input. Property inference attack cannot predict balanced and unbalanced."
        )
def prediction_on_specific_property(
    self,
    feature_extraction_target_model: np.ndarray,
    shadow_classifiers_neg_property: list,
    ratio: float,
    size_set: int,
) -> np.ndarray:
    """
    Perform the prediction for one sub-attack (a specific property).

    :param feature_extraction_target_model: extracted features of target model
    :param shadow_classifiers_neg_property: balanced shadow classifiers for
        the negation of the property
    :param ratio: distribution for the property
    :param size_set: size of one class from the data set
    :return: meta-classifier prediction for property vs. negation
    """
    # Class distribution implied by the ratio (binary case: classes 0 and 1).
    property_num_elements_per_classes = {
        0: int((1 - ratio) * size_set),
        1: int(ratio * size_set),
    }
    # Shadow classifiers trained on the unbalanced (property) distribution.
    shadow_classifiers_property = self.create_shadow_classifier_from_training_set(
        property_num_elements_per_classes
    )
    meta_features, meta_labels = self.create_meta_training_set(
        shadow_classifiers_property, shadow_classifiers_neg_property
    )
    meta_classifier = self.train_meta_classifier(meta_features, meta_labels)
    return self.perform_prediction(meta_classifier, feature_extraction_target_model)
def attack(self):
"""
Perform Property Inference attack.
:param params: Example data to run through target model for feature extraction
:type params: np.ndarray
:return: prediction about property of target data set
[[1, 0]]-> property; [[0, 1]]-> negation property
:rtype: np.ndarray with shape (1, 2)
"""
# extract features of target model
feature_extraction_target_model = self.feature_extraction(self.target_model)
# set ratio and size for unbalanced data sets
size_set = 1000
# balanced ratio
num_elements = int(round(size_set / 2))
neg_property_num_elements_per_class = {0: num_elements, 1: num_elements}
# create balanced shadow classifiers negation property
shadow_classifiers_neg_property = self.create_shadow_classifier_from_training_set(
neg_property_num_elements_per_class
)
predictions = {}
# iterate over unbalanced ratios in 0.05 steps (0.05-0.45, 0.55-0.95)
# (e.g. 0.55 means: class 0: 0.45 of all samples, class 1: 0.55 of all samples)
for ratio in | |
# Repository: Giuseppe5/NeMo
# Copyright (c) 2019 NVIDIA Corporation
# If you want to add your own data layer, you should put its name in
# __all__ so that it can be imported with 'from text_data_layers import *'
# Public API of this module, as exposed by 'from text_data_layers import *'.
__all__ = [
    'TextDataLayer',
    'BertSentenceClassificationDataLayer',
    'BertJointIntentSlotDataLayer',
    'BertJointIntentSlotInferDataLayer',
    'LanguageModelingDataLayer',
    'BertTokenClassificationDataLayer',
    'BertTokenClassificationInferDataLayer',
    'BertPretrainingDataLayer',
    'BertPretrainingPreprocessedDataLayer',
    'TranslationDataLayer',
    'GlueDataLayerClassification',
    'GlueDataLayerRegression',
]
# from abc import abstractmethod
import sys
import torch
from torch.utils import data as pt_data
import os
import h5py
import nemo
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
import random
import numpy as np
from .datasets import *
class TextDataLayer(DataLayerNM):
    """
    Generic Text Data Layer NM which wraps PyTorch's dataset.

    Args:
        dataset_type: dataset class (or its name as a string) used for this
            datalayer
        dataset_params (dict): all the params for the dataset
    """

    def __init__(self, dataset_type, dataset_params, **kwargs):
        super().__init__(**kwargs)
        # A string is resolved against this module's namespace so callers may
        # pass either the dataset class itself or its name.
        resolved_type = (
            getattr(sys.modules[__name__], dataset_type)
            if isinstance(dataset_type, str)
            else dataset_type
        )
        self._dataset = resolved_type(**dataset_params)

    def __len__(self):
        return len(self._dataset)

    @property
    def dataset(self):
        return self._dataset

    @property
    def data_iterator(self):
        # No custom iterator; the framework builds the default one.
        return None
class BertSentenceClassificationDataLayer(TextDataLayer):
    """
    Creates the data layer to use for the task of sentence classification
    with a pretrained model.

    All the data processing is done in BertSentenceClassificationDataset.

    Args:
        dataset (BertSentenceClassificationDataset):
            the dataset that needs to be converted to DataLayerNM
    """

    @staticmethod
    def create_ports():
        def batch_time_port():
            # Tensor indexed by (batch, time).
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        output_ports = {
            "input_ids": batch_time_port(),
            "input_type_ids": batch_time_port(),
            "input_mask": batch_time_port(),
            # One label per example in the batch.
            "labels": NeuralType({0: AxisType(BatchTag)}),
        }
        return {}, output_ports

    def __init__(self,
                 input_file,
                 tokenizer,
                 max_seq_length,
                 num_samples=-1,
                 shuffle=False,
                 batch_size=64,
                 dataset_type=BertSentenceClassificationDataset,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            input_file=input_file,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            num_samples=num_samples,
            shuffle=shuffle,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class BertJointIntentSlotDataLayer(TextDataLayer):
    """
    Creates the data layer to use for the task of joint intent
    and slot classification with a pretrained model.

    All the data processing is done in BertJointIntentSlotDataset.

    input_mask: used to ignore some of the input tokens like paddings
    loss_mask: used to mask and ignore tokens in the loss function
    subtokens_mask: used to ignore the outputs of unwanted tokens in
        the inference and evaluation like the start and end tokens

    Args:
        dataset (BertJointIntentSlotDataset):
            the dataset that needs to be converted to DataLayerNM
    """

    @staticmethod
    def create_ports():
        def batch_time_port():
            # Tensor indexed by (batch, time).
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        output_ports = {
            "input_ids": batch_time_port(),
            "input_type_ids": batch_time_port(),
            "input_mask": batch_time_port(),
            "loss_mask": batch_time_port(),
            "subtokens_mask": batch_time_port(),
            # One intent label per example; one slot label per token.
            "intents": NeuralType({0: AxisType(BatchTag)}),
            "slots": batch_time_port(),
        }
        return {}, output_ports

    def __init__(self,
                 input_file,
                 slot_file,
                 pad_label,
                 tokenizer,
                 max_seq_length,
                 num_samples=-1,
                 shuffle=False,
                 batch_size=64,
                 ignore_extra_tokens=False,
                 ignore_start_end=False,
                 dataset_type=BertJointIntentSlotDataset,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            input_file=input_file,
            slot_file=slot_file,
            pad_label=pad_label,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            num_samples=num_samples,
            shuffle=shuffle,
            ignore_extra_tokens=ignore_extra_tokens,
            ignore_start_end=ignore_start_end,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class BertJointIntentSlotInferDataLayer(TextDataLayer):
    """
    Creates the data layer to use for the task of joint intent
    and slot classification with a pretrained model, for inference.

    All the data processing is done in BertJointIntentSlotInferDataset.

    input_mask: used to ignore some of the input tokens like paddings
    loss_mask: used to mask and ignore tokens in the loss function
    subtokens_mask: used to ignore the outputs of unwanted tokens in
        the inference and evaluation like the start and end tokens

    Args:
        dataset (BertJointIntentSlotInferDataset):
            the dataset that needs to be converted to DataLayerNM
    """

    @staticmethod
    def create_ports():
        def batch_time_port():
            # Tensor indexed by (batch, time).
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        port_names = (
            "input_ids",
            "input_type_ids",
            "input_mask",
            "loss_mask",
            "subtokens_mask",
        )
        return {}, {name: batch_time_port() for name in port_names}

    def __init__(self,
                 queries,
                 tokenizer,
                 max_seq_length,
                 batch_size=1,
                 dataset_type=BertJointIntentSlotInferDataset,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            queries=queries,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class LanguageModelingDataLayer(TextDataLayer):
    """
    Data layer for the standard language modeling task.

    Args:
        dataset (str): path to text document with data
        tokenizer (TokenizerSpec): tokenizer
        max_seq_length (int): maximum allowed length of the text segments
        batch_step (int): how many tokens to skip between two successive
            segments of text when constructing batches
    """

    @staticmethod
    def create_ports():
        """
        input_ids: indices of tokens which constitute batches of text segments
        input_mask: bool tensor with 0s in place of tokens to be masked
        labels: indices of tokens which should be predicted from each of the
            corresponding tokens in input_ids; for left-to-right language
            modeling equals to input_ids shifted by 1 to the right
        """
        def batch_time_port():
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        output_ports = {
            "input_ids": batch_time_port(),
            "input_mask": batch_time_port(),
            "labels": batch_time_port(),
        }
        return {}, output_ports

    def __init__(self,
                 dataset,
                 tokenizer,
                 max_seq_length,
                 batch_step=128,
                 dataset_type=LanguageModelingDataset,
                 **kwargs):
        dataset_params = dict(
            dataset=dataset,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            batch_step=batch_step,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class BertTokenClassificationDataLayer(TextDataLayer):
    """Data layer for token classification (e.g. NER) with a pretrained BERT model."""

    @staticmethod
    def create_ports():
        def batch_time_port():
            # Tensor indexed by (batch, time).
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        port_names = (
            "input_ids",
            "input_type_ids",
            "input_mask",
            "loss_mask",
            "subtokens_mask",
            "labels",
        )
        return {}, {name: batch_time_port() for name in port_names}

    def __init__(self,
                 text_file,
                 label_file,
                 tokenizer,
                 max_seq_length,
                 pad_label='O',
                 label_ids=None,
                 num_samples=-1,
                 shuffle=False,
                 batch_size=64,
                 ignore_extra_tokens=False,
                 ignore_start_end=False,
                 use_cache=False,
                 dataset_type=BertTokenClassificationDataset,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            text_file=text_file,
            label_file=label_file,
            max_seq_length=max_seq_length,
            tokenizer=tokenizer,
            num_samples=num_samples,
            shuffle=shuffle,
            pad_label=pad_label,
            label_ids=label_ids,
            ignore_extra_tokens=ignore_extra_tokens,
            ignore_start_end=ignore_start_end,
            use_cache=use_cache,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class BertTokenClassificationInferDataLayer(TextDataLayer):
    """Inference data layer for token classification with a pretrained BERT model."""

    @staticmethod
    def create_ports():
        def batch_time_port():
            # Tensor indexed by (batch, time).
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        port_names = (
            "input_ids",
            "input_type_ids",
            "input_mask",
            "loss_mask",
            "subtokens_mask",
        )
        return {}, {name: batch_time_port() for name in port_names}

    def __init__(self,
                 queries,
                 tokenizer,
                 max_seq_length,
                 batch_size=1,
                 dataset_type=BertTokenClassificationInferDataset,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            queries=queries,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
        )
        super().__init__(dataset_type, dataset_params, **kwargs)
class BertPretrainingDataLayer(TextDataLayer):
    """
    Data layer for the masked language modeling task.

    Args:
        tokenizer (TokenizerSpec): tokenizer
        dataset (str): directory or a single file with dataset documents
        max_seq_length (int): maximum allowed length of the text segments
        mask_probability (float): probability of masking input sequence tokens
        batch_size (int): batch size in segments
        short_seq_prob (float): probability of creating sequences which are
            shorter than the maximum length. Defaults to 0.1.
    """

    @staticmethod
    def create_ports():
        """
        input_ids: indices of tokens which constitute batches of text segments
        input_type_ids: indices of token types (e.g., sentences A & B in BERT)
        input_mask: bool tensor with 0s in place of tokens to be masked
        output_ids: indices of output tokens which should be predicted
        output_mask: bool tensor with 0s in place of tokens to be excluded
            from loss calculation
        labels: indices of classes to be predicted from [CLS] token of text
            segments (e.g, 0 or 1 in next sentence prediction task)
        """
        def batch_time_port():
            return NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})

        output_ports = {
            "input_ids": batch_time_port(),
            "input_type_ids": batch_time_port(),
            "input_mask": batch_time_port(),
            "output_ids": batch_time_port(),
            "output_mask": batch_time_port(),
            "labels": NeuralType({0: AxisType(BatchTag)}),
        }
        return {}, output_ports

    def __init__(self,
                 tokenizer,
                 dataset,
                 max_seq_length,
                 mask_probability,
                 short_seq_prob=0.1,
                 batch_size=64,
                 **kwargs):
        kwargs['batch_size'] = batch_size
        dataset_params = dict(
            tokenizer=tokenizer,
            dataset=dataset,
            max_seq_length=max_seq_length,
            mask_probability=mask_probability,
            short_seq_prob=short_seq_prob,
        )
        # This layer is hard-wired to BertPretrainingDataset (no dataset_type
        # parameter, unlike the other layers in this module).
        super().__init__(BertPretrainingDataset, dataset_params, **kwargs)
class BertPretrainingPreprocessedDataLayer(DataLayerNM):
"""
Data layer for masked language modeling task.
Args:
tokenizer (TokenizerSpec): tokenizer
dataset (str): directory or a single file with dataset documents
max_seq_length (int): maximum allowed length of the text segments
mask_probability (float): probability of masking input sequence tokens
batch_size (int): batch size in segments
short_seeq_prob (float): Probability of creating sequences which are
shorter than the maximum length.
Defualts to 0.1.
"""
@staticmethod
def create_ports():
"""
input_ids: indices of tokens which constitute batches of text segments
input_type_ids: indices of token types (e.g., sentences A & B in BERT)
input_mask: bool tensor with 0s in place of tokens to be masked
output_ids: indices of output tokens which should be predicted
output_mask: bool tensor with 0s in place of tokens to be excluded
from loss calculation
labels: indices of classes to be predicted from [CLS] token of text
segments (e.g, 0 or 1 in next sentence prediction task)
"""
input_ports = {}
output_ports = {
"input_ids": NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"input_type_ids": NeuralType({
| |
# Part of the henrysky/gaia_tools package.
# Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
           colRA1='RA',colDec1='DEC',epoch1=None,
           colRA2='RA',colDec2='DEC',epoch2=None,
           colpmRA2='pmra',colpmDec2='pmdec',
           swap=False,
           col_field=None):
    """
    NAME:
       xmatch
    PURPOSE:
       cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
    INPUT:
       cat1 - First catalog
       cat2 - Second catalog
       maxdist= (2) maximum distance in arcsec
       colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
       colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
       epoch1= (2000.) epoch of the coordinates in cat1
       colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
       colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
       epoch2= (2000.) epoch of the coordinates in cat2
       colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
       colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
       swap= (False) if False, find closest matches in cat2 for each cat1 source; if True do the opposite (important when one of the catalogs has duplicates)
       col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
    OUTPUT:
       (index into cat1 of matching objects,
        index into cat2 of matching objects,
        angular separation between matching objects)
    HISTORY:
       2016-09-12 - Written - Bovy (UofT)
       2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
       2019-07-07 - add additional catalog field matching - Leung (UofT)
    """
    # Default epochs: use the catalog's own 'ref_epoch' column when present
    # (as in Gaia catalogs), otherwise assume J2000.
    if epoch1 is None:
        if 'ref_epoch' in cat1.dtype.fields:
            epoch1= cat1['ref_epoch']
        else:
            epoch1= 2000.
    if epoch2 is None:
        if 'ref_epoch' in cat2.dtype.fields:
            epoch2= cat2['ref_epoch']
        else:
            epoch2= 2000.
    _check_epoch(cat1,epoch1)
    _check_epoch(cat2,epoch2)
    depoch= epoch2-epoch1
    if numpy.any(depoch != 0.):
        # Use proper motion to get both catalogs at the same epoch:
        # shift cat2 positions back by depoch years (pm is in mas/yr,
        # hence the /3600000. to convert to degrees).
        dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
            /3600000.*depoch
        ddec= cat2[colpmDec2]/3600000.*depoch
        # Don't shift objects with non-existing proper motion
        dra[numpy.isnan(cat2[colpmRA2])]= 0.
        ddec[numpy.isnan(cat2[colpmDec2])]= 0.
    else:
        dra= 0.
        ddec= 0.
    mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
                          unit=(u.degree, u.degree),frame='icrs')
    mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
                          unit=(u.degree, u.degree),frame='icrs')
    if col_field is not None:
        try: # check if the field actually exists in both cat1/cat2
            cat1[col_field]
            cat2[col_field]
        except KeyError:
            raise KeyError("'%s' does not exist in both catalog" % col_field)
        uniques = numpy.unique(cat1[col_field])
        # Initialize separations to -1 so sources whose class never gets
        # matched are recognizable and filtered out at the end.
        if swap:
            d2d = numpy.ones(len(cat2)) * -1.
            idx = numpy.zeros(len(cat2), dtype=int)
        else:
            d2d = numpy.ones(len(cat1)) * -1.
            idx = numpy.zeros(len(cat1), dtype=int)
        for unique in uniques: # match each class value separately
            idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
            idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
            if idx_1.shape[0] == 0 or idx_2.shape[0] == 0: # the case where a class only exists in one but not the other
                continue
            if swap:
                temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
                m1 = numpy.arange(len(cat2))
                idx[cat2[col_field] == unique] = idx_1[temp_idx]
                d2d[cat2[col_field] == unique] = temp_d2d
            else:
                temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
                m1 = numpy.arange(len(cat1))
                idx[cat1[col_field] == unique] = idx_2[temp_idx]
                d2d[cat1[col_field] == unique] = temp_d2d
        # NOTE(review): if the two catalogs share no col_field value at all,
        # temp_d2d is never bound and the next line raises NameError — confirm
        # whether that input is possible for callers.
        d2d = d2d * temp_d2d.unit # make sure finally we have an unit on d2d array s.t. "<" operation can complete
    else:
        if swap:
            idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
            m1= numpy.arange(len(cat2))
        else:
            idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
            m1= numpy.arange(len(cat1))
    # Keep only matches within maxdist; the >= 0 test also drops the -1
    # placeholders left by unmatched classes in the col_field branch.
    mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
    m1= m1[mindx]
    m2= idx[mindx]
    if swap:
        return (m2,m1,d2d[mindx])
    else:
        return (m1,m2,d2d[mindx])
def cds(cat,xcat='vizier:I/350/gaiaedr3',maxdist=2,colRA='RA',colDec='DEC',
        selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
        savefilename=None,gaia_all_columns=False):
    """
    NAME:
       cds
    PURPOSE:
       Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
    INPUT:
       cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
       xcat= ('vizier:I/350/gaiaedr3') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
       maxdist= (2) maximum distance in arcsec
       colRA= ('RA') name of the tag in cat with the right ascension
       colDec= ('DEC') name of the tag in cat with the declination
       selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
       epoch= (2000.) epoch of the coordinates in cat
       colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
       colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
       gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
       savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
    OUTPUT:
       (xcat entries for those that match,
       indices into cat of matching sources: index[0] is cat index of xcat[0])
    HISTORY:
       2016-09-12 - Written based on RC catalog code - Bovy (UofT)
       2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
       2018-05-08 - Added gaia_all_columns - Bovy (UofT)
    """
    # Default epoch: the catalog's 'ref_epoch' column when present, else J2000.
    if epoch is None:
        if 'ref_epoch' in cat.dtype.fields:
            epoch= cat['ref_epoch']
        else:
            epoch= 2000.
    _check_epoch(cat,epoch)
    depoch= epoch-2000.
    if numpy.any(depoch != 0.):
        # Use proper motion to shift the input positions to J2000 before
        # uploading (pm in mas/yr, /3600000. converts to degrees).
        dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
            /3600000.*depoch
        ddec= cat[colpmDec]/3600000.*depoch
        # Don't shift objects with non-existing proper motion
        dra[numpy.isnan(cat[colpmRA])]= 0.
        ddec[numpy.isnan(cat[colpmDec])]= 0.
    else:
        dra= numpy.zeros(len(cat))
        ddec= numpy.zeros(len(cat))
    if selection != 'all': selection= 'best'
    if selection == 'all':
        raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
    # Write positions to a temporary CSV file for upload
    posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
    resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
    with open(posfilename,'w') as csvfile:
        wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
        wr.writerow(['RA','DEC'])
        for ii in range(len(cat)):
            # BUG FIX: the closing bracket previously sat before '-ddec[ii]',
            # so ddec was subtracted (via numpy broadcasting) from the whole
            # [RA, DEC] row — shifting RA a second time — instead of from the
            # declination alone.
            wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
                         cat[ii][colDec]-ddec[ii]])
    _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
    # Directly match on input RA
    ma= cds_load(resultfilename)
    if gaia_all_columns:
        from astroquery.gaia import Gaia
        # Write another temporary file with the XML output of the cross-match
        tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
                   names=('source_id','RA','DEC'),
                   dtype=('int64','float64','float64'))
        xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
        tab.write(xmlfilename,format='votable')
        # Derive the Gaia Archive schema name from the VizieR table id
        table_identifier = xcat.split('/')[-1]
        if table_identifier == 'gaia2':
            table_identifier = 'gaiadr2'
        try:
            job= Gaia.launch_job_async(
                """select g.*, m.RA as mRA, m.DEC as mDEC
                from %s.gaia_source as g
                inner join tap_upload.my_table as m on m.source_id = g.source_id""" % table_identifier,
                upload_resource=xmlfilename,
                upload_table_name="my_table")
            ma= job.get_results()
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit still
        # propagate; the Archive query stays deliberately best-effort.
        except Exception:
            print("gaia_tools.xmatch.cds failed to retrieve all gaia columns, returning just the default returned by the CDS xMatch instead...")
        else:
            ma.rename_column('mra','RA')
            ma.rename_column('mdec','DEC')
        finally:
            os.remove(xmlfilename)
    # Remove temporary files
    os.remove(posfilename)
    if savefilename is None:
        os.remove(resultfilename)
    else:
        shutil.move(resultfilename,savefilename)
    # Match back to the original catalog
    mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
                       colpmRA=colpmRA,colpmDec=colpmDec)
    return (ma,mai)
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
nruns_necessary=1):
"""CDS xMatch (sometimes?) fails for large matches, because of a time-out,
so we recursively split until the batches are small enough to not fail"""
# Figure out which of the hierarchy we are running
try:
runs= ''.join([str(int(r)-1)
for r in posfilename.split('csv.')[-1].split('.')])
except ValueError:
runs= ''
nruns= 2**len(runs)
if nruns >= nruns_necessary:
# Only run this level's match if we don't already know that we should
# be using smaller batches
_cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
try:
ma= cds_load(resultfilename)
except ValueError: # Assume this is the time-out failure
pass
else:
return nruns
# xMatch failed because of time-out, split
posfilename1= posfilename+'.1'
posfilename2= posfilename+'.2'
resultfilename1= resultfilename+'.1'
resultfilename2= resultfilename+'.2'
# Figure out which of the hierarchy we are running
runs= ''.join([str(int(r)-1)
for r in posfilename1.split('csv.')[-1].split('.')])
nruns= 2**len(runs)
thisrun1= 1+int(runs,2)
thisrun2= 1+int(''.join([str(int(r)-1)
for r in posfilename2.split('csv.')[-1].split('.')]),2)
# Count the number of objects
with open(posfilename,'r') as posfile:
num_lines= sum(1 for line in posfile)
# Write the header line
with open(posfilename1,'w') as posfile1:
with open(posfilename,'r') as posfile:
posfile1.write(posfile.readline())
with open(posfilename2,'w') as posfile2:
with open(posfilename,'r') as | |
/ CTP': 'سبل المعيشة/برنامج التحويلات النقدية',
'Livelihood Manager': 'مدير سبل المعيشة',
'Livelihoods': 'سبل العيش',
'Load Cleaned Data into Database': 'تحميل البيانات الكاملة إلى قاعدة البيانات',
'loading': 'جار التحميل',
'Loading': 'جار التحميل',
'Local Acronym': 'اسم المختصر المحلي',
'Local Currency': 'العملة المحلية',
'Local Name': 'الاسم المحلي',
'Local Names': 'أسماء محلية',
'Location': 'موقع',
'Location (Site)': 'الموقع (الموقع)',
'Location 1': 'موقع 1',
'Location 2': 'الموقع 2',
'Location added': 'تمت اضافة الموقع',
'Location Added': 'تم اضافة الموقع',
'Location added to %(site_label)s': ' الموقع تم إضافته إلى %(site_label)s',
'Location added to Group': 'الموقع تم إضافته إلى المجموعة',
'Location added to Organization': 'الموقع تم إضافته إلى المنظمة',
'Location added to Person': 'إضافة موقع إلى الشخص',
'Location data required': 'تتطلب بيانات الموقع',
'Location deleted': 'تم حذف الموقع',
'Location Deleted': 'موقع تم حذفة',
'Location Detail': 'تفاصيل الموقع',
'Location Details': 'تفاصيل الموقع',
'Location Fields': 'حقول الموقع',
'Location group cannot be a parent.': ' لا يمكن أن يكون موقع المجموعة كأب',
'Location Hierarchy Level 3 Name': 'اسم موقع التسلسل الهرمي للمستوى3',
'Location Hierarchy Level 4 Name': 'اسم مستوى4 للموقع على التسلسل الهرمي',
'Location Hierarchy Level 5 Name': 'اسم موقع التسلسل الهرمي للمستوى 5',
'Location is of incorrect level!': 'الموقع من مستوى غير صحيح!',
'Location removed from %(site_label)s': 'تم إزالة موقع من %(site_label)s',
'Location removed from Group': 'تم إزالة الموقع من المجموعة',
'Location removed from Organization': 'تم إزالة الموقع من المنظمة',
'Location removed from Person': 'تم إزالة الموقع من شخص',
'Location updated': 'تم تحديث الموقع',
'Location: ': 'الموقع:',
'Locations': 'مواقع',
'Log entry updated': 'تم تحديث السجل',
'Logged in': 'تسجيل الدخول',
'Logged out': 'تسجيل خروج',
'Login': 'تسجيل الدخول',
'login': 'تسجيل الدخول',
'Logistics': 'اللوجستية',
'Logistics & Warehouses': 'الخدمات اللوجستية والمستودعات',
'Logo': 'شعار',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'شعار المنظمة. يجب أن يكون هذا PNG أو ملف JPEG، وينبغي أن لا يكون أكبر من 400x400',
'Logout': 'خروج',
'long': 'طويل',
'Long Name': 'الاسم الكامل',
'Long-term': 'طويل الأمد',
'long>12cm': ' أطول من > 12 CM',
'Longitude': 'خط الطول',
'Longitude is Invalid!': 'خط الطول غير صالح!',
'Longitude is West-East (sideways).': 'يتمحور خط الطول من الغرب إلى الشرق (جانبي).',
'Longitude must be -180..180': 'يجب أن يكون خط الطول -180..180',
'Longitude must be between -180 and 180.': 'يجب أن يكون خط الطول بين -180 و 180.',
'Longitude of far eastern end of the region of interest.': 'خط الطول لأبعد نهاية في الشرق الأقصى من المنطقة المهمة.',
'Longitude of Map Center': ' طول مركز خريطة',
'Lost': 'مفقود',
'Lost Password': 'كلمة المرور المفقودة',
'low': 'منخفض',
'Low': 'منخفضة',
'Magnetic Storm': 'عاصفة مغناطيسية',
'Main?': 'الأساسية؟',
'Mainstreaming DRR': 'تعميم الحد من مخاطر الكوارث',
'Major': 'الاختصاص',
'male': 'ذكر',
'Male': 'ذكر',
'Manage National Society Data': 'بيانات ادارة الجمعية الوطنية',
'Manage office inventories and assets.': 'ادارة المخان والموجودات الثابتة',
'Manage Offices Data': ' ادارة بيانات المكاتب',
'Manage Relief Item Catalogue': ' إدارة كتالوج عنصر الإغاثة',
'Manage requests of hospitals for assistance.': 'إدارة طلبات المستشفيات للحصول على المساعدة.',
'Manage Staff Data': ' ادارة بيانات الموضفين',
'Manage Teams Data': ' ادارة بيانات الفرق',
'Manage volunteers by capturing their skills, availability and allocation': 'إدارة المتطوعين من خلال التقاط مهاراتهم ، وتوافرهم وتوزيعهم',
'Manage Warehouses/Sites': 'إدارة المستودعات / المواقع',
'Manager': 'مدير',
'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'ادارة الموارد البشرية والمادية للاعداد افضل في حاله المخاطر المستقبلية',
'Managing Office': 'المكتب الاداري',
'Mandatory': 'إلزامي',
'Manual Synchronization': 'مزامنة يدوية',
'Many': 'عدة',
'Map': 'خريطة',
'Map Center Latitude': 'خط العرض لمركز الخريطة',
'Map Center Longitude': 'خط الطول المركزي للخريطة',
'Map Height': 'إرتفاع الخريطة',
'Map Input Required': 'خريطة المدخلات المطلوبة',
'Map of Communities': 'خرائط المجتمعات',
'Map of Facilities': 'خريطة المرافق',
'Map of Hospitals': 'خريطة المستشفيات',
'Map of Offices': 'خريطة المكاتب',
'Map of Projects': 'خريطة المشاريع',
'Map of Resources': 'خريطة الموارد',
'Map of Warehouses': 'خريطة المستودعات',
'Map Profile added': 'تمت اضافة تكوين الخريطة',
'Map Profile deleted': 'تم حذف تكوين الخريطة',
'Map Profiles': 'تكوينات الخريطة',
'Map Service Catalog': 'كتالوج خدمات الخريطة',
'Map Settings': 'اعدادات الخريطة',
'Map Width': 'عرض الخريطة',
'Map Zoom': 'تكبير الخريطة',
'Marital Status': 'الحالة الإجتماعية',
'Mark as duplicate': 'وضع علامة مكرر',
'Marker': 'علامة',
'Marker deleted': 'تم حذف العلامة',
'Marker Details': 'تفاصيل العلامة',
'Markets/Marketing Analysis, Linkages and Support': 'الأسواق/تحليل التسويق، الروابط والدعم',
'married': 'متزوج',
'married (not legally recognized)': 'متزوج (غير معترف بها قانونيا)',
'Match': 'مطابقة',
'Match Requests': 'طلبات متطابقة',
'Matching Catalog Items': 'مطابقة عناصر الكتالوج',
'Matching Records': 'مطابقة السجلات ',
'Maternal, Newborn and Child Health': 'صحة الأم والوليد وصحة الطفل',
'Maximum': 'أقصى',
'Maximum Location Latitude': 'الموقع الأقصى لخط العرض',
'Maximum Location Longitude': 'أقصى خط طول للموقع',
'Means of Verification': 'وسائل التحقق',
'Measurement Frequency': 'تردد القياس',
'Measurement Procedure': 'إجراء القياس',
'Media': 'وسائل الإعلام',
'Medical Conditions': 'الحاله الصحية',
'Medical Services': 'الخدمات الطبية',
'Medical Supplies and Equipment': 'اللوازم الطبية والمعدات',
'medium': 'متوسط(ة)',
'Medium': 'متوسط',
'medium<12cm': 'متوسطة <12CM',
'Meetings': 'اجتماعات',
'Member': 'عضو',
'Member Base Development': 'تطوير قاعدة الأعضاء',
'Member Organizations': 'المنظمات الأعضاء',
'Members': 'أفراد',
'Members Deployed': 'الأعضاء المنتشرة',
'Membership': 'عضوية',
'Membership Approved': 'العضوية المعتمدة',
'Membership Details': 'تفاصيل العضوية',
'Membership Fee Last Paid': 'رسوم العضوية- آخر مدفوع ',
'Membership Types': 'أنواع العضوية',
'Membership updated': 'تم تحديث العضوية',
'Memberships': 'عضوية',
'Men': 'رجالي',
'Mental Health': 'الصحة النفسية',
'Mental Health Support': 'دعم الصحة النفسية',
'Message': 'رسالة',
'Message added': 'تمت اضافة الرسالة',
'Message Details': 'تفاصيل الرسالة',
'Message field is required!': 'حقل الرسالة مطلوب!',
'Message Members': 'رسالة الأعضاء',
'Message Participants': 'رسالة المشاركين',
'Messages': 'رسائل',
'Messaging settings updated': 'تم تحدبث ضبط الرسائل',
'Methodology': 'منهجية',
'MH Complaint Type': 'نوع الشكوى المتعلقة بالصحة النفسية',
'Middle Name': 'الاسم الوسطى',
'Migrants or ethnic minorities': 'المهاجرون أو الأقليات العرقية',
'Milestone': 'معلما',
'Milestone Added': 'معلم تم إضافته',
'Milestone Deleted': 'معلم تم حذفه',
'Milestone Details': 'تفاصيل المعلم',
'Milestone Updated': 'معلم تم تحديثه',
'Milestones': 'معالم',
'Military': 'عسكري',
'Military Service': 'الخدمة العسكرية',
'Minimum': 'الحد الأدنى',
'Minimum Location Longitude': 'خطوط الطول الأدنى للموقع',
'Minor Damage': 'أضرار طفيفة',
'Minorities participating in coping activities': 'الأقلية المشاركة في الأنشطة',
'Minute': 'دقيقة',
'Minutes must be 0..59': 'يجب أن تكون دقيقة 0..59',
'Minutes must be a number.': 'يجب أن تكون دقيقة في العدد.',
'Minutes must be less than 60.': 'يجب أن تكون دقيقة أقل من 60.',
'Miscellaneous': 'متفرقات',
'Missing': 'مفقود',
'missing': 'مفقود',
'Missing Person': 'الشخص المفقود',
'Missing Person Details': 'تفاصيل الشخص المفقود',
'Missing Person Registry': 'سجل الشخص المفقود',
'Missing Persons': 'اشخاص مفقودين',
'Missing Report': 'تقرير مفقود',
'Missing Senior Citizen': 'كبار السن المفقودين',
'Mission': 'مهمة',
'Mission Details': 'تفاصيل المهمة',
'Mission updated': 'تم تحديث المهمة',
'Mobile': 'المحمول',
'Mobile Health Units': 'وحدات صحية متنقلة',
'Mobile Phone': 'الهاتف المحمول',
'Mode': 'النمط',
'Model/Type': 'نوع/ نموذج',
'Modem Settings': 'ضبط الموديم',
'Modem settings updated': 'تم تحديث إعدادات المودم',
'Modifying data in spreadsheet before importing it to the database': 'تعديل البيانات في جدول قبل استيراده إلى قاعدة البيانات',
'module allows the site administrator to configure various options.': 'الوحدة التي تسمح لمسؤول عن الموقع لضبط مختلف الخيارات.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'توفر الوحدة آلية تعاونية و التي تزود بلمحة عامة عن الكوارث النامية ، وذلك باستخدام الخرائط مباشرة على شبكة الإنترنت (GIS).',
'Monday': 'الإثنين',
'mongoloid': 'منغولي',
'Monitoring and Evaluation': 'رصد وتقييم',
'Month': 'شهر',
'Monthly Cost': 'التكلفة الشهرية',
'Monthly Membership Fee': 'رسوم العضوية الشهرية',
'Monthly Rent Expense': 'نفقات الإيجار الشهري',
'Monthly Salary': 'الراتب الشهري',
'Monthly Status': 'الحالة الشهرية',
'Months': 'أشهر',
'more': 'المزيد',
'More Options': 'خيارات اخرى',
'more...': 'أكثر...',
'Morgue': 'مشرحة',
'Morgue Status': 'حالة المشرحة',
'Morgue Units Available': 'وحدات المشرحة المتوفرة',
'Mosque': 'مسجد',
'Moustache': 'شوارب',
'Movement Cooperation': 'تعاون الحركة',
'Multiple Matches': 'تعدد التطابقات',
'Multiple Options': 'خيارات متعددة',
'Muslim': 'مسلم',
'Must be unique': 'يجب أن تكون فريدة من نوعها',
'My Logged Hours': 'الساعات التي تم تسجيل الدخول الخاصة بي',
'My Maps': 'خرائطي',
'My Tasks': 'مهامي',
'N/A': 'غير موجود',
'Name': 'الاسم',
'Name and/or ID': 'اسم و / أو ID',
'Name of a programme or another project which this project is implemented as part of': 'اسم البرنامج أو مشروع آخر الذي يتم تنفيذ هذا المشروع كجزء من',
'Name of Award': 'أسم مستوى التقدم',
'Name of Driver': 'اسم السائق',
'Name of Father': 'اسم الأب',
'Name of Grandfather': 'اسم الجد',
'Name of Grandmother': 'اسم الجدة',
'Name of Institute': 'أسم المعهد',
'Name of Meeting': 'اسم الإجتماع',
'Name of Mother': 'اسم الأم',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'يقع اسم الملف (و المسار الفرعي الإختياري ) في وجهات النظر التي يجب استخدامها أسفل الصفحة .',
'Name of the person in local language and script (optional).': 'اسم الشخص في اللغة المحلية والكتابة (اختياري).',
'Names can be added in multiple languages': 'يمكن إضافة الأسماء في لغات متعددة',
'Narrative Report': 'تقرير سردي',
'National': 'وطني',
'National ID': 'الهوية الوطنية',
'National ID Card': 'بطاقة الهوية الوطنية',
'National Societies': 'المنظمات الوطنية',
'National Society': 'الجمعية الوطنية',
'National Society / Branch': 'فروع / الجمعية الوطنية',
'National Society added': 'تم اضافة الجمعية الوطنية',
'National Society deleted': 'الجمعية الوطنية تم حذف',
'National Society Details': 'تفاصيل المجتمع الوطني',
'National Society updated': 'الجمعية الوطنية تم تحديثها',
'Nationality': 'الجنسيه',
'Nationality of the person.': 'جنسية الشخص.',
'native': 'اللغة الأم',
'Native language': 'اللغة الأم',
'NDRT (National Disaster Response Teams)': 'NDRT ( الفريق الوطني للاستجابة للكوارث)',
'Need for SNF': 'الحاجة لصندوق الاحتياحات الخاصة',
'Need to be logged-in to be able to submit assessments': 'يجب أن يتم التسجيل للتمكن من تقديم التقييمات',
'Need to specify a group!': 'بحاجة إلى تحديد مجموعة!',
'Need to specify a Kit!': 'تحتاج إلى تخصيص طقم!',
'Need to specify a Resource!': 'يجب تحديد المصدر!',
'Need to specify a table!': 'تحتاج إلى تحديد جدول!',
'Need to specify a user!': 'تحتاج إلى تحديد المستخدم!',
'Need Type': 'نوع الحاجة',
'Need Type deleted': 'تم حذف نوع الحاجة',
'Need Type updated': 'تم تحديث نوع الحاجة',
'Need Types': 'أنواع الإحتياجات',
'Needs': 'الاحتياجات',
'Needs Details': 'تحتاج إلى | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from collections import namedtuple, OrderedDict
import tensorflow as tf
from tensorflow.python.training import moving_averages
# Batch-norm hyperparameters: moving-average decay and numerical-stability
# epsilon. 0.997 (commented) is the alternative value considered previously.
_BATCH_NORM_DECAY = 0.9 #0.997
_BATCH_NORM_EPSILON = 1e-5
# Convolutions below omit the bias term (batch norm provides the offset).
_USE_BIAS = False
# He-style fan-out initializer shared by all weight creations in this file.
_KERNEL_INITIALIZER=tf.variance_scaling_initializer(mode='fan_out')
def sample_arch(num_cells):
  """Uniformly sample a random cell architecture description.

  For each of the *num_cells* nodes two branches are drawn; each branch is
  an input index (any of the 0..cell_id earlier positions) followed by an
  operation id drawn from [0, 11).

  Returns:
    A 1-D int32 tensor of length num_cells * 4.
  """
  tokens = []
  for node_id in range(num_cells):
    for _branch in range(2):
      input_index = tf.random_uniform(
          [1], minval=0, maxval=node_id + 1, dtype=tf.int32)
      tokens.append(input_index)
      op_id = tf.random_uniform([1], minval=0, maxval=11, dtype=tf.int32)
      tokens.append(op_id)
  return tf.concat(tokens, axis=0)
def sample_arch_from_pool(arch_pool, prob=None):
  """Pick one architecture pair from a pool.

  Args:
    arch_pool: list of (conv_dag, reduc_dag) architecture pairs.
    prob: optional per-architecture selection weights; uniform when None.
      NOTE(review): tf.multinomial interprets its input as unnormalized
      log-probabilities — confirm `prob` is supplied in that form.

  Returns:
    (conv_dag, reduc_dag) of the sampled architecture.
  """
  pool_size = len(arch_pool)
  pool = tf.convert_to_tensor(arch_pool, dtype=tf.int32)
  if prob is None:
    chosen = tf.random_uniform([], minval=0, maxval=pool_size, dtype=tf.int32)
  else:
    logits = tf.expand_dims(tf.squeeze(prob), axis=0)
    chosen = tf.multinomial(logits, 1)[0][0]
  sampled = pool[chosen]
  return sampled[0], sampled[1]
def create_weight(name, shape, initializer=None, trainable=True, seed=None):
  """Create (or fetch from the current variable scope) a weight variable.

  Falls back to the module-wide fan-out initializer when none is given.
  NOTE(review): `seed` is accepted but never used — confirm whether it was
  meant to be threaded into the initializer.
  """
  chosen_init = _KERNEL_INITIALIZER if initializer is None else initializer
  return tf.get_variable(name, shape, initializer=chosen_init,
                         trainable=trainable)
def create_bias(name, shape, initializer=None):
  """Create (or fetch) a bias variable; zero-initialized by default."""
  chosen_init = (tf.constant_initializer(0.0, dtype=tf.float32)
                 if initializer is None else initializer)
  return tf.get_variable(name, shape, initializer=chosen_init)
def get_channel_dim(x, data_format='INVALID'):
  """Return the channel count of a rank-4 tensor under *data_format*."""
  assert data_format != 'INVALID'
  assert x.shape.ndims == 4
  channel_axis = 1 if data_format == 'channels_first' else 3
  return x.shape[channel_axis].value
def get_channel_index(data_format='INVALID'):
  """Return the axis indexing channels: 1 for channels_first, else 3."""
  assert data_format != 'INVALID'
  if data_format == 'channels_first':
    return 1
  return 3
def batch_normalization(x, data_format, is_training):
  """Batch-normalize *x* with explicitly managed variables.

  The scale/offset and moving statistics are created directly in the
  enclosing variable scope (under 'batch_normalization'), rather than via
  tf.layers, so the surrounding NAS code controls their sharing/reuse.

  Args:
    x: 4-D activation tensor (NCHW for "channels_first", NHWC otherwise).
    data_format: "channels_first" or "channels_last".
    is_training: Python bool. True normalizes with batch statistics and
      updates the moving averages (the control dependency below forces the
      updates to run); False normalizes with the stored moving averages.

  Returns:
    The normalized tensor, same shape as *x*.

  Raises:
    NotImplementedError: for any other data_format value.
  """
  if data_format == "channels_first":
    shape = [x.get_shape()[1]]
  elif data_format == "channels_last":
    shape = [x.get_shape()[3]]
  else:
    raise NotImplementedError("Unknown data_format {}".format(data_format))
  with tf.variable_scope('batch_normalization'):#, reuse=None if is_training else True):
    offset = tf.get_variable(
      "offset", shape,
      initializer=tf.constant_initializer(0.0, dtype=tf.float32))
    scale = tf.get_variable(
      "scale", shape,
      initializer=tf.constant_initializer(1.0, dtype=tf.float32))
    # Moving statistics are not trainable; they are updated via assign ops.
    moving_mean = tf.get_variable(
      "moving_mean", shape, trainable=False,
      initializer=tf.constant_initializer(0.0, dtype=tf.float32))
    moving_variance = tf.get_variable(
      "moving_variance", shape, trainable=False,
      initializer=tf.constant_initializer(1.0, dtype=tf.float32))
    if is_training:
      x, mean, variance = tf.nn.fused_batch_norm(
        x, scale, offset, epsilon=_BATCH_NORM_EPSILON,
        data_format='NCHW' if data_format == "channels_first" else 'NHWC',
        is_training=True)
      update_mean = moving_averages.assign_moving_average(
        moving_mean, mean, _BATCH_NORM_DECAY)
      update_variance = moving_averages.assign_moving_average(
        moving_variance, variance, _BATCH_NORM_DECAY)
      # Tie the updates to the output so they execute whenever x is used.
      with tf.control_dependencies([update_mean, update_variance]):
        x = tf.identity(x)
    else:
      x, _, _ = tf.nn.fused_batch_norm(x, scale, offset, mean=moving_mean,
                                       variance=moving_variance,
                                       epsilon=_BATCH_NORM_EPSILON,
                                       data_format='NCHW' if data_format == "channels_first" else 'NHWC',
                                       is_training=False)
  return x
def factorized_reduction(inputs, filters, strides, data_format, is_training):
  """Reduce spatial size and set the channel count of *inputs*.

  For strides == 1 this is simply a 1x1 convolution plus batch norm. For
  larger strides the input is processed along two parallel paths — average
  pool + 1x1 conv to filters/2 each, the second path on an input shifted by
  one pixel so the paths sample complementary grid positions — and the two
  halves are concatenated along the channel axis.

  Args:
    inputs: 4-D activation tensor.
    filters: number of output channels; must be even (split across paths).
    strides: spatial stride of the reduction.
    data_format: 'channels_first' or 'channels_last'.
    is_training: forwarded to batch_normalization.

  Returns:
    The reduced tensor with *filters* channels.
  """
  assert filters % 2 == 0, (
      'Need even number of filters when using this factorized reduction')
  if strides == 1:
    with tf.variable_scope('path_conv'):
      inputs = tf.layers.conv2d(
        inputs=inputs, filters=filters, kernel_size=1,
        strides=strides, padding='SAME', use_bias=_USE_BIAS,
        kernel_initializer=_KERNEL_INITIALIZER,
        data_format=data_format)
    with tf.variable_scope('path_bn'):
      inputs = batch_normalization(inputs, data_format, is_training)
    return inputs
  # Path 1: strided average pool then 1x1 conv to half the output channels.
  path1 = tf.layers.average_pooling2d(inputs, pool_size=1, strides=strides, padding='VALID', data_format=data_format)
  with tf.variable_scope('path1_conv'):
    path1 = tf.layers.conv2d(
      inputs=path1, filters=int(filters / 2), kernel_size=1,
      strides=1, padding='SAME', use_bias=_USE_BIAS,
      kernel_initializer=_KERNEL_INITIALIZER,
      data_format=data_format)
  # Path 2: pad-and-crop shifts the input one pixel down/right before the
  # same pool + conv, so the two paths cover offset pixel grids.
  if data_format == 'channels_first':
    pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
    path2 = tf.pad(inputs, pad_arr)[:, :, 1:, 1:]
  else:
    pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
    path2 = tf.pad(inputs, pad_arr)[:, 1:, 1:, :]
  path2 = tf.layers.average_pooling2d(path2, pool_size=1, strides=strides, padding='VALID', data_format=data_format)
  with tf.variable_scope('path2_conv'):
    path2 = tf.layers.conv2d(
      inputs=path2, filters=int(filters / 2), kernel_size=1,
      strides=1, padding='SAME', use_bias=_USE_BIAS,
      kernel_initializer=_KERNEL_INITIALIZER,
      data_format=data_format)
  # Concatenate the two halves along the channel axis and normalize.
  final_path = tf.concat(values=[path1, path2], axis=get_channel_index(data_format))
  with tf.variable_scope('final_path_bn'):
    inputs = batch_normalization(final_path, data_format, is_training)
  return inputs
class NASCell(object):
def __init__(self, filters, dag, num_nodes, drop_path_keep_prob, num_cells,
total_steps, data_format, is_training):
self._filters = filters
self._dag = dag
self._num_nodes = num_nodes
self._drop_path_keep_prob = drop_path_keep_prob
self._num_cells = num_cells
self._total_steps = total_steps
self._is_training = is_training
self._data_format = data_format
  def _reduce_prev_layer(self, prev_layer, curr_layer, is_training):
    """Make prev_layer compatible with curr_layer in shape and channels.

    If the spatial sizes differ, prev_layer is halved with a factorized
    reduction; if only the channel counts differ, a 1x1 conv + batch norm
    adjusts them. Returns curr_layer unchanged when prev_layer is None.

    NOTE(review): self._filter_size is not set in __init__ — presumably
    assigned elsewhere (e.g. the cell's call path) before this runs; confirm.
    """
    if prev_layer is None:
      return curr_layer
    curr_num_filters = self._filter_size
    data_format = self._data_format
    #is_training = self._is_training
    prev_num_filters = get_channel_dim(prev_layer, data_format)
    curr_filter_shape = int(curr_layer.shape[2])
    prev_filter_shape = int(prev_layer.shape[2])
    if curr_filter_shape != prev_filter_shape:
      # Spatial mismatch: halve prev_layer (stride 2) to match.
      prev_layer = tf.nn.relu(prev_layer)
      prev_layer = factorized_reduction(prev_layer, curr_num_filters, 2, data_format, is_training)
    elif curr_num_filters != prev_num_filters:
      # Channel mismatch only: project with a 1x1 convolution.
      prev_layer = tf.nn.relu(prev_layer)
      with tf.variable_scope('prev_1x1'):
        prev_layer = tf.layers.conv2d(
          inputs=prev_layer, filters=curr_num_filters, kernel_size=1,
          strides=1, padding='SAME', use_bias=_USE_BIAS,
          kernel_initializer=_KERNEL_INITIALIZER,
          data_format=data_format)
      with tf.variable_scope('prev_bn'):
        prev_layer = batch_normalization(prev_layer, data_format, is_training)
    return prev_layer
  def _nas_conv(self, x, curr_cell, prev_cell, filter_size, out_filters, stack_conv=1):
    """Plain convolution branch with shared weights across candidate inputs.

    One weight variable holds parameters for every possible input of this
    node (curr_cell + 2 candidates); the slice for *prev_cell* is selected,
    so all candidate connections share storage (weight sharing). The same
    scheme applies to the per-input batch-norm offset/scale.
    """
    with tf.variable_scope("conv_{0}x{0}".format(filter_size)):
      num_possible_inputs = curr_cell + 2
      for conv_id in range(stack_conv):
        with tf.variable_scope("stack_{0}".format(conv_id)):
          # create params and pick the slice belonging to prev_cell
          inp_c = get_channel_dim(x, self._data_format)
          w = create_weight(
            "w", [num_possible_inputs, filter_size * filter_size * inp_c * out_filters],
            initializer=_KERNEL_INITIALIZER)
          w = w[prev_cell, :]
          w = tf.reshape(
            w, [filter_size, filter_size, inp_c, out_filters])
          with tf.variable_scope("bn"):
            zero_init = tf.initializers.zeros(dtype=tf.float32)
            one_init = tf.initializers.ones(dtype=tf.float32)
            offset = create_weight(
              "offset", [num_possible_inputs, out_filters],
              initializer=zero_init)
            scale = create_weight(
              "scale", [num_possible_inputs, out_filters],
              initializer=one_init)
            offset = offset[prev_cell]
            scale = scale[prev_cell]
          # the computations: relu -> conv -> batch norm
          x = tf.nn.relu(x)
          x = tf.nn.conv2d(
            x,
            filter=w,
            strides=[1, 1, 1, 1], padding="SAME",
            data_format='NCHW' if self._data_format=='channels_first' else 'NHWC')
          #x = batch_normalization(x, self._data_format, self._is_training)
          # Always uses batch statistics (is_training=True); no moving
          # averages are tracked along this path.
          x, _, _ = tf.nn.fused_batch_norm(
            x, scale, offset, epsilon=_BATCH_NORM_EPSILON, is_training=True,
            data_format='NCHW' if self._data_format=='channels_first' else 'NHWC')
    return x
def _nas_sep_conv(self, x, curr_cell, prev_cell, filter_size, out_filters, stack_conv=2):
    """Depthwise-separable NxN convolution branch of a NAS cell.

    Mirrors ``_nas_conv`` but uses a depthwise kernel followed by a 1x1
    pointwise kernel; weights for every possible input cell are created up
    front and the slice for ``prev_cell`` is selected so variables are
    shared across sampled architectures.

    Args:
        x: Input feature-map tensor.
        curr_cell: Index of the cell currently being built; there are
            ``curr_cell + 2`` possible inputs.
        prev_cell: Index of the chosen input; selects the weight slices.
        filter_size: Spatial kernel size of the depthwise convolution.
        out_filters: Number of output channels.
        stack_conv: Number of relu -> sep-conv -> batch-norm repetitions.

    Returns:
        The transformed tensor.
    """
    with tf.variable_scope("sep_conv_{0}x{0}".format(filter_size)):
        num_possible_inputs = curr_cell + 2
        for conv_id in range(stack_conv):
            with tf.variable_scope("stack_{0}".format(conv_id)):
                # Create params for every possible input and pick the slice
                # belonging to the sampled path.
                inp_c = get_channel_dim(x, self._data_format)
                w_depthwise = create_weight(
                    "w_depth", [num_possible_inputs, filter_size * filter_size * inp_c],
                    initializer=_KERNEL_INITIALIZER)
                w_depthwise = w_depthwise[prev_cell, :]
                w_depthwise = tf.reshape(
                    w_depthwise, [filter_size, filter_size, inp_c, 1])
                w_pointwise = create_weight(
                    "w_point", [num_possible_inputs, inp_c * out_filters],
                    initializer=_KERNEL_INITIALIZER)
                w_pointwise = w_pointwise[prev_cell, :]
                w_pointwise = tf.reshape(w_pointwise, [1, 1, inp_c, out_filters])
                with tf.variable_scope("bn"):
                    zero_init = tf.initializers.zeros(dtype=tf.float32)
                    one_init = tf.initializers.ones(dtype=tf.float32)
                    # Per-possible-input batch-norm parameters.
                    offset = create_weight(
                        "offset", [num_possible_inputs, out_filters],
                        initializer=zero_init)
                    scale = create_weight(
                        "scale", [num_possible_inputs, out_filters],
                        initializer=one_init)
                    offset = offset[prev_cell]
                    scale = scale[prev_cell]
                # The computations: relu -> separable conv -> fused batch-norm.
                x = tf.nn.relu(x)
                x = tf.nn.separable_conv2d(
                    x,
                    depthwise_filter=w_depthwise,
                    pointwise_filter=w_pointwise,
                    strides=[1, 1, 1, 1], padding="SAME",
                    data_format='NCHW' if self._data_format=='channels_first' else 'NHWC')
                #x = batch_normalization(x, self._data_format, self._is_training)
                # NOTE(review): is_training=True always uses batch statistics
                # -- confirm this is intentional (see _nas_conv).
                x, _, _ = tf.nn.fused_batch_norm(
                    x, scale, offset, epsilon=_BATCH_NORM_EPSILON, is_training=True,
                    data_format='NCHW' if self._data_format=='channels_first' else 'NHWC')
    return x
def _nas_cell(self, x, curr_cell, prev_cell, op_id, out_filters):
    """Build all candidate ops for one branch and select one by ``op_id``.

    Candidates (index = op_id): 0 identity, 1 sep-conv 3x3, 2 sep-conv 5x5,
    3 max-pool 3x3, 4 avg-pool 3x3.  Every candidate is constructed so its
    variables exist regardless of the sampled architecture; ``tf.stack``
    plus indexing then picks the requested branch in the graph.

    Args:
        x: Input feature-map tensor.
        curr_cell: Index of the cell currently being built.
        prev_cell: Index of the chosen input; selects weight slices.
        op_id: Which of the 5 candidate ops to output.
        out_filters: Number of output channels every candidate must produce.

    Returns:
        The output of the selected candidate op.
    """
    # NOTE(review): conv branches use curr_cell + 2 possible inputs while
    # this uses curr_cell + 1 -- confirm against the upstream ENAS code.
    num_possible_inputs = curr_cell + 1
    with tf.variable_scope('max_pool_3x3'):
        max_pool_3 = tf.layers.max_pooling2d(
            x, [3, 3], [1, 1], "SAME", data_format=self._data_format)
        max_pool_c = get_channel_dim(max_pool_3, self._data_format)
        if max_pool_c != out_filters:
            # 1x1 conv so the pooled branch matches out_filters channels.
            with tf.variable_scope("conv"):
                w = create_weight(
                    "w", [num_possible_inputs, max_pool_c * out_filters],
                    initializer=_KERNEL_INITIALIZER)
                w = w[prev_cell]
                w = tf.reshape(w, [1, 1, max_pool_c, out_filters])
                max_pool_3 = tf.nn.relu(max_pool_3)
                max_pool_3 = tf.nn.conv2d(max_pool_3, w, strides=[1, 1, 1, 1], padding="SAME",
                                          data_format='NCHW' if self._data_format == 'channels_first' else 'NHWC')
                max_pool_3 = batch_normalization(max_pool_3, is_training=True,  #self._is_training,
                                                 data_format=self._data_format)
    with tf.variable_scope('avg_pool_3x3'):
        avg_pool_3 = tf.layers.average_pooling2d(
            x, [3, 3], [1, 1], "SAME", data_format=self._data_format)
        avg_pool_c = get_channel_dim(avg_pool_3, self._data_format)
        if avg_pool_c != out_filters:
            # Same 1x1 channel-matching conv as the max-pool branch.
            with tf.variable_scope("conv"):
                w = create_weight(
                    "w", [num_possible_inputs, avg_pool_c * out_filters],
                    initializer=_KERNEL_INITIALIZER)
                w = w[prev_cell]
                w = tf.reshape(w, [1, 1, avg_pool_c, out_filters])
                avg_pool_3 = tf.nn.relu(avg_pool_3)
                avg_pool_3 = tf.nn.conv2d(avg_pool_3, w, strides=[1, 1, 1, 1], padding="SAME",
                                          data_format='NCHW' if self._data_format == 'channels_first' else 'NHWC')
                avg_pool_3 = batch_normalization(avg_pool_3, is_training=True,  #self._is_training,
                                                 data_format=self._data_format)
    x_c = get_channel_dim(x, self._data_format)
    if x_c != out_filters:
        # The identity branch also needs a 1x1 conv when channels differ.
        with tf.variable_scope("x_conv"):
            w = create_weight("w", [num_possible_inputs, x_c * out_filters],
                              initializer=_KERNEL_INITIALIZER)
            w = w[prev_cell]
            w = tf.reshape(w, [1, 1, x_c, out_filters])
            x = tf.nn.relu(x)
            x = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME",
                             data_format='NCHW' if self._data_format == 'channels_first' else 'NHWC')
            x = batch_normalization(x, is_training=True, data_format=self._data_format)
    out = [
        x,
        self._nas_sep_conv(x, curr_cell, prev_cell, 3, out_filters),
        self._nas_sep_conv(x, curr_cell, prev_cell, 5, out_filters),
        max_pool_3,
        avg_pool_3
    ]
    # All 5 candidates were built; pick the sampled one by index.
    out = tf.stack(out, axis=0)
    out = out[op_id, :, :, :, :]
    return out
def _cell_base(self, last_inputs, inputs, is_training):
    """Normalize the two cell inputs before the NAS cell body runs.

    ``last_inputs`` is adapted (shape/channels) to ``inputs`` via
    ``_reduce_prev_layer``; ``inputs`` goes through relu -> 1x1 conv ->
    batch-norm so both inputs carry ``self._filter_size`` channels.

    Returns:
        Tuple of (transformed last_inputs, transformed inputs).
    """
    num_filters = self._filter_size
    fmt = self._data_format
    #is_training = self._is_training

    with tf.variable_scope('transforme_last_inputs'):
        # First cell has no previous output; fall back to the current input.
        if last_inputs is None:
            last_inputs = inputs
        last_inputs = self._reduce_prev_layer(last_inputs, inputs, is_training)

    with tf.variable_scope('transforme_inputs'):
        inputs = tf.nn.relu(inputs)
        with tf.variable_scope('1x1'):
            inputs = tf.layers.conv2d(
                inputs=inputs,
                filters=num_filters,
                kernel_size=1,
                strides=1,
                padding='SAME',
                use_bias=_USE_BIAS,
                kernel_initializer=_KERNEL_INITIALIZER,
                data_format=fmt)
        with tf.variable_scope('bn'):
            inputs = batch_normalization(inputs, fmt, is_training=is_training)

    return last_inputs, inputs
def __call__(self, inputs, filter_scaling=1, strides=1,
last_inputs=None, cell_num=-1):
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._filters * filter_scaling)
num_nodes = self._num_nodes
dag = self._dag
data_format = self._data_format
# node 1 and node 2 are last_inputs and inputs respectively
# begin processing from node 3
last_inputs, inputs = self._cell_base(last_inputs, inputs, is_training=True)
layers = [last_inputs, inputs]
used = []
for i in xrange(num_nodes):
prev_layers = tf.stack(layers, axis=0)
with tf.variable_scope('cell_{}'.format(i+1)):
with tf.variable_scope('x'):
x_id = dag[4*i]
x_op = dag[4*i+1]
x = prev_layers[x_id, :, :, :, :]
x = self._nas_cell(x, i, x_id, x_op, self._filter_size)
x_used = tf.one_hot(x_id, depth=num_nodes+2, dtype=tf.int32)
with tf.variable_scope('y'):
y_id = dag[4*i+2]
y_op = dag[4*i+3]
y = prev_layers[y_id, :, :, :, :]
y = self._nas_cell(y, i, y_id, y_op, self._filter_size)
y_used = tf.one_hot(y_id, depth=num_nodes+2, dtype=tf.int32)
output = x + y
used.extend([x_used, y_used])
layers.append(output)
used = tf.add_n(used)
indices = tf.where(tf.equal(used, 0))
indices = tf.to_int32(indices)
indices = tf.reshape(indices, [-1])
num_outs = tf.size(indices)
out = tf.stack(layers, axis=0)
out = tf.gather(out, indices, axis=0)
inp = prev_layers[0]
if self._data_format == "channels_last":
N = tf.shape(inp)[0]
H = | |
<gh_stars>1-10
######################################################################
# <NAME>, May 2019
#
# Methodology
#
# 1) Gather the netCDF files from the different ECCC stations into a single
#    netCDF file.
#
# 2) Scan the annual source files for a variable and save what is found into
#    netCDF files. The quality flags are applied and the unit conversions
#    are performed as well.
#
# Obtained via http://climate.weather.gc.ca/index_e.html by clicking on 'about the data'
#######################################################################
import itertools
import logging
import tempfile
import time
from calendar import monthrange
from datetime import datetime as dt
from logging import config
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import xarray as xr
from dask.diagnostics import ProgressBar
from miranda.scripting import LOGGING_CONFIG
from ._utils import daily_metadata, hourly_metadata
config.dictConfig(LOGGING_CONFIG)
__all__ = [
"aggregate_stations",
"convert_hourly_flat_files",
"convert_daily_flat_files",
"merge_converted_variables",
]
def convert_hourly_flat_files(
    source_files: Union[str, Path],
    output_folder: Union[str, Path],
    variables: Union[str, int, List[Union[str, int]]],
    missing_value: int = -9999,
) -> None:
    """Convert hourly ECCC fixed-width station files to per-station NetCDF.

    For every requested variable code this scans the ``HLY*`` source files,
    extracts matching observations, applies the quality flags and unit
    conversions described by ``hourly_metadata``, and writes one NetCDF file
    per station under ``output_folder/<nc_name>/<station_code>/``.

    Parameters
    ----------
    source_files : str or Path or List
        Directory to scan recursively for ``HLY*`` files, a single file,
        or an explicit list of files.
    output_folder : str or Path
        Root folder for the generated NetCDF tree.
    variables : str or int or List[str or int]
        ECCC hourly variable code(s), e.g. ``076`` or ``123``.
    missing_value : int
        Sentinel used in the flat files for missing data.

    Returns
    -------
    None
    """
    func_time = time.time()

    if isinstance(variables, (str, int)):
        variables = [variables]

    for variable_code in variables:
        info = hourly_metadata(variable_code)
        variable_code = str(variable_code).zfill(3)
        variable_name = info["standard_name"]
        variable_file_name = info["nc_name"]

        # Fixed-width layout: station metadata then one (value, flag) pair
        # per hour of the day.
        col_names = "code year month day code_var ".split()
        for i in range(1, 25):
            col_names.append("D{:0n}".format(i))
            col_names.append("F{:0n}".format(i))

        rep_nc = Path(output_folder).joinpath(variable_file_name)
        rep_nc.mkdir(parents=True, exist_ok=True)

        # Collect the files to process.
        list_files = list()
        if isinstance(source_files, list):
            # BUG FIX: extend, not append -- appending nested the list and
            # the read loop then received a list instead of a file path.
            list_files.extend(source_files)
        elif Path(source_files).is_file():
            list_files.append(source_files)
        elif 262 < int(variable_code) <= 280:
            # Precipitation-type codes live in the RCS subset of files.
            list_files.extend(
                [f for f in Path(source_files).rglob("HLY*RCS*") if f.is_file()]
            )
        else:
            list_files.extend(
                [f for f in Path(source_files).rglob("HLY*") if f.is_file()]
            )
        errored_files = list()
        for fichier in list_files:
            logging.info(f"Processing file: {fichier}.")
            # Create a dataframe from the files
            try:
                df = pd.read_fwf(
                    fichier,
                    widths=[7, 4, 2, 2, 3] + [6, 1] * 24,
                    names=col_names,
                    dtype={"year": int, "month": int, "day": int, "code_var": str},
                )
            except FileNotFoundError:
                logging.error(f"File {fichier} was not found.")
                errored_files.append(fichier)
                continue
            except Exception:
                # Deliberately broad: a malformed file should not abort the
                # whole conversion run.
                logging.error(
                    f"File {fichier} was unable to be read. This is probably an issue with the file."
                )
                errored_files.append(fichier)
                continue

            # Loop through the station codes
            l_codes = df["code"].unique()
            for code in l_codes:
                df_code = df[df["code"] == code]

                # Abort if the variable is not found
                if variable_code not in df_code["code_var"].unique():
                    logging.info(
                        "Variable `{}` not found for station code: {}. Continuing...".format(
                            variable_file_name, code
                        )
                    )
                    continue

                # Treat the data
                logging.info(
                    "Converting `{}` for station code: {}".format(
                        variable_file_name, code
                    )
                )

                # Dump the data into a DataFrame
                df_var = df_code[df_code["code_var"] == variable_code].copy()

                # Mask the data according to the missing values flag
                df_var = df_var.replace(missing_value, np.nan)

                # Decode the values and flags
                dfd = df_var.loc[:, ["D{:0n}".format(i) for i in range(1, 25)]]
                dff = df_var.loc[:, ["F{:0n}".format(i) for i in range(1, 25)]]

                # Remove the "NaN" flag
                dff = dff.fillna("")

                # Use the flag to mask the values
                try:
                    # np.asfarray was removed in NumPy 2.0; asarray with a
                    # float dtype is the byte-identical replacement.
                    val = np.asarray(dfd.values, dtype=float)
                except ValueError as e:
                    logging.error(f"{e} raised from {dfd}, continuing...")
                    continue
                flag = dff.values
                mask = np.isin(flag, info["missing_flags"])
                val[mask] = np.nan

                # Treat according to units conversions
                val = val * info["scale_factor"] + info["add_offset"]

                # Create the DataArray: one timestamp per hour of each row.
                dates = dict(time=list())
                for index, row in df_var.iterrows():
                    for h in range(0, 24):
                        dates["time"].append(
                            dt(int(row.year), int(row.month), int(row.day), h)
                        )

                ds = xr.Dataset()
                da_val = xr.DataArray(val.flatten(), coords=dates, dims=["time"])
                da_val = da_val.rename(variable_file_name)
                da_val.attrs["units"] = info["nc_units"]
                da_val.attrs["id"] = code
                da_val.attrs["element_number"] = variable_code
                da_val.attrs["standard_name"] = variable_name
                da_val.attrs["long_name"] = info["long_name"]

                da_flag = xr.DataArray(flag.flatten(), coords=dates, dims=["time"])
                da_flag.attrs["long_name"] = "data flag"
                da_flag.attrs["note"] = "See ECCC technical documentation for details"

                ds[variable_file_name] = da_val
                ds["flag"] = da_flag

                # Save the file in NetCDF format
                start_year = ds.time.dt.year.values[0]
                end_year = ds.time.dt.year.values[-1]

                station_folder = rep_nc.joinpath(str(code))
                station_folder.mkdir(parents=True, exist_ok=True)

                if start_year == end_year:
                    f_nc = "{c}_{vc}_{v}_{sy}.nc".format(
                        c=code, vc=variable_code, v=variable_file_name, sy=start_year
                    )
                else:
                    f_nc = "{c}_{vc}_{v}_{sy}_{ey}.nc".format(
                        c=code,
                        vc=variable_code,
                        v=variable_file_name,
                        sy=start_year,
                        ey=end_year,
                    )

                ds.attrs["Conventions"] = "CF-1.7"
                ds.attrs[
                    "title"
                ] = "Environment and Climate Change Canada (ECCC) weather eccc"
                ds.attrs[
                    "history"
                ] = "{}: Merged from multiple individual station files to n-dimensional array.".format(
                    dt.now().strftime("%Y-%m-%d %X")
                )
                ds.attrs["version"] = f"v{dt.now().strftime('%Y.%m')}"
                ds.attrs["institution"] = "Environment and Climate Change Canada (ECCC)"
                ds.attrs[
                    "source"
                ] = "Weather Station data <<EMAIL>>"
                ds.attrs[
                    "references"
                ] = "https://climate.weather.gc.ca/doc/Technical_Documentation.pdf"
                ds.attrs[
                    "comment"
                ] = "Acquired on demand from data specialists at ECCC Climate Services / Services Climatiques"
                ds.attrs[
                    "redistribution"
                ] = "Redistribution policy unknown. For internal use only."

                ds.to_netcdf(station_folder.joinpath(f_nc))

        # Previously collected but never reported.
        if errored_files:
            logging.warning(
                "Files with errors: {}".format(", ".join(map(str, errored_files)))
            )

    logging.warning(
        "Process completed in {:.2f} seconds".format(time.time() - func_time)
    )
def convert_daily_flat_files(
    source_files: Union[str, Path],
    output_folder: Union[str, Path],
    variables: Union[str, int, List[Union[str, int]]],
    missing_value: int = -9999,
) -> None:
    """Convert daily ECCC fixed-width station files to per-station NetCDF.

    For every requested variable code this scans the ``*DLY*`` source files,
    extracts matching observations, applies the quality flags and unit
    conversions described by ``daily_metadata``, and writes one NetCDF file
    per station under ``output_folder/<nc_name>/<station_code>/``.

    Parameters
    ----------
    source_files : str or Path or List
        Directory to scan recursively for ``*DLY*`` files, a single file,
        or an explicit list of files.
    output_folder : str or Path
        Root folder for the generated NetCDF tree.
    variables : str or int or List[str or int]
        Variable codes (001, 002, 103, etc.)
    missing_value : int
        Sentinel used in the flat files for missing data.

    Returns
    -------
    None
    """
    func_time = time.time()

    if isinstance(variables, (str, int)):
        variables = [variables]

    for variable_code in variables:
        info = daily_metadata(variable_code)
        variable_code = str(variable_code).zfill(3)
        nc_name = info["nc_name"]

        # Fixed-width layout: station metadata then one (value, flag) pair
        # per day of the month.
        titre_colonnes = "code year month code_var".split()
        for i in range(1, 32):
            titre_colonnes.append("D{:0n}".format(i))
            titre_colonnes.append("F{:0n}".format(i))

        # Create the output directory
        rep_nc = Path(output_folder).joinpath(nc_name)
        rep_nc.mkdir(parents=True, exist_ok=True)

        # Collect the files to process.
        list_files = list()
        if isinstance(source_files, list):
            # BUG FIX: extend, not append -- appending nested the list and
            # the read loop then received a list instead of a file path.
            list_files.extend(source_files)
        elif Path(source_files).is_file():
            list_files.append(source_files)
        else:
            list_files.extend(
                [f for f in Path(source_files).rglob("*DLY*") if f.is_file()]
            )
        errored_files = list()
        for fichier in list_files:
            logging.info("Processing file: {}.".format(fichier))

            # Create a Pandas DataFrame from the files
            try:
                df = pd.read_fwf(
                    fichier,
                    widths=[7, 4, 2, 3] + [6, 1] * 31,
                    names=titre_colonnes,
                    dtype={"year": int, "month": int, "code_var": str},
                )
            except ValueError:
                logging.error(
                    "File {} was unable to be read. This is probably an issue with the file.".format(
                        fichier
                    )
                )
                errored_files.append(fichier)
                continue

            # Loop through the station codes
            l_codes = df["code"].unique()
            for code in l_codes:
                df_code = df[df["code"] == code]

                # Abort if the variable is not present
                if variable_code not in df_code["code_var"].unique():
                    logging.info(
                        "Variable `{}` not found for station `{}` in file {}. Continuing...".format(
                            nc_name, code, fichier
                        )
                    )
                    continue

                # Perform the data treatment
                logging.info("Converting {} for station code: {}".format(nc_name, code))

                # Dump the values into a DataFrame
                df_var = df_code[df_code["code_var"] == variable_code].copy()

                # Apply the mask according to the NaN value
                df_var = df_var.replace(missing_value, np.nan)

                # Decoding the values and flags
                dfd = df_var.loc[:, ["D{:0n}".format(i) for i in range(1, 32)]]
                dff = df_var.loc[:, ["F{:0n}".format(i) for i in range(1, 32)]]

                # Remove the "NaN" flag
                dff = dff.fillna("")

                try:
                    # Use the flag to mask the values.
                    # np.asfarray was removed in NumPy 2.0; asarray with a
                    # float dtype is the byte-identical replacement.
                    val = np.asarray(dfd.values, dtype=float)
                    flag = dff.values
                    mask = np.isin(flag, info["missing_flags"])
                    val[mask] = np.nan
                except ValueError as e:
                    # Previously skipped silently; log for consistency with
                    # the hourly converter.
                    logging.error(f"{e} raised from {dfd}, continuing...")
                    continue

                # Adjust units
                val = val * info["scale_factor"] + info["add_offset"]

                # Create the DataArray and concatenate values and flags based
                # on the day-length of each month.
                date_range = dict(time=list())
                value_days = list()
                flag_days = list()
                for i, (index, row) in enumerate(df_var.iterrows()):
                    period = pd.Period(year=row.year, month=row.month, freq="M")
                    dates = pd.Series(
                        pd.date_range(
                            start=period.start_time, end=period.end_time, freq="D"
                        )
                    )
                    date_range["time"].extend(dates)

                    value_days.extend(val[i][range(monthrange(row.year, row.month)[1])])
                    flag_days.extend(flag[i][range(monthrange(row.year, row.month)[1])])

                ds = xr.Dataset()
                da_val = xr.DataArray(value_days, coords=date_range, dims=["time"])
                da_val = da_val.rename(nc_name)
                da_val.attrs["units"] = info["nc_units"]
                da_val.attrs["id"] = code
                da_val.attrs["element_number"] = variable_code
                da_val.attrs["standard_name"] = info["standard_name"]
                da_val.attrs["long_name"] = info["long_name"]

                da_flag = xr.DataArray(flag_days, coords=date_range, dims=["time"])
                da_flag.attrs["long_name"] = "data flag"
                da_flag.attrs["note"] = "See ECCC technical documentation for details"

                ds[nc_name] = da_val
                ds["flag"] = da_flag

                # Save as a NetCDF file
                start_year = ds.time.dt.year.values[0]
                end_year = ds.time.dt.year.values[-1]

                station_folder = rep_nc.joinpath(str(code))
                station_folder.mkdir(parents=True, exist_ok=True)

                if start_year == end_year:
                    f_nc = "{c}_{vc}_{v}_{sy}.nc".format(
                        c=code, vc=variable_code, v=nc_name, sy=start_year
                    )
                else:
                    f_nc = "{c}_{vc}_{v}_{sy}_{ey}.nc".format(
                        c=code,
                        vc=variable_code,
                        v=nc_name,
                        sy=start_year,
                        ey=end_year,
                    )

                ds.attrs["Conventions"] = "CF-1.7"
                ds.attrs[
                    "title"
                ] = "Environment and Climate Change Canada (ECCC) weather eccc"
                ds.attrs[
                    "history"
                ] = "{}: Merged from multiple individual station files to n-dimensional array.".format(
                    dt.now().strftime("%Y-%m-%d %X")
                )
                ds.attrs["version"] = "v{}".format(dt.now().strftime("%Y.%m"))
                ds.attrs["institution"] = "Environment and Climate Change Canada (ECCC)"
                ds.attrs[
                    "source"
                ] = "Weather Station data <<EMAIL>>"
                ds.attrs[
                    "references"
                ] = "https://climate.weather.gc.ca/doc/Technical_Documentation.pdf"
                ds.attrs[
                    "comment"
                ] = "Acquired on demand from data specialists at ECCC Climate Services / Services Climatiques"
                ds.attrs[
                    "redistribution"
                ] = "Redistribution policy unknown. For internal use only."

                ds.to_netcdf(station_folder.joinpath(f_nc))

        # Previously collected but never reported.
        if errored_files:
            logging.warning(
                "Files with errors: {}".format(", ".join(map(str, errored_files)))
            )

    logging.warning(
        "Process completed in {:.2f} seconds".format(time.time() - func_time)
    )
def aggregate_stations(
source_files: Optional[Union[str, Path]] = None,
output_folder: Optional[Union[str, Path]] = None,
station_metadata: Union[str, Path] = None,
time_step: str = "h",
variables: Optional[Union[str, int, List[Union[str, | |
it again. Use cmd manually_correct_theme')
if item.theme_end != -1 and item.type == "episode":
if (not skip_done and item.correct_theme_start) or not item.correct_theme_start:
click.echo('Found theme_start at %s %s theme_end %s %s' % (item.theme_start,
item.theme_start_str, item.theme_end, item.theme_end_str))
client.playMedia(media, offset=item.theme_start * 1000)
time.sleep(1)
start_match = click.prompt('Was theme_start at %s correct? [y or MM:SS]' % item.theme_start_str)
if start_match:
if start_match in ['y', 'yes']:
item.correct_theme_start = item.theme_start
else:
item.correct_theme_start = to_sec(start_match)
if (not skip_done and item.correct_theme_end) or not item.correct_theme_end:
client.playMedia(media, offset=item.theme_end * 1000)
end_match = click.prompt('Was theme_end at %s correct? [y or MM:SS]' % item.theme_end_str)
if end_match:
if end_match in ['y', 'yes']:
item.correct_theme_end = item.theme_end
else:
item.correct_theme_end = to_sec(end_match)
if item.ffmpeg_end:
if (not skip_done and item.correct_ffmpeg) or not item.correct_ffmpeg:
click.echo('Found ffmpeg_end at sec %s time %s' % (item.ffmpeg_end, item.ffmpeg_end_str))
if item.ffmpeg_end > 30:
j = item.ffmpeg_end - 20
else:
j = item.ffmpeg_end
client.playMedia(media, offset=j * 1000)
time.sleep(1)
match = click.prompt('Was ffmpeg_end at %s correct? [y or MM:SS]' % item.ffmpeg_end_str)
if match:
if match.lower() in ['y', 'yes']:
item.correct_ffmpeg = item.ffmpeg_end
else:
item.correct_ffmpeg = to_sec(match)
# This needs to be tested manually.
if item.credits_start and item.credits_start != 1:
if (not skip_done and item.correct_credits_start) or not item.correct_credits_start:
click.echo('Found credits start as sec %s time %s' % (item.credits_start, item.credits_start_str))
client.playMedia(media, offset=item.credits_start - 10)
time.sleep(1)
match = click.prompt('Did the credits start at %s correct? [y or MM:SS]' % item.credits_start_str)
if match:
if match.lower() in ['y', 'yes']:
item.correct_credits_start = item.credits_start
else:
item.correct_credits_start = to_sec(match)
click.clear()
# Commit this shit after each loop.
if se.dirty:
se.commit()
click.echo('Done')
@cli.command()
@click.option('-n', '--name', help='Search for a show.', default=None)
@click.option('-s', '--sample', default=0, help='Process N episodes of all shows.', type=int)
@click.option('-t', '--threads', help='Threads to uses', default=1, type=int)
@click.option('-sd', '--skip_done', help='Skip media items that exist in the db', default=True, is_flag=True)
def process(name, sample, threads, skip_done):
    """Manual process some/all eps.

    You will be asked for what you want to process.

    Args:
        name (None): Pass a name of a show you want to process.
        sample (int): Process x eps for all shows.
        threads (int): How many threads to use.
        skip_done (bool): Should we skip stuff that is processed.
            NOTE(review): declared as a flag with default=True, so it cannot
            be switched off from the CLI -- confirm intent.

    Return:
        None
    """
    global HT
    all_items = []

    if name:
        medias = find_all_movies_shows()
        medias = [s for s in medias if s.title.lower().startswith(name.lower())]
        medias = choose('Select what item to process', medias, 'title')

        for media in medias:
            if media.TYPE == 'show':
                eps = media.episodes()
                eps = choose('Select episodes', eps, lambda x: '%s %s' % (x._prettyfilename(), x.title))
                all_items += eps
            else:
                all_items.append(media)

    if sample:
        def lol(i):
            if i.TYPE == 'show':
                x = i.episodes()[:sample]
                return all_items.extend(x)
            else:
                return all_items.append(i)
        find_all_movies_shows(lol)

    if skip_done:
        # Build a ratingKey lookup once, then filter into a fresh list.
        # BUG FIX: the old code removed elements from all_items while
        # iterating over it, which silently skips the element following
        # each removal.
        with session_scope() as se:
            processed = {item.ratingKey: item.prettyname
                         for item in se.query(Processed).all()}

        remaining = []
        for ep in all_items:
            if ep.ratingKey in processed:
                click.secho("Removing %s as it's already processed" % processed[ep.ratingKey], fg='red')
            else:
                remaining.append(ep)
        all_items = remaining

    HT = get_hashtable()

    def prot(item):
        # Wrapper so one failing item does not kill the whole pool run.
        try:
            process_to_db(item)
        except Exception as e:
            logging.error(e, exc_info=True)

    if all_items:
        p = Pool(threads)

        # Download all the themes first, skip the ones that we already have..
        gr = set([i.grandparentRatingKey for i in all_items if i.TYPE == 'episode']) - set(HT.get_themes().keys())
        LOG.debug('Downloading theme for %s shows this might take a while..', len(gr))
        if len(gr):
            sh = p.map(PMS.fetchItem, gr)
            try:
                p.map(HT.has_theme, sh)
            except KeyboardInterrupt:
                pass
        try:
            p.map(prot, all_items)
        except KeyboardInterrupt:
            p.terminate()
@cli.command()
@click.argument('name', type=click.Path(exists=True))
@click.option('-trim', default=600, help='Only get the first x seconds', type=int)
@click.option('-dev', default=7, help='Accepted deviation between audio and video', type=int)
@click.option('-da', default=0.5, type=float)
@click.option('-dv', default=0.5, type=float)
@click.option('-pix_th', default=0.10, type=float)
@click.option('-au_db', default=50, type=int)
def ffmpeg_process(name, trim, dev, da, dv, pix_th, au_db):  # pragma: no cover
    """Manually exercise find_offset_ffmpeg, exposing its tuning knobs."""
    offset = find_offset_ffmpeg(
        name,
        trim=trim,
        dev=dev,
        duration_audio=da,
        duration_video=dv,
        pix_th=pix_th,
        au_db=au_db,
    )
    # Print the detected offset and also return it for callers.
    click.echo(offset)
    return offset
@cli.command()
@click.option('-t', default='scene marker', type=click.Choice(['cut', 'scene marker', 'mute', 'commercial break']),
              help='What type of edl is this')
@click.option('-sp', '--save_path', default=None)
def create_edl_from_db(t, save_path):  # pragma: no cover
    """Write an .edl file of type `t` for every processed item in the db.

    Args:
        t (str): The edl marker type to emit (key into edl.TYPES).
        save_path (None, str): Write the edl files here instead of next to
            the media file.
    """
    with session_scope() as se:
        db_items = se.query(Processed).all()
        for item in db_items:
            # Maybe remove this later?
            if save_path:
                loc = edl.create_edl_path(os.path.join(save_path, os.path.basename(item.location)))
            else:
                loc = item.location  # handle remapping?
            try:
                # BUG FIX: the result used to be stored back into `t`,
                # clobbering the edl type so every item after the first
                # raised a KeyError on edl.TYPES[t].
                written = edl.write_edl(loc, edl.db_to_edl(item, edl.TYPES[t]))
                click.echo('Wrote %s' % written)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt and
                # SystemExit still propagate.
                LOG.exception('Failed to write edl.')
@cli.command()
@click.option('--name', default=None)
@click.option('--dur', default=600)
@click.option('--sample', default=None, type=int)
def add_hash_frame(name, dur, sample):  # pragma: no cover
    """Hash episode frames and store them in the images table.

    The stored hashes can later be used to extract intros etc.

    Args:
        name (None, str): Only offer shows whose title starts with this.
        dur (int): Only hash the first `dur` seconds of each file.
        sample (None, int): Non-interactive mode -- hash the first `sample`
            episodes of every season of every show.
    """
    all_items = []
    p = Pool(4)
    result = []

    @log_exception
    def to_db(media):
        """Hash one media file; runs inside the thread pool."""
        imgz = []
        for imghash, _, pos in hash_file(check_file_access(media), frame_range=False, end=dur):
            img = Images(ratingKey=media.ratingKey,
                         hex=str(imghash),
                         # BUG FIX: ndarray.tostring() was removed from
                         # NumPy; tobytes() returns the identical bytes.
                         hash=imghash.hash.tobytes(),
                         grandparentRatingKey=media.grandparentRatingKey,
                         parentRatingKey=media.parentRatingKey,
                         offset=pos,
                         time=to_time(pos / 1000))
            imgz.append(img)
        return imgz

    medias = find_all_movies_shows()
    if sample is None:
        if name:
            medias = [s for s in medias if s.title.lower().startswith(name.lower())]
        else:
            medias = [s for s in medias if s.TYPE == 'show']
        medias = choose('Select what item to process', medias, 'title')
        for media in medias:
            if media.TYPE == 'show':
                eps = media.episodes()
                eps = choose('Select episodes', eps, lambda x: '%s %s' % (x._prettyfilename(), x.title))
                all_items += eps
    else:
        for show in [s for s in medias if s.TYPE == 'show']:
            for season in show.seasons():
                try:
                    all_items.extend(season.episodes()[:sample])
                except Exception:  # pragma: no cover
                    # Narrowed from a bare except; a broken season should
                    # not abort the scan (and Ctrl-C now works here).
                    pass
    try:
        # This might take a while so lets make it easy to interupt.
        LOG.debug('Started to process %s items to the images table', len(all_items))
        result = p.map(to_db, all_items, 1)
    except KeyboardInterrupt:
        pass
    # Flatten the list of per-file lists before inserting.
    result = list(itertools.chain(*result))
    with session_scope() as ssee:
        ssee.add_all(result)
@cli.command()
@click.option('--name', default=None)
@click.option('--conf', default=0.7, type=float)
def test_hashing_visual(name, conf):  # pragma: no cover
    """Visualize the intro of ONE episode using hashes stored in the db.

    Frames whose hash appears in at least `conf` (fraction) of the season's
    episodes are considered part of the intro.

    Args:
        name (None, str): Only offer shows whose title starts with this.
        conf (float): Fraction of a season's episodes a hash must appear in.
    """
    from bw_plex.tools import visulize_intro_from_hashes
    medias = find_all_movies_shows()
    all_items = []
    if name:
        medias = [s for s in medias if s.title.lower().startswith(name.lower())]
    else:
        medias = [s for s in medias if s.TYPE == 'show']
    medias = choose('Select what item to process', medias, 'title')
    for media in medias:
        if media.TYPE == 'show':
            eps = media.episodes()
            eps = choose('Select episodes', eps, lambda x: '%s %s' % (x._prettyfilename(), x.title))
            all_items += eps
    # Visualization only makes sense for a single file.
    assert len(all_items) == 1, 'visulize_intro_from_hashes only works on one file at the time'

    def find_intro_from_hexes_in_db(item):
        """Return hashes shared by >= conf fraction of the season's episodes."""
        d = defaultdict(set)
        new_hex = []
        stuff = []
        # NOTE(review): raw SQL built with %-formatting; the ratingKeys come
        # from Plex (presumably ints), but bound parameters would be safer.
        with session_scope() as se:
            eps = se.execute('select count(distinct ratingKey) from images where grandparentRatingKey = %s and parentRatingKey = %s' % (item.grandparentRatingKey, item.parentRatingKey))
            eps = list(eps)[0][0]
            LOG.debug('%s season %s has %s episodes', item.grandparentTitle, item.parentIndex, eps)
            stuff = se.execute('select * from images where grandparentRatingKey = %s and parentRatingKey = %s' % (item.grandparentRatingKey, item.parentRatingKey))
            stuff = list(stuff)
        # Map each frame hash to the set of episodes it occurs in.
        for s in stuff:
            d[s.hex].add(s.ratingKey)
        # Keep hashes present in at least conf * eps episodes.
        for k, v in d.items():
            if len(v) >= float(eps * float(conf)):
                new_hex.append(k)
        LOG.debug('Found %s hashes that are in %s percent of the episodes (%s) in this season', len(new_hex), eps, 100 * conf)
        return new_hex

    hexes = find_intro_from_hexes_in_db(all_items[0])
    visulize_intro_from_hashes(check_file_access(all_items[0]), hexes)
@cli.command()
@click.argument('fp')
@click.option('-t', type=click.Choice(['start', 'end']))
@click.option('--tvdbid')
@click.option('--timestamp', default=None)
@click.option('--gui', default=True)
def add_ref_frame(fp, t, tvdbid, timestamp, gui):  # pragma: no cover
    """Store a reference frame (intro start/end) in the Reference_Frame table.

    Args:
        fp (str): Path to a video file (with --timestamp) or to an image.
        t (str): Whether the frame marks the 'start' or 'end' of the intro.
        tvdbid: The show's tvdb id the frame belongs to.
        timestamp (None, str): Position in the video to grab the frame from.
        gui (bool): Use the interactive player instead of the options below.
    """
    import cv2
    if gui:
        from bw_plex.tools import play
        play(fp, key=tvdbid)
        return
    if fp.endswith(('.mp4', '.mkv', '.avi')) and timestamp:
        # Grab a single frame at the requested position.
        cap = cv2.VideoCapture(fp)
        ms = to_ms(timestamp)
        cap.set(cv2.CAP_PROP_POS_MSEC, ms)
        _, frame = cap.read()
    else:
        # Not a known video extension, so treat fp as an image path.
        frame = fp
    frames_hash = create_imghash(frame)
    # NOTE(review): unsure whether joining per-element hex() is still the
    # correct encoding here; using the raw frames would be better.
    frames_hex = ''.join(hex(i) for i in frames_hash.flatten())  # fixme?
    with session_scope() as se:
        try:
            # EAFP: rely on the unique lookup to detect duplicates.
            se.query(Reference_Frame).filter_by(hex=frames_hex).one()
            click.echo('This frame already exist in the db')
        except NoResultFound:
            frm = Reference_Frame(hex=frames_hex,
                                  type=t,
                                  tvdbid=tvdbid)
            se.add(frm)
            LOG.debug('Added %s to Reference_Frame table hex %s tvdbid %s', fp, frames_hex, tvdbid)
@cli.command()
@click.option('-fp', default=None, help='where to create the config file.')
def create_config(fp=None):  # pragma: no cover
    """Write a config file and report where it landed.

    Args:
        fp (str): Where to create the config file. If omitted it will be
            written to the default location.

    Returns:
        Filepath to config.ini.
    """
    target = INI_FILE if fp is None else fp
    conf_file = read_or_make(target).filename
    click.echo('Wrote configfile to %s' % conf_file)
    return conf_file
@cli.command()
@click.argument('name')
@click.argument('url')
@click.option('-t', '--type', default=None, type=click.Choice(['manual', 'tvtunes', 'plex', 'youtube', 'all']))
@click.option('-rk', help='Add rating key', default='auto')
@click.option('-jt', '--just_theme', default=False, is_flag=True)
@click.option('-rot', '--remove_old_theme', default=False, is_flag=True)
def manually_correct_theme(name, url, type, rk, just_theme, remove_old_theme): # pragma: no cover
"""Set the correct fingerprint of the show in the hashes.db and
process the eps of that show in the db against the new theme fingerprint.
Args:
name (str): name of the show
url (str): the youtube/tvtunes url or filepath to | |
<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright 2016 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client that manages Google Compute Engine.
** ComputeClient **
ComputeClient is a wrapper around Google Compute Engine APIs.
It provides a set of methods for managing a google compute engine project,
such as creating images, creating instances, etc.
Design philosophy: We tried to make ComputeClient as stateless as possible,
and it only keeps states about authentication. ComputeClient should be very
generic, and only knows how to talk to Compute Engine APIs.
"""
# pylint: disable=too-many-lines
import copy
import functools
import getpass
import logging
import os
import re
import six
from acloud import errors
from acloud.internal import constants
from acloud.internal.lib import base_cloud_client
from acloud.internal.lib import utils
from acloud.internal.lib.ssh import IP
logger = logging.getLogger(__name__)

# Retry budget for read-modify-write metadata updates that fail with
# HTTP 412 (metadata fingerprint conflict).
_MAX_RETRIES_ON_FINGERPRINT_CONFLICT = 10
# Dictionary keys used in GCE metadata/item payloads.
_METADATA_KEY = "key"
_METADATA_KEY_VALUE = "value"
_SSH_KEYS_NAME = "sshKeys"
_ITEMS = "items"
_METADATA = "metadata"
# Extracts the zone name from a "zones/<zone>" resource path.
_ZONE_RE = re.compile(r"^zones/(?P<zone>.+)")
# Quota metrics
_METRIC_CPUS = "CPUS"
_METRIC_DISKS_GB = "DISKS_TOTAL_GB"
_METRICS = [_METRIC_CPUS, _METRIC_DISKS_GB]
# Field names in the quota responses.
_USAGE = "usage"
_LIMIT = "limit"
# The minimum requirement to create an instance.
_REQUIRE_METRICS = {_METRIC_CPUS: 8, _METRIC_DISKS_GB: 1000}
# Template disk spec for the boot disk attached to new instances.
BASE_DISK_ARGS = {
    "type": "PERSISTENT",
    "boot": True,
    "mode": "READ_WRITE",
    "autoDelete": True,
    "initializeParams": {},
}
class OperationScope(object):
    """Enum of GCE operation scopes: zone, region or global.

    Used to pick the right operations API (zoneOperations,
    regionOperations or globalOperations).
    """
    ZONE = "zone"
    REGION = "region"
    GLOBAL = "global"
class PersistentDiskType(object):
    """Enum of GCE persistent disk types.

    pd-standard for regular hard disk.
    pd-ssd for solid state disk.
    """
    STANDARD = "pd-standard"
    SSD = "pd-ssd"
class ImageStatus(object):
    """Enum of the lifecycle states a GCE image can be in."""
    PENDING = "PENDING"
    READY = "READY"
    FAILED = "FAILED"
def _IsFingerPrintError(exc):
    """Tell whether an exception is a "Precondition Failed" HTTP error.

    GCE signals a metadata fingerprint conflict with HTTP code 412; such
    calls are safe to retry.

    Args:
        exc: Exception instance.

    Returns:
        Boolean. True if the exception is a "Precondition Failed" error.
    """
    if not isinstance(exc, errors.HttpError):
        return False
    return exc.code == 412
# pylint: disable=too-many-public-methods
class ComputeClient(base_cloud_client.BaseCloudApiClient):
"""Client that manages GCE."""
# API settings, used by BaseCloudApiClient.
API_NAME = "compute"
API_VERSION = "v1"
SCOPE = " ".join([
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_write"
])
# Default settings for gce operations
DEFAULT_INSTANCE_SCOPE = [
"https://www.googleapis.com/auth/androidbuild.internal",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write"
]
OPERATION_TIMEOUT_SECS = 30 * 60 # 30 mins
OPERATION_POLL_INTERVAL_SECS = 20
MACHINE_SIZE_METRICS = ["guestCpus", "memoryMb"]
ACCESS_DENIED_CODE = 403
    def __init__(self, acloud_config, oauth2_credentials):
        """Initialize.

        Args:
            acloud_config: An AcloudConfig object.
            oauth2_credentials: An oauth2client.OAuth2Credentials instance.
        """
        super(ComputeClient, self).__init__(oauth2_credentials)
        # Cache the project id; every API call below scopes to it.
        self._project = acloud_config.project
def _GetOperationStatus(self, operation, operation_scope, scope_name=None):
"""Get status of an operation.
Args:
operation: An Operation resource in the format of json.
operation_scope: A value from OperationScope, "zone", "region",
or "global".
scope_name: If operation_scope is "zone" or "region", this should be
the name of the zone or region, e.g. "us-central1-f".
Returns:
Status of the operation, one of "DONE", "PENDING", "RUNNING".
Raises:
errors.DriverError: if the operation fails.
"""
operation_name = operation["name"]
if operation_scope == OperationScope.GLOBAL:
api = self.service.globalOperations().get(
project=self._project, operation=operation_name)
result = self.Execute(api)
elif operation_scope == OperationScope.ZONE:
api = self.service.zoneOperations().get(
project=self._project,
operation=operation_name,
zone=scope_name)
result = self.Execute(api)
elif operation_scope == OperationScope.REGION:
api = self.service.regionOperations().get(
project=self._project,
operation=operation_name,
region=scope_name)
result = self.Execute(api)
if result.get("error"):
errors_list = result["error"]["errors"]
raise errors.DriverError(
"Get operation state failed, errors: %s" % str(errors_list))
return result["status"]
    def WaitOnOperation(self, operation, operation_scope, scope_name=None):
        """Wait for an operation to finish.

        Polls _GetOperationStatus every OPERATION_POLL_INTERVAL_SECS until it
        returns "DONE"; the timeout exception is presumably raised by
        utils.PollAndWait after OPERATION_TIMEOUT_SECS — confirm in utils.

        Args:
            operation: An Operation resource in the format of json.
            operation_scope: A value from OperationScope, "zone", "region",
                or "global".
            scope_name: If operation_scope is "zone" or "region", this should be
                the name of the zone or region, e.g. "us-central1-f".
        """
        timeout_exception = errors.GceOperationTimeoutError(
            "Operation hits timeout, did not complete within %d secs." %
            self.OPERATION_TIMEOUT_SECS)
        utils.PollAndWait(
            func=self._GetOperationStatus,
            expected_return="DONE",
            timeout_exception=timeout_exception,
            timeout_secs=self.OPERATION_TIMEOUT_SECS,
            sleep_interval_secs=self.OPERATION_POLL_INTERVAL_SECS,
            operation=operation,
            operation_scope=operation_scope,
            scope_name=scope_name)
def GetProject(self):
"""Get project information.
Returns:
A project resource in json.
"""
api = self.service.projects().get(project=self._project)
return self.Execute(api)
def GetRegionInfo(self):
"""Get region information that includes all quotas limit.
The region info example:
{"items":
[{"status": "UP",
"name": "asia-east1",
"quotas":
[{"usage": 92, "metric": "CPUS", "limit": 100},
{"usage": 640, "metric": "DISKS_TOTAL_GB", "limit": 10240},
...]]}
}
Returns:
A region resource in json.
"""
api = self.service.regions().list(project=self._project)
return self.Execute(api)
@staticmethod
def GetMetricQuota(regions_info, zone, metric):
"""Get CPU quota limit in specific zone and project.
Args:
regions_info: Dict, regions resource in json.
zone: String, name of zone.
metric: String, name of metric, e.g. "CPUS".
Returns:
A dict of quota information. Such as
{"usage": 100, "metric": "CPUS", "limit": 200}
"""
for region_info in regions_info["items"]:
if region_info["name"] in zone:
for quota in region_info["quotas"]:
if quota["metric"] == metric:
return quota
logger.info("Can't get %s quota info from zone(%s)", metric, zone)
return None
def EnoughMetricsInZone(self, zone):
"""Check the zone have enough metrics to create instance.
The metrics include CPUS and DISKS.
Args:
zone: String, name of zone.
Returns:
Boolean. True if zone have enough quota.
"""
regions_info = self.GetRegionInfo()
for metric in _METRICS:
quota = self.GetMetricQuota(regions_info, zone, metric)
if not quota:
logger.debug(
"Can't query the metric(%s) in zone(%s)", metric, zone)
return False
if quota[_LIMIT] - quota[_USAGE] < _REQUIRE_METRICS[metric]:
logger.debug(
"The metric(%s) is over limit in zone(%s)", metric, zone)
return False
return True
def GetDisk(self, disk_name, zone):
"""Get disk information.
Args:
disk_name: A string.
zone: String, name of zone.
Returns:
An disk resource in json.
https://cloud.google.com/compute/docs/reference/latest/disks#resource
"""
api = self.service.disks().get(
project=self._project, zone=zone, disk=disk_name)
return self.Execute(api)
def CheckDiskExists(self, disk_name, zone):
"""Check if disk exists.
Args:
disk_name: A string
zone: String, name of zone.
Returns:
True if disk exists, otherwise False.
"""
try:
self.GetDisk(disk_name, zone)
exists = True
except errors.ResourceNotFoundError:
exists = False
logger.debug("CheckDiskExists: disk_name: %s, result: %s", disk_name,
exists)
return exists
def CreateDisk(self,
disk_name,
source_image,
size_gb,
zone,
source_project=None,
disk_type=PersistentDiskType.STANDARD):
"""Create a gce disk.
Args:
disk_name: String
source_image: String, name of the image.
size_gb: Integer, size in gb.
zone: String, name of the zone, e.g. us-central1-b.
source_project: String, required if the image is located in a different
project.
disk_type: String, a value from PersistentDiskType, STANDARD
for regular hard disk or SSD for solid state disk.
"""
source_project = source_project or self._project
source_image = "projects/%s/global/images/%s" % (
source_project, source_image) if source_image else None
logger.info("Creating disk %s, size_gb: %d, source_image: %s",
disk_name, size_gb, str(source_image))
body = {
"name": disk_name,
"sizeGb": size_gb,
"type": "projects/%s/zones/%s/diskTypes/%s" % (self._project, zone,
disk_type),
}
api = self.service.disks().insert(
project=self._project,
sourceImage=source_image,
zone=zone,
body=body)
operation = self.Execute(api)
try:
self.WaitOnOperation(
operation=operation,
operation_scope=OperationScope.ZONE,
scope_name=zone)
except errors.DriverError:
logger.error("Creating disk failed, cleaning up: %s", disk_name)
if self.CheckDiskExists(disk_name, zone):
self.DeleteDisk(disk_name, zone)
raise
logger.info("Disk %s has been created.", disk_name)
def DeleteDisk(self, disk_name, zone):
"""Delete a gce disk.
Args:
disk_name: A string, name of disk.
zone: A string, name of zone.
"""
logger.info("Deleting disk %s", disk_name)
api = self.service.disks().delete(
project=self._project, zone=zone, disk=disk_name)
operation = self.Execute(api)
self.WaitOnOperation(
operation=operation,
operation_scope=OperationScope.ZONE,
scope_name=zone)
logger.info("Deleted disk %s", disk_name)
def DeleteDisks(self, disk_names, zone):
"""Delete multiple disks.
Args:
disk_names: A list of disk names.
zone: A string, name of zone.
Returns:
A tuple, (deleted, failed, error_msgs)
deleted: A list of names of disks that have been deleted.
failed: A list of names of disks that we fail to delete.
error_msgs: A list of failure messages.
"""
if not disk_names:
logger.warning("Nothing to delete. Arg disk_names is not provided.")
return [], [], []
# Batch send deletion requests.
logger.info("Deleting disks: %s", disk_names)
delete_requests = {}
for disk_name in set(disk_names):
request = self.service.disks().delete(
project=self._project, disk=disk_name, zone=zone)
delete_requests[disk_name] = request
return self._BatchExecuteAndWait(
delete_requests, OperationScope.ZONE, scope_name=zone)
    def ListDisks(self, zone, disk_filter=None):
        """List disks.

        All result pages are aggregated into a single list by
        ListWithMultiPages.

        Args:
            zone: A string, representing zone name. e.g. "us-central1-f"
            disk_filter: A string representing a filter in format of
                         FIELD_NAME COMPARISON_STRING LITERAL_STRING
                         e.g. "name ne example-instance"
                         e.g. "name eq "example-instance-[0-9]+""

        Returns:
            A list of disks.
        """
        return self.ListWithMultiPages(
            api_resource=self.service.disks().list,
            project=self._project,
            zone=zone,
            filter=disk_filter)
def CreateImage(self,
image_name,
source_uri=None,
source_disk=None,
labels=None):
"""Create a Gce image.
Args:
image_name: String, name of image
source_uri: Full Google Cloud Storage URL where the disk image is
stored. e.g. "https://storage.googleapis.com/my-bucket/
avd-system-2243663.tar.gz"
source_disk: String, this should be the disk's selfLink value
(including zone and project), rather than the disk_name
e.g. https://www.googleapis.com/compute/v1/projects/
google.com:android-builds-project/zones/
us-east1-d/disks/<disk_name>
labels: Dict, will be added to the image's labels.
Raises:
errors.DriverError: | |
"""Core classes and functions for GuideMaker."""
import os
import re
import yaml
import logging
import gzip
import hashlib
import statistics
import nmslib
import regex
import gc
from typing import List, Dict, TypeVar, Generator
from itertools import product
from Bio import SeqIO
from Bio.SeqUtils import GC
from pybedtools import BedTool
from Bio import Seq
from copy import deepcopy
import pandas as pd
import numpy as np
import altair as alt
from guidemaker import doench_predict
from guidemaker import cfd_score_calculator
logger = logging.getLogger(__name__)
PandasDataFrame = TypeVar('pandas.core.frame.DataFrame')
pd.options.mode.chained_assignment = None
def is_gzip(filename: str) -> bool:
    """Return True if the file begins with the gzip magic number.

    Args:
        filename (str): Path of the file to inspect.

    Returns:
        bool: True when the first two bytes are the gzip magic (0x1f 0x8b).

    Raises:
        IOError: If the file cannot be opened.
    """
    try:
        with open(filename, "rb") as f:
            # Lazy %-style logging args: interpolation happens only if the
            # record is actually emitted (the originals formatted eagerly).
            logger.info("check if %s is gzipped", filename)
            return f.read(2) == b'\x1f\x8b'
    except IOError as e:
        logger.error("Could not open the file %s to determine if it was gzipped", filename)
        raise e
class PamTarget:
    """
    A Class representing a Protospacer Adjacent Motif (PAM) and targets.

    The class includes all targets for a given PAM as a dataframe, PAM and
    target attributes, and methods to find target and control sequences.
    """
    def __init__(self, pam: str, pam_orientation: str, dtype: str) -> None:
        """
        Pam __init__

        Args:
            pam (str): A DNA string in ambiguous IUPAC format
            pam_orientation (str): [5prime | 3prime ]
                5prime means the order is 5'-[pam][target]-3'
                3prime means the order is 5'-[target][pam]-3'
            dtype (str): hamming or leven

        Returns:
            None
        """
        # Validate that the PAM only uses IUPAC nucleotide codes.
        for letter in pam.upper():
            assert letter in ['A', 'C', 'G', 'T', 'M', 'R', 'W',
                              'S', 'Y', 'K', 'V', 'H', 'D', 'B', 'X', 'N']
        assert pam_orientation in ["3prime", "5prime"]
        self.pam: str = pam.upper()
        self.pam_orientation: str = pam_orientation
        self.dtype: str = dtype
    def __str__(self) -> str:
        """
        Return a short description of the PAM object.

        Returns:
            str: e.g. "A PAM object: NGG"
        """
        return "A PAM object: {self.pam}".format(self=self)
    def find_targets(self, seq_record_iter: object, target_len: int) -> PandasDataFrame:
        """
        Find all targets on a sequence that match for the PAM on both strand(s)

        Args:
            seq_record_iter (object): A Biopython SeqRecord iterator from SeqIO.parse
            target_len (int): The length of the target sequence

        Returns:
            PandasDataFrame: A pandas dataframe of matching targets
        """
        def reverse_complement(seq: str) -> str:
            """
            Reverse complement of the PAM sequence

            Args:
                seq (str): A DNA string

            Returns:
                str: A reverse complement of DNA string
            """
            bpseq = Seq.Seq(seq)
            return str(bpseq.reverse_complement())
        def pam2re(pam: str) -> str:
            """
            Convert an IUPAC ambiguous PAM to a Regex expression

            Args:
                pam (str): A DNA string

            Returns:
                str: A Regex expression
            """
            dnaval = {'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T',
                      'M': '[A|C]', 'R': '[A|G]', 'W': '[A|T]', 'S': '[C|G]',
                      'Y': '[C|T]', 'K': '[G|T]', 'V': '[A|C|G]', 'H': '[A|C|T]',
                      'D': '[A|G|T]', 'B': '[C|G|T]', 'X': '[G|A|T|C]', 'N': '[G|A|T|C]'}
            return "".join([dnaval[base] for base in pam])
        # 5prime means the order is 5'-[pam][target]-3'
        # 3prime means the order is 5'-[target][pam]-3'
        def check_target(seq: str, target_len: int) -> bool:
            """
            Check targets for guide length and DNA bases

            Args:
                seq (str): A DNA string
                target_len(int): Guide length

            Returns:
                bool: True or False
            """
            if len(seq) == target_len and all(letters in ['A', 'T', 'C', 'G'] for letters in seq): # if not ATCG in the target then ignore those targets
                return True
            return False
        def run_for_5p(pam_pattern: str, dnaseq: str, target_len: int) -> Generator:
            """
            Search for guides with 5prime pam orientation in the forward strand

            Args:
                pam_pattern (str): A DNA string representing PAM
                dnaseq (str): A DNA string representing genome
                target_len (int): Guide length

            Returns:
                (Generator): A generator with target_seq, exact_pam, start, stop, strand, and pam_orientation
            """
            for match_obj in regex.finditer(pattern=pam_pattern, string=dnaseq, overlapped=True):
                target_seq = dnaseq[match_obj.end(): match_obj.end() + target_len]
                # 30-nt context window around the site, presumably for
                # on-target scoring (doench_predict) — confirm downstream use.
                target_seq30 = dnaseq[match_obj.start()-3: match_obj.start()+27]
                ## 5'-[guide of 25 nt][exact pam, 3nt][next two]-3'
                if check_target(target_seq, target_len):
                    exact_pam = match_obj.group(0)
                    start = match_obj.end()
                    stop = match_obj.end() + target_len
                    # 5prime =True, 3prime = False
                    pam_orientation = True
                    # forward =True, reverse = False
                    strand = True
                    yield target_seq, exact_pam, start, stop, strand, pam_orientation, target_seq30
        def run_for_3p(pam_pattern, dnaseq, target_len) -> Generator:
            """
            Search for guides with 3prime pam orientation in the forward strand

            Args:
                pam_pattern (str): A DNA string representing PAM
                dnaseq (str): A DNA string representing genome
                target_len (int): Guide length

            Returns:
                (Generator): A generator with target_seq, exact_pam, start, stop, strand, and pam_orientation
            """
            for match_obj in regex.finditer(pattern=pam_pattern, string=dnaseq, overlapped=True):
                target_seq = dnaseq[match_obj.start() - target_len: match_obj.start()]
                # 30-nt context window around the site — see run_for_5p note.
                target_seq30 = dnaseq[match_obj.end()-27 :match_obj.end()+3]
                if check_target(target_seq, target_len):
                    exact_pam = match_obj.group(0)
                    start = match_obj.start() - target_len
                    stop = match_obj.start()
                    # 5prime =True, 3prime = False
                    pam_orientation = False
                    # forward =True, reverse = False
                    strand = True
                    yield target_seq, exact_pam, start, stop, strand, pam_orientation, target_seq30
        def run_rev_5p(pam_pattern, dnaseq, target_len) -> Generator:
            """
            Search for guides with 5prime pam orientation in the reverse strand

            Args:
                pam_pattern (str): A DNA string representing PAM
                dnaseq (str): A DNA string representing genome
                target_len (int): Guide length

            Returns:
                (Generator): A generator with target_seq, exact_pam, start, stop, strand, and pam_orientation
            """
            for match_obj in regex.finditer(pattern=pam_pattern, string=dnaseq, overlapped=True):
                # Coordinates stay on the forward strand; sequences are
                # reverse-complemented to read 5'->3' on the reverse strand.
                target_seq = reverse_complement(
                    dnaseq[match_obj.start() - target_len: match_obj.start()])
                target_seq30 = reverse_complement(
                    dnaseq[match_obj.end()-27:match_obj.end()+3])
                if check_target(target_seq, target_len):
                    exact_pam = reverse_complement(match_obj.group(0))
                    start = match_obj.start() - target_len
                    stop = match_obj.start()
                    # 5prime =True, 3prime = False
                    pam_orientation = True
                    # forward =True, reverse = False
                    strand = False
                    yield target_seq, exact_pam, start, stop, strand, pam_orientation, target_seq30
        def run_rev_3p(pam_pattern, dnaseq, target_len) -> Generator:
            """
            Search for guides with 3prime pam orientation in the reverse strand

            Args:
                pam_pattern (str): A DNA string representing PAM
                dnaseq (str): A DNA string representing genome
                target_len (int): Guide length

            Returns:
                (Generator): A generator with target_seq, exact_pam, start, stop, strand, and pam_orientation
            """
            for match_obj in regex.finditer(pattern=pam_pattern, string=dnaseq, overlapped=True):
                # Coordinates stay on the forward strand; sequences are
                # reverse-complemented to read 5'->3' on the reverse strand.
                target_seq = reverse_complement(
                    dnaseq[match_obj.end(): match_obj.end() + target_len])
                target_seq30 = reverse_complement(dnaseq[match_obj.start()-3:match_obj.start()+27])
                if check_target(target_seq, target_len):
                    exact_pam = reverse_complement(match_obj.group(0))
                    start = match_obj.end()
                    stop = match_obj.end() + target_len
                    # 5prime =True, 3prime = False
                    pam_orientation = False
                    # forward =True, reverse = False
                    strand = False
                    yield target_seq, exact_pam, start, stop, strand, pam_orientation, target_seq30
        # Collect per-record, per-strand frames, then concatenate once.
        target_list = []
        for record in seq_record_iter:
            record_id = record.id
            seq = str(record.seq)
            if self.pam_orientation == "5prime":
                # forward
                for5p = pd.DataFrame(run_for_5p(pam2re(self.pam), seq, target_len), columns=[
                    "target", "exact_pam", "start", "stop", "strand", "pam_orientation", "target_seq30"])
                for5p["seqid"] = record_id
                # string to boolean conversion is not straight - as all string were set to Trues- so change the encoding in functions above.
                # https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python/715455#715455
                for5p = for5p.astype({"target": 'str', "exact_pam": 'category', "start": 'uint32',
                                      "stop": 'uint32', "strand": 'bool', "pam_orientation": 'bool', "seqid": 'category'})
                target_list.append(for5p)
                # reverse
                rev5p = pd.DataFrame(run_rev_5p(pam2re(reverse_complement(self.pam)), seq, target_len), columns=[
                    "target", "exact_pam", "start", "stop", "strand", "pam_orientation","target_seq30"])
                rev5p["seqid"] = record_id
                rev5p = rev5p.astype({"target": 'str', "exact_pam": 'category', "start": 'uint32',
                                      "stop": 'uint32', "strand": 'bool', "pam_orientation": 'bool', "seqid": 'category'})
                target_list.append(rev5p)
                # Question? Append directly vs. concat then append? https://ravinpoudel.github.io/AppendVsConcat/
            elif self.pam_orientation == "3prime":
                # forward
                for3p = pd.DataFrame(run_for_3p(pam2re(self.pam), seq, target_len), columns=[
                    "target", "exact_pam", "start", "stop", "strand", "pam_orientation","target_seq30"])
                for3p["seqid"] = record_id
                for3p = for3p.astype({"target": 'str', "exact_pam": 'category', "start": 'uint32',
                                      "stop": 'uint32', "strand": 'bool', "pam_orientation": 'bool', "seqid": 'category'})
                target_list.append(for3p)
                # reverse
                rev3p = pd.DataFrame(run_rev_3p(pam2re(reverse_complement(self.pam)), seq, target_len), columns=[
                    "target", "exact_pam", "start", "stop", "strand", "pam_orientation","target_seq30"])
                rev3p["seqid"] = record_id
                rev3p = rev3p.astype({"target": 'str', "exact_pam": 'category', "start": 'uint32',
                                      "stop": 'uint32', "strand": 'bool', "pam_orientation": 'bool', "seqid": 'category'})
                target_list.append(rev3p)
            gc.collect() # clear memory after each chromosome
        df_targets = pd.concat(target_list, ignore_index=True)
        # Placeholder columns filled in by later processing steps.
        df_targets = df_targets.assign(seedseq=np.nan, hasrestrictionsite=np.nan, isseedduplicated=np.nan)
        df_targets = df_targets.astype({"seedseq": 'str', "isseedduplicated": 'bool'})
        df_targets = df_targets.assign(dtype=self.dtype)
        df_targets = df_targets.astype({"dtype": 'category'})
        return df_targets
class TargetProcessor:
"""
A Class representing a set of guide RNA targets.
The class includes all targets in a dataframe, methods to process target and a dict with edit distances for sequences.
"""
    def __init__(self, targets: PandasDataFrame, lsr: int, editdist: int = 2, knum: int = 2) -> None:
        """
        TargetProcessor __init__

        Args:
            targets (PandasDataFrame): Dataframe with output from class PamTarget
            lsr (int): Length of seed region
            editdist (int): Edit distance
            knum (int): Number of negative controls

        Returns:
            None
        """
        self.targets = targets # pandas dataframe
        self.lsr: int = lsr # length of seed region
        self.editdist: int = editdist
        self.knum: int = knum
        # The attributes below are populated later by index/search/statistics
        # methods; they stay None/empty until then.
        self.nmslib_index: object = None
        self.neighbors: dict = {}
        self.closest_neighbor_df: PandasDataFrame = None
        self.ncontrolsearched: int = None
        self.gc_percent: float = None
        self.genomesize: float = None
        # All rows in a PamTarget frame share one orientation, so the first
        # row is representative.
        self.pam_orientation: bool = targets['pam_orientation'].iat[0]
    def __str__(self) -> str:
        """
        Return a short summary of the target set.

        Returns:
            str: Human-readable description with the number of targets.
        """
        info = "TargetList: contains a set of {} potential PAM targets".format(len(self.targets))
        return info
def __len__(self) -> | |
<gh_stars>1-10
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to handle tasks related to string encoding.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import threading
import nltk
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import tokenizer as t2t_tokenizer
import tensorflow.compat.v1 as tf # tf
from seq2act.data_generation import create_token_vocab
from seq2act.data_generation import resources
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'token_type', 't2t_subtoken',
['simple', 'nltk_token', 't2t_subtoken', 't2t_token'],
'The way to represent words: by using token and char or by subtoken')
embed_dict = {}
# Singleton encoder to do subtokenize, which loads vocab file only once.
# Please use _get_subtoken_encoder() to get this singleton instance.
_subtoken_encoder = None
_token_vocab = None
lock = threading.Lock()
class EmptyTextError(ValueError):
    """Raised when an annotated substring is effectively empty (e.g. a lone space)."""
    pass
class CharPosError(ValueError):
    """Raised when a char-level position annotation is out of the text's bounds."""
    pass
class UnknownTokenError(ValueError):
    """Raised when a token is not present in the vocabulary."""
    pass
def _get_subtoken_encoder():
    """Return the singleton SubwordTextEncoder, creating it on first use.

    The module-level lock guards lazy initialization so the vocab file is
    loaded only once even with concurrent callers.
    """
    with lock:
        global _subtoken_encoder
        if not _subtoken_encoder:
            _subtoken_encoder = text_encoder.SubwordTextEncoder(
                resources.get_vocab_file())
        return _subtoken_encoder
def _get_token_vocab():
    """Return the singleton token->id dict, creating it on first use.

    The module-level lock guards lazy initialization so the vocab file is
    read only once even with concurrent callers.

    Returns:
        Dict mapping token string to its integer index in the vocab file.
    """
    with lock:
        global _token_vocab
        if not _token_vocab:
            # The previous version first assigned `_token_vocab = {}` and then
            # immediately rebuilt it; the dead initialization is dropped.
            tokens, _, _ = create_token_vocab.read_vocab(resources.get_vocab_file())
            _token_vocab = dict(zip(tokens, range(len(tokens))))
        return _token_vocab
def subtokenize_to_ids(text):
    """Subtokenizes text string to subtoken ids according to vocabulary.

    Args:
        text: String to encode.

    Returns:
        A list of subtoken ids.
    """
    return _get_subtoken_encoder().encode(text)
def t2t_tokenize_to_ids(text):
    """Tokenize text string with tensor2tensor tokenizer.

    Args:
        text: String to tokenize.

    Returns:
        (token_ids, tokens): the vocab ids of the tokens and the tokens.

    Raises:
        UnknownTokenError: if any token is missing from the vocabulary.
    """
    vocab = _get_token_vocab()
    tokens = t2t_tokenizer.encode(text)
    token_ids = []
    for tok in tokens:
        if tok not in vocab:
            raise UnknownTokenError('Unknown token %s' % tok)
        token_ids.append(vocab[tok])
    return token_ids, tokens
stat_fix_dict = collections.defaultdict(int)
def _fix_char_position(text, start, end):
    """Fixes char position by extending (or trimming) the substring.

    text_encoder.SubwordTextEncoder splits alphanumeric and non-alphanumeric
    runs into separate tokens, so a span that cuts through the middle of such
    a run (e.g. grabbing only the quote out of ` "`) will not line up with
    the tokens of the whole text. To compensate, leading/trailing spaces are
    trimmed, then both ends are pushed outward while the neighboring char is
    in the same category (alphanumeric vs. not).

    Args:
        text: whole text.
        start: char level start position.
        end: char level end position (exclusive).

    Returns:
        start: fixed start position.
        end: fixed end position (exclusive).
    """
    original_start, original_end = start, end
    # Trim surrounding spaces only when the span is not entirely spaces.
    if text[start: end].strip():
        while text[start] == ' ':
            start += 1
        while text[end - 1] == ' ':
            end -= 1

    def _same_class(left, right):
        both_alnum = left.isalnum() and right.isalnum()
        both_other = not left.isalnum() and not right.isalnum()
        return both_alnum or both_other

    while start > 0 and _same_class(text[start - 1], text[start]):
        start -= 1
    while end < len(text) and _same_class(text[end - 1], text[end]):
        end += 1
    # Track how far positions moved, for offline statistics.
    stat_fix_dict[abs(start - original_start) + abs(end - original_end)] += 1
    return start, end
def get_t2t_token_pos_from_char_pos(text, start, end):
    """Converts char level position to t2t token/subtoken level position.

    Example: please click "settings" app.
                          |        |
    char-level:         start     end
    Tokens: [u'please', u'click', u' "', u'settings', u'app', u'"', u'.']
            |____________________|  |
               prev tokens           curr tokens
    The start/end position of curr tokens should be (3, 4).
    '3' is calculated by counting the tokens of prev tokens.

    Args:
        text: whole text.
        start: char level start position.
        end: char level end position (exclusive).

    Returns:
        token_start, token_end: token level start/end position.

    Raises:
        ValueError: Empty token or wrong index to search in text.
    """
    if start < 0 or end > len(text):
        raise CharPosError('Position annotation out of the boundaries of text.')
    start, end = _fix_char_position(text, start, end)
    tokens, _ = tokenize_to_ids(text)
    prev, _ = tokenize_to_ids(text[0:start])
    curr, _ = tokenize_to_ids(text[start:end])
    if curr == tokens[len(prev): len(prev) + len(curr)]:
        return len(prev), len(prev) + len(curr)
    space = tokenize_to_ids(' ')[0][0]  # subtoken for space ' _'
    # try ignore the last token(' ') of prev tokens.
    if prev[-1] == space and curr == tokens[len(prev)-1: len(prev) + len(curr)-1]:
        return len(prev)-1, len(prev) + len(curr)-1
    if text[start: end] == ' ':
        raise EmptyTextError('Single space between words will be ignored.')
    # Was `assert False, ...`: asserts are stripped under `python -O`, which
    # would let control fall through and silently return None. Raise the
    # ValueError that the docstring already promises instead.
    raise ValueError('Fail to locate start/end positions in text')
def text_sequence_to_ids(text_seq, vocab_idx_dict):
    """Encodes a list of words into word ids and per-word character ids.

    Each word is lower-cased before lookup; out-of-vocabulary words map to
    len(vocab_idx_dict). Characters are encoded as their code points.

    Example:
        vocab_idx_dict = {'hi': 0, 'hello': 1, 'apple': 2}
        text_sequence_to_ids(['hello', 'world'], vocab_idx_dict) returns:
            word_ids = [1, 3]
            char_ids = [[104, 101, 108, 108, 111], [119, 111, 114, 108, 100]]

    Args:
        text_seq: list of words to be encoded
        vocab_idx_dict: a dictionary, keys are vocabulary, values are words' index

    Returns:
        word_ids: A 1d list of integers, encoded word id sequence
        char_ids: A 2d list of integers, encoded char id sequence
    """
    unknown_id = len(vocab_idx_dict)
    word_ids = []
    for word in text_seq:
        word_ids.append(vocab_idx_dict.get(word.lower(), unknown_id))
    char_ids = [[ord(ch) for ch in word.lower()] for word in text_seq]
    return word_ids, char_ids
def tokenizer_with_punctuation(origin_string):
    """Extracts tokens, including punctuation, from the original string."""
    raw = nltk.word_tokenize(origin_string)
    # nltk rewrites left/right double quotes to `` and ''; restore plain ".
    raw = ['"' if tok in ('``', "''") else tok for tok in raw]
    expanded = []
    for tok in raw:
        # nltk separates " on its own, but keeps ' attached to the neighbor
        # word; split a leading/trailing ' off while leaving a ' in the
        # middle of a word (like n't) untouched. Examples:
        #   doesn't    -> does, n't
        #   'settings' -> ', settings, '
        if tok == '\'':
            expanded.append(tok)
        elif tok.startswith('\'') and tok.endswith('\''):
            expanded.extend(['\'', tok[1:-1], '\''])
        elif tok.startswith('\''):
            expanded.extend(['\'', tok[1:]])
        elif tok.endswith('\''):
            expanded.extend([tok[:-1], '\''])
        # nltk keeps abbreviations like 'ok.' as one word; split the dot off.
        elif len(tok) > 1 and tok.endswith('.'):
            expanded.extend([tok[:-1], '.'])
        else:
            expanded.append(tok)
    # nltk splits https://caldav.calendar.yahoo.com into
    # 'https', ':', '//caldav.calendar.yahoo.com'; glue such triples back.
    result = []
    idx = 0
    while idx < len(expanded):
        if (idx < len(expanded) - 2 and
                expanded[idx] in ['http', 'https'] and
                expanded[idx + 1] == ':' and
                expanded[idx + 2].startswith('//')):
            result.append(expanded[idx] + expanded[idx + 1] + expanded[idx + 2])
            idx += 3
        else:
            result.append(expanded[idx])
            idx += 1
    return result
def tokenizer(action_str):
    """Extracts tokens from an action string.

    Replaces punctuation and newlines with spaces, then splits on single
    spaces and drops empty tokens. Note: tokens keep their original case —
    the previous docstring incorrectly claimed they were lower-cased.

    Args:
        action_str: the action string.

    Returns:
        action_str_tokens: A list of clean tokens.
    """
    action_str_no_punc = re.sub(r'[^\w\s]|\n', ' ', action_str).strip()
    tokens = action_str_no_punc.split(' ')
    action_str_tokens = [token for token in tokens if token]
    return action_str_tokens
def is_ascii_str(token_str):
    """Checks if the given token string consists solely of ASCII chars.

    Args:
        token_str: A token string.

    Returns:
        A boolean indicating whether token_str is pure ASCII.
    """
    for token_char in token_str:
        if ord(token_char) >= 128:
            return False
    return True
def replace_non_ascii(text, replace_with=' '):
    """Replaces every non-ASCII char in the string with replace_with."""
    chars = [ch if ord(ch) < 128 else replace_with for ch in text]
    return ''.join(chars)
def get_index_of_list_in_list(base_list, the_sublist,
start_pos=0, lookback_pivot=None):
"""Gets the start and end(exclusive) indexes of a sublist in base list.
Examples:
call with (['00', '.', '22', '33', '44'. '.' '66'], ['22', '33'], 3)
raise ValueError # Search from 3rd and never lookback.
call with (['00', '.', '22', '33', '44'. '.' '66'], ['22', '33'], 3, '.')
return (2, 4) # Search from 3rd and lookback until previous dot('.')
Args:
base_list: list of str (or any other type), the base list.
the_sublist: list of str (or any other type), the sublist search for.
start_pos: the index to start search.
lookback_pivot: string. If not None, the start_pos will be moved backforward
until an item equal to lookback_pivot. If no previous item matchs
lookback_pivot, start_pos will be set at the beginning of base_list.
Returns:
int, int: the start and end indexes(exclusive) of the sublist in base list.
Raises:
ValueError: | |
# coding: utf-8
# Little utilities we use internally
from abc import ABCMeta
import os
import signal
import sys
import pathlib
from functools import wraps, update_wrapper
import typing as t
import threading
import collections
from async_generator import isasyncgen
from ._deprecate import warn_deprecated
import trio
# Equivalent to the C function raise(), which Python doesn't wrap
if os.name == "nt":
# On windows, os.kill exists but is really weird.
#
# If you give it CTRL_C_EVENT or CTRL_BREAK_EVENT, it tries to deliver
# those using GenerateConsoleCtrlEvent. But I found that when I tried
# to run my test normally, it would freeze waiting... unless I added
# print statements, in which case the test suddenly worked. So I guess
# these signals are only delivered if/when you access the console? I
# don't really know what was going on there. From reading the
# GenerateConsoleCtrlEvent docs I don't know how it worked at all.
#
# I later spent a bunch of time trying to make GenerateConsoleCtrlEvent
# work for creating synthetic control-C events, and... failed
# utterly. There are lots of details in the code and comments
# removed/added at this commit:
# https://github.com/python-trio/trio/commit/95843654173e3e826c34d70a90b369ba6edf2c23
#
# OTOH, if you pass os.kill any *other* signal number... then CPython
# just calls TerminateProcess (wtf).
#
# So, anyway, os.kill is not so useful for testing purposes. Instead
# we use raise():
#
# https://msdn.microsoft.com/en-us/library/dwwzkt4c.aspx
#
# Have to import cffi inside the 'if os.name' block because we don't
# depend on cffi on non-Windows platforms. (It would be easy to switch
# this to ctypes though if we ever remove the cffi dependency.)
#
# Some more information:
# https://bugs.python.org/issue26350
#
# Anyway, we use this for two things:
# - redelivering unhandled signals
# - generating synthetic signals for tests
# and for both of those purposes, 'raise' works fine.
import cffi
_ffi = cffi.FFI()
_ffi.cdef("int raise(int);")
_lib = _ffi.dlopen("api-ms-win-crt-runtime-l1-1-0.dll")
signal_raise = getattr(_lib, "raise")
else:
    def signal_raise(signum):
        # POSIX equivalent of C raise(): deliver signum to the calling thread.
        signal.pthread_kill(threading.get_ident(), signum)
# See: #461 as to why this is needed.
# The gist is that threading.main_thread() has the capability to lie to us
# if somebody else edits the threading ident cache to replace the main
# thread; causing threading.current_thread() to return a _DummyThread,
# causing the C-c check to fail, and so on.
# Trying to use signal out of the main thread will fail, so we can then
# reliably check if this is the main thread without relying on a
# potentially modified threading.
def is_main_thread():
    """Return True when called from the main thread, False otherwise.

    Re-installing the current SIGINT handler is a no-op that CPython only
    permits from the main thread; any other thread gets a ValueError, which
    makes this check reliable even if the threading module's ident cache
    has been tampered with (see issue #461).
    """
    current_handler = signal.getsignal(signal.SIGINT)
    try:
        signal.signal(signal.SIGINT, current_handler)
    except ValueError:
        return False
    return True
######
# Call the function and get the coroutine object, while giving helpful
# errors for common mistakes. Returns coroutine object.
######
def coroutine_or_error(async_fn, *args):
    """Call ``async_fn(*args)`` and return the resulting coroutine object.

    Raises a helpful TypeError for the common mistakes: passing an
    already-called coroutine, a foreign (asyncio/twisted/tornado)
    awaitable, an async generator, or a plain synchronous function.
    """

    def _smells_like_foreign_async(value):
        # Legacy @asyncio.coroutine functions return plain generators;
        # asyncio Future-likes expose _asyncio_future_blocking; tornado
        # Futures / twisted Deferreds are caught (heuristically) by class
        # name -- by the time this runs we already know something is wrong,
        # so a heuristic is pretty safe.
        return (
            isinstance(value, collections.abc.Generator)
            or getattr(value, "_asyncio_future_blocking", None) is not None
            or value.__class__.__name__ in ("Future", "Deferred")
        )

    try:
        coro = async_fn(*args)
    except TypeError:
        # Give good error for: nursery.start_soon(trio.sleep(1))
        if isinstance(async_fn, collections.abc.Coroutine):
            # explicitly close coroutine to avoid RuntimeWarning
            async_fn.close()
            raise TypeError(
                "Trio was expecting an async function, but instead it got "
                "a coroutine object {async_fn!r}\n"
                "\n"
                "Probably you did something like:\n"
                "\n"
                " trio.run({async_fn.__name__}(...)) # incorrect!\n"
                " nursery.start_soon({async_fn.__name__}(...)) # incorrect!\n"
                "\n"
                "Instead, you want (notice the parentheses!):\n"
                "\n"
                " trio.run({async_fn.__name__}, ...) # correct!\n"
                " nursery.start_soon({async_fn.__name__}, ...) # correct!".format(
                    async_fn=async_fn
                )
            ) from None
        # Give good error for: nursery.start_soon(future)
        if _smells_like_foreign_async(async_fn):
            raise TypeError(
                "Trio was expecting an async function, but instead it got "
                "{!r} – are you trying to use a library written for "
                "asyncio/twisted/tornado or similar? That won't work "
                "without some sort of compatibility shim.".format(async_fn)
            ) from None
        raise

    # We can't check iscoroutinefunction(async_fn), because that will fail
    # for things like functools.partial objects wrapping an async
    # function. So we have to just call it and then check whether the
    # return value is a coroutine object.
    if isinstance(coro, collections.abc.Coroutine):
        return coro

    # Give good error for: nursery.start_soon(func_returning_future)
    if _smells_like_foreign_async(coro):
        raise TypeError(
            "Trio got unexpected {!r} – are you trying to use a "
            "library written for asyncio/twisted/tornado or similar? "
            "That won't work without some sort of compatibility shim.".format(coro)
        )
    if isasyncgen(coro):
        raise TypeError(
            "start_soon expected an async function but got an async "
            "generator {!r}".format(coro)
        )
    # Give good error for: nursery.start_soon(some_sync_fn)
    raise TypeError(
        "Trio expected an async function, but {!r} appears to be "
        "synchronous".format(getattr(async_fn, "__qualname__", async_fn))
    )
class ConflictDetector:
    """Guard against two tasks performing operations that must not overlap.

    Use as a synchronous context manager: the first entrant succeeds, and a
    second entrant while the first is still inside raises
    ``trio.BusyResourceError``. Useful for APIs that *would* need a lock if
    two tasks ever collided, but where a collision is always a caller bug
    (e.g. two tasks calling sendall on the same stream simultaneously).
    """

    def __init__(self, msg):
        self._msg = msg      # text for the BusyResourceError on conflict
        self._held = False   # True while some task is inside the block

    def __enter__(self):
        if not self._held:
            self._held = True
            return
        raise trio.BusyResourceError(self._msg)

    def __exit__(self, *args):
        self._held = False
def async_wraps(cls, wrapped_cls, attr_name):
"""Similar to wraps, but for async wrappers of non-async functions."""
def decorator(func):
func.__name__ = attr_name
func.__qualname__ = ".".join((cls.__qualname__, attr_name))
func.__doc__ = """Like :meth:`~{}.{}.{}`, but async.
""".format(
wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name
)
return func
return decorator
def fixup_module_metadata(module_name, namespace):
    """Rewrite ``__module__``/``__name__``/``__qualname__`` on re-exported objects.

    Walks every public entry in ``namespace`` (and, recursively, class
    attributes) and, for objects whose ``__module__`` starts with ``"trio."``,
    rebrands them as living in ``module_name`` so docs and reprs show the
    public location rather than the private submodule.
    """
    seen_ids = set()  # ids already processed; breaks cycles (e.g. typing.Generic)

    def fix_one(qualname, name, obj):
        # avoid infinite recursion (relevant when using
        # typing.Generic, for example)
        if id(obj) in seen_ids:
            return
        seen_ids.add(id(obj))
        mod = getattr(obj, "__module__", None)
        if mod is not None and mod.startswith("trio."):
            obj.__module__ = module_name
            # Modules, unlike everything else in Python, put fully-qualitied
            # names into their __name__ attribute. We check for "." to avoid
            # rewriting these.
            if hasattr(obj, "__name__") and "." not in obj.__name__:
                obj.__name__ = name
                obj.__qualname__ = qualname
            if isinstance(obj, type):
                for attr_name, attr_value in obj.__dict__.items():
                    # BUG FIX: recurse with the *accumulated* qualname, not the
                    # top-level loop variable ``objname``; otherwise attributes
                    # nested two or more classes deep got a wrong dotted path
                    # (e.g. "Outer.Deep" instead of "Outer.Inner.Deep").
                    fix_one(qualname + "." + attr_name, attr_name, attr_value)

    for objname, obj in namespace.items():
        if not objname.startswith("_"):  # ignore private attributes
            fix_one(objname, objname, obj)
class generic_function:
    """Decorator that makes a function indexable, to communicate
    non-inferrable generic type parameters to a static type checker.

    With::

        @generic_function
        def open_memory_channel(max_buffer_size: int) -> Tuple[
            SendChannel[T], ReceiveChannel[T]
        ]: ...

    ``open_memory_channel[bytes](5)`` is valid at runtime and behaves
    exactly like ``open_memory_channel(5)``; the subscript exists purely
    so a mypy plugin or clever stubs can type-check it later.
    """

    def __init__(self, fn):
        self._fn = fn
        # copy __name__/__doc__/etc. so the wrapper is transparent
        update_wrapper(self, fn)

    def __call__(self, *args, **kwargs):
        return self._fn(*args, **kwargs)

    def __getitem__(self, _):
        # Subscripting is a no-op at runtime; the same wrapper is returned.
        return self
# Any class inheriting from an ABC needs a metaclass derived from ABCMeta,
# and (on Python 3.6 only) anything inheriting from typing.Generic needs one
# derived from typing.GenericMeta. Since some users of Final /
# NoPublicConstructor inherit from both ABCs and generics, pick GenericMeta
# when it exists -- it already subclasses ABCMeta, so it covers both cases.
if t.TYPE_CHECKING or not hasattr(t, "GenericMeta"):
    BaseMeta = ABCMeta
else:
    BaseMeta = t.GenericMeta
class Final(BaseMeta):
    """Metaclass that makes a class final (subclassing is forbidden).

    Usage::

        class SomeClass(metaclass=Final):
            pass

    Any attempt to derive from ``SomeClass`` afterwards fails.

    Raises
    ------
    - TypeError if a sub class is created
    """

    def __new__(cls, name, bases, cls_namespace):
        # Reject the first base that was itself produced by this metaclass.
        for candidate in bases:
            if not isinstance(candidate, Final):
                continue
            raise TypeError(
                f"{candidate.__module__}.{candidate.__qualname__} does not support subclassing"
            )
        return super().__new__(cls, name, bases, cls_namespace)
# Module-level type variable used for generic annotations below.
T = t.TypeVar("T")
class NoPublicConstructor(Final):
"""Metaclass that | |
# Source repository: melinath/philo
#encoding: utf-8
import datetime
from hashlib import sha1
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.options import get_verbose_name as convert_camelcase
from django.utils import simplejson as json
from django.utils.http import urlquote_plus
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.template import loader, Context, Template, TemplateDoesNotExist
from philo.contrib.sobol.utils import make_tracking_querydict
from philo.utils.registry import Registry
# Optionally use eventlet's green (cooperative, non-blocking) urllib2 when
# enabled via settings; fall back to the stdlib module otherwise.
if getattr(settings, 'SOBOL_USE_EVENTLET', False):
    try:
        from eventlet.green import urllib2
    except ImportError:
        # Was a bare ``except:`` -- narrowed so only a missing eventlet
        # triggers the fallback instead of masking arbitrary errors.
        import urllib2
else:
    import urllib2
# Public API of this module.
__all__ = (
    'Result', 'BaseSearch', 'DatabaseSearch', 'URLSearch', 'JSONSearch', 'GoogleSearch', 'registry', 'get_search_instance'
)

# Seed mixed into every cache key so sobol entries don't collide with other
# users of the shared Django cache.
SEARCH_CACHE_SEED = 'philo_sobol_search_results'
# Whether search instances are cached between requests (default: True).
USE_CACHE = getattr(settings, 'SOBOL_USE_CACHE', True)

#: A registry for :class:`BaseSearch` subclasses that should be available in the admin.
registry = Registry()
def _make_cache_key(search, search_arg):
    """Build the deterministic cache key for ``search`` run against ``search_arg``."""
    raw_key = SEARCH_CACHE_SEED + search.slug + search_arg
    return sha1(raw_key).hexdigest()
def get_search_instance(slug, search_arg):
    """Returns a search instance for the given slug, either from the cache or newly-instantiated."""
    search_class = registry[slug]
    # Normalise the argument so equivalent queries share a cache entry.
    normalized_arg = search_arg.lower()
    if USE_CACHE:
        cached_instance = cache.get(_make_cache_key(search_class, normalized_arg))
        if cached_instance:
            return cached_instance
    instance = search_class(normalized_arg)
    instance.slug = slug
    return instance
class Result(object):
    """
    Helper that pairs a search with one raw result and knows how to render it.
    The rendering context always carries the ``title``, the (tracking) ``url``,
    the ``actual_url`` and the ``content`` derived from the raw result.

    :param search: An instance of a :class:`BaseSearch` subclass or an object that implements the same API.
    :param result: An arbitrary result from the ``search``.
    """

    def __init__(self, search, result):
        self.search = search
        self.result = result

    def get_title(self):
        """Delegate to :meth:`BaseSearch.get_result_title` for this raw result."""
        return self.search.get_result_title(self.result)

    def get_url(self):
        """Delegate to :meth:`BaseSearch.get_result_url`; the returned querystring URL tracks a :class:`.Click` for the real target."""
        return self.search.get_result_url(self.result)

    def get_actual_url(self):
        """Delegate to :meth:`BaseSearch.get_actual_result_url` for the untracked URL."""
        return self.search.get_actual_result_url(self.result)

    def get_content(self):
        """Delegate to :meth:`BaseSearch.get_result_content`."""
        return self.search.get_result_content(self.result)

    def get_template(self):
        """Delegate to :meth:`BaseSearch.get_result_template` for the render template."""
        return self.search.get_result_template(self.result)

    def get_context(self):
        """
        Return (and memoize) the context dict used both for rendering and for
        the AJAX payload of :meth:`.SearchView.ajax_api_view`. Keys: ``title``,
        ``url``, ``actual_url``, ``content``.
        """
        context = getattr(self, '_context', None)
        if context is None:
            context = {
                'title': self.get_title(),
                'url': self.get_url(),
                'actual_url': self.get_actual_url(),
                'content': self.get_content()
            }
            self._context = context
        return context

    def render(self):
        """Render :meth:`get_template` with the context from :meth:`get_context`."""
        template = self.get_template()
        return template.render(Context(self.get_context()))

    def __unicode__(self):
        """Returns :meth:`render`"""
        return self.render()
class BaseSearchMetaclass(type):
    """Auto-derives ``verbose_name`` and ``slug`` for search classes that
    don't declare them: the verbose name is the de-camelcased class name
    minus its last word, and the slug is the class name minus a trailing
    "Search" suffix, lower-cased."""

    def __new__(cls, name, bases, attrs):
        if 'verbose_name' not in attrs:
            words = convert_camelcase(name).rsplit(' ', 1)[:-1]
            attrs['verbose_name'] = capfirst(' '.join(words))
        if 'slug' not in attrs:
            stem = name[:-6] if name.endswith("Search") else name
            attrs['slug'] = stem.lower()
        return super(BaseSearchMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSearch(object):
    """
    Defines a generic search api. Accessing :attr:`results` will attempt to retrieve cached results and, if that fails, will initiate a new search and store the results in the cache. Each search has a ``verbose_name`` and a ``slug``. If these are not provided as attributes, they will be automatically generated based on the name of the class.

    :param search_arg: The string which is being searched for.
    """
    # NOTE: __metaclass__ is only honoured by Python 2 (this module targets
    # Django-era Python 2; see __unicode__ below).
    __metaclass__ = BaseSearchMetaclass
    #: The number of results to return from the complete list. Default: 5
    result_limit = 5
    #: How long the items for the search should be cached (in minutes). Default: 48 hours.
    _cache_timeout = 60*48
    #: The path to the template which will be used to render the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/result.html`` and ``sobol/search/result.html``.
    result_template = None
    #: The path to the template which will be used to generate the title of the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/title.html`` and ``sobol/search/title.html``.
    title_template = None
    #: The path to the template which will be used to generate the content of the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/content.html`` and ``sobol/search/content.html``.
    content_template = None

    def __init__(self, search_arg):
        self.search_arg = search_arg

    @property
    def results(self):
        """Retrieves cached results or initiates a new search via :meth:`get_results` and caches the results."""
        if not hasattr(self, '_results'):
            try:
                # Cache one extra result so we can see if there are
                # more results to be had.
                limit = self.result_limit
                if limit is not None:
                    limit += 1
                results = self.get_results(limit)
            except Exception:
                # Was a bare ``except:``; narrowed so SystemExit and
                # KeyboardInterrupt are no longer silently swallowed.
                if settings.DEBUG:
                    raise
                # On exceptions, don't set any cache; just return.
                return []

            self._results = results

            if USE_CACHE:
                # Pre-compute each result's context so the cached instance
                # is fully usable without re-rendering.
                for result in results:
                    result.get_context()
                key = _make_cache_key(self, self.search_arg)
                cache.set(key, self, self._cache_timeout)

        return self._results

    def get_results(self, limit=None, result_class=Result):
        """
        Calls :meth:`search` and parses the return value into :class:`Result` instances.

        :param limit: Passed directly to :meth:`search`.
        :param result_class: The class used to represent the results. This will be instantiated with the :class:`BaseSearch` instance and the raw result from the search.
        """
        results = self.search(limit)
        return [result_class(self, result) for result in results]

    def search(self, limit=None):
        """Returns an iterable of up to ``limit`` results. The :meth:`get_result_title`, :meth:`get_result_url`, :meth:`get_result_template`, and :meth:`get_result_extra_context` methods will be used to interpret the individual items that this function returns, so the result can be an object with attributes as easily as a dictionary with keys. However, keep in mind that the raw results will be stored with django's caching mechanisms and will be converted to JSON."""
        raise NotImplementedError

    def get_actual_result_url(self, result):
        """Returns the actual URL for the ``result`` or ``None`` if there is no URL. Must be implemented by subclasses."""
        raise NotImplementedError

    def get_result_querydict(self, result):
        """Returns a querydict for tracking selection of the result, or ``None`` if there is no URL for the result."""
        url = self.get_actual_result_url(result)
        if url is None:
            return None
        return make_tracking_querydict(self.search_arg, url)

    def get_result_url(self, result):
        """Returns ``None`` or a url which, when accessed, will register a :class:`.Click` for that url."""
        qd = self.get_result_querydict(result)
        if qd is None:
            return None
        return "?%s" % qd.urlencode()

    def get_result_title(self, result):
        """Returns the title of the ``result``. By default, renders ``sobol/search/<slug>/title.html`` or ``sobol/search/title.html`` with the result in the context. This can be overridden by setting :attr:`title_template` or simply overriding :meth:`get_result_title`. If no template can be found, this will raise :exc:`TemplateDoesNotExist`."""
        return loader.render_to_string(self.title_template or [
            'sobol/search/%s/title.html' % self.slug,
            'sobol/search/title.html'
        ], {'result': result})

    def get_result_content(self, result):
        """Returns the content for the ``result``. By default, renders ``sobol/search/<slug>/content.html`` or ``sobol/search/content.html`` with the result in the context. This can be overridden by setting :attr:`content_template` or simply overriding :meth:`get_result_content`. If no template is found, this will return an empty string."""
        try:
            return loader.render_to_string(self.content_template or [
                'sobol/search/%s/content.html' % self.slug,
                'sobol/search/content.html'
            ], {'result': result})
        except TemplateDoesNotExist:
            return ""

    def get_result_template(self, result):
        """Returns the template to be used for rendering the ``result``. For a search with slug ``google``, this would first try ``sobol/search/google/result.html``, then fall back on ``sobol/search/result.html``. Subclasses can override this by setting :attr:`result_template` to the path of another template."""
        if self.result_template:
            return loader.get_template(self.result_template)
        return loader.select_template([
            'sobol/search/%s/result.html' % self.slug,
            'sobol/search/result.html'
        ])

    @property
    def has_more_results(self):
        """Returns ``True`` if there are more results than :attr:`result_limit` and ``False`` otherwise."""
        return len(self.results) > self.result_limit

    def get_actual_more_results_url(self):
        """Returns the actual url for more results. By default, simply returns ``None``."""
        return None

    def get_more_results_querydict(self):
        """Returns a :class:`QueryDict` for tracking whether people click on a 'more results' link."""
        url = self.get_actual_more_results_url()
        if url:
            return make_tracking_querydict(self.search_arg, url)
        return None

    @property
    def more_results_url(self):
        """Returns a URL which consists of a querystring which, when accessed, will log a :class:`.Click` for the actual URL."""
        qd = self.get_more_results_querydict()
        if qd is None:
            return None
        return "?%s" % qd.urlencode()

    def __unicode__(self):
        return self.verbose_name
class DatabaseSearch(BaseSearch):
    """Database-backed search: :meth:`search` slices a cached queryset built by :meth:`get_queryset`."""
    #: The model which should be searched by the :class:`DatabaseSearch`.
    model = None

    def search(self, limit=None):
        # Build (and memoize) the queryset on first use; the limit is applied
        # once, when the queryset is first created.
        if not hasattr(self, '_qs'):
            queryset = self.get_queryset()
            if limit is not None:
                queryset = queryset[:limit]
            self._qs = queryset
        return self._qs

    def get_queryset(self):
        """Return a :class:`QuerySet` over every instance of :attr:`model`; subclasses override this to express the actual search."""
        return self.model._default_manager.all()
class URLSearch(BaseSearch):
    """Generic interface for searches that fetch their results from a remote URL."""
    #: The base URL which will be accessed to get the search results.
    search_url = ''
    #: The url-encoded query string appended to :attr:`search_url`; must contain one ``%s`` for the search argument.
    query_format_str = "%s"

    @property
    def url(self):
        """Full results URL: :attr:`search_url` plus the formatted, URL-quoted query."""
        query = self.query_format_str % urlquote_plus(self.search_arg)
        return self.search_url + query

    def get_actual_more_results_url(self):
        return self.url

    def parse_response(self, response, limit=None):
        """Turn the ``response`` from :attr:`url` (a :func:`urllib2.urlopen` result) into a list of at most ``limit`` results."""
        raise NotImplementedError

    def search(self, limit=None):
        return self.parse_response(urllib2.urlopen(self.url), limit=limit)
class JSONSearch(URLSearch):
    """URL search whose GET response is a JSON list of results."""

    def parse_response(self, response, limit=None):
        parsed = json.loads(response.read())
        return parsed[:limit]
class GoogleSearch(JSONSearch):
"""An example implementation of a :class:`JSONSearch`."""
search_url = "http://ajax.googleapis.com/ajax/services/search/web"
_cache_timeout = 60
verbose_name = "Google search (current site)"
_more_results_url = None
@property
def query_format_str(self):
default_args = self.default_args
if default_args:
default_args += " "
return | |
)
return
if 67 - 67: I1IiiI
if 93 - 93: ooOoO0o . Ii1I + IiII / Oo0Ooo % I11i
if 40 - 40: Oo0Ooo % OoOoOO00 . IiII / I1IiiI % OoooooooOO
if 33 - 33: OOooOOo - OoooooooOO . iII111i
if 2 - 2: I11i + i1IIi
if 52 - 52: I11i - OoO0O00 % I1Ii111 . OOooOOo
if 90 - 90: O0 - Oo0Ooo / i1IIi * iIii1I11I1II1 % o0oOOo0O0Ooo / oO0o
if 73 - 73: iII111i % iIii1I11I1II1 + o0oOOo0O0Ooo % Ii1I . II111iiii + IiII
if 55 - 55: OoOoOO00 * II111iiii / iII111i + OOooOOo / OoooooooOO
if 12 - 12: II111iiii * O0 - Oo0Ooo + o0oOOo0O0Ooo . Oo0Ooo + iIii1I11I1II1
if 4 - 4: I1Ii111 - I1Ii111 / I1ii11iIi11i . i1IIi + I1ii11iIi11i / oO0o
# NOTE(review): identifiers below are machine-obfuscated; the comments record
# what the visible code does, not the original names. The no-op
# ``if N - N:`` statements are obfuscation filler and are kept verbatim.
def lisp_encapsulate_rloc_probe ( lisp_sockets , rloc , nat_info , packet ) :
    """Wrap an RLOC-probe ``packet`` in outer IPv4+UDP headers and data-encapsulate
    it to ``rloc`` (NAT-traversal path). Expects the 4-entry socket array used
    elsewhere in this file; silently returns otherwise."""
    if ( len ( lisp_sockets ) != 4 ) : return
    if 18 - 18: iIii1I11I1II1 . ooOoO0o
    # Our local RLOC, used as the source address for both headers.
    oO0oOO00 = lisp_myrlocs [ 0 ]
    if 33 - 33: OoO0O00 / OOooOOo % Oo0Ooo . o0oOOo0O0Ooo % II111iiii
    if 62 - 62: iII111i . OoooooooOO - i1IIi
    if 59 - 59: OoOoOO00 + i1IIi * OoooooooOO . oO0o
    if 38 - 38: I1ii11iIi11i / o0oOOo0O0Ooo
    if 95 - 95: iIii1I11I1II1 / OoOoOO00 % I1Ii111
    # Total length = payload + 20-byte IPv4 header + 8-byte UDP header.
    I1I1 = len ( packet ) + 28
    # IPv4 header: version/IHL 0x45, TTL 64, protocol 17 (UDP), src/dst below.
    oOo00Ooo0o0 = struct . pack ( "BBHIBBHII" , 0x45 , 0 , socket . htons ( I1I1 ) , 0 , 64 ,
        17 , 0 , socket . htonl ( oO0oOO00 . address ) , socket . htonl ( rloc . address ) )
    oOo00Ooo0o0 = lisp_ip_checksum ( oOo00Ooo0o0 )
    if 54 - 54: OoooooooOO % Ii1I
    # UDP header: src port 0, dst port LISP control port, length, checksum 0.
    IIi1ii1 = struct . pack ( "HHHH" , 0 , socket . htons ( LISP_CTRL_PORT ) ,
        socket . htons ( I1I1 - 20 ) , 0 )
    if 100 - 100: OOooOOo - I11i . O0 * i1IIi % OoooooooOO - ooOoO0o
    if 54 - 54: O0 + I11i
    if 71 - 71: OoOoOO00
    if 29 - 29: O0 . i11iIiiIii
    # Re-wrap headers + payload in the file's packet abstraction.
    packet = lisp_packet ( oOo00Ooo0o0 + IIi1ii1 + packet )
    if 51 - 51: IiII
    if 53 - 53: O0
    if 19 - 19: o0oOOo0O0Ooo / iII111i % OoOoOO00
    if 65 - 65: o0oOOo0O0Ooo
    # Fill inner/outer address metadata; instance-id 0xffffff marks the probe.
    packet . inner_dest . copy_address ( rloc )
    packet . inner_dest . instance_id = 0xffffff
    packet . inner_source . copy_address ( oO0oOO00 )
    packet . inner_ttl = 64
    packet . outer_dest . copy_address ( rloc )
    packet . outer_source . copy_address ( oO0oOO00 )
    packet . outer_version = packet . outer_dest . afi_to_version ( )
    packet . outer_ttl = 64
    # Behind a NAT we must reuse the translated port; otherwise the data port.
    packet . encap_port = nat_info . port if nat_info else LISP_DATA_PORT
    if 89 - 89: iIii1I11I1II1 + OoooooooOO + i1IIi + OoooooooOO % IiII * OoO0O00
    # Build the colorized log line (request when NAT state exists, else reply).
    I111I = red ( rloc . print_address_no_iid ( ) , False )
    if ( nat_info ) :
        iI111III = " {}" . format ( blue ( nat_info . hostname , False ) )
        O00oOoo0OoOOO = bold ( "RLOC-probe request" , False )
    else :
        iI111III = ""
        O00oOoo0OoOOO = bold ( "RLOC-probe reply" , False )
    if 53 - 53: OOooOOo . IiII % I11i - OoO0O00 - Oo0Ooo
    if 58 - 58: I1Ii111 / OoooooooOO . I11i % I1Ii111
    lprint ( ( "Data encapsulate {} to {}{} port {} for " + "NAT-traversal" ) . format ( O00oOoo0OoOOO , I111I , iI111III , packet . encap_port ) )
    if 8 - 8: Oo0Ooo % ooOoO0o / i11iIiiIii
    if 54 - 54: IiII
    if 85 - 85: OOooOOo - i1IIi
    if 10 - 10: I1ii11iIi11i
    if 3 - 3: ooOoO0o * O0 / o0oOOo0O0Ooo
    # Encode the packet; bail if encoding fails, then send on socket index 3.
    if ( packet . encode ( None ) == None ) : return
    packet . print_packet ( "Send" , True )
    if 22 - 22: OoOoOO00 + OOooOOo . iII111i % iIii1I11I1II1 - I11i
    iI1iI1Iiii1i1 = lisp_sockets [ 3 ]
    packet . send_packet ( iI1iI1Iiii1i1 , packet . outer_dest )
    del ( packet )
    return
if 67 - 67: OoO0O00 - ooOoO0o . OoO0O00 - ooOoO0o / o0oOOo0O0Ooo / II111iiii
if 77 - 77: Oo0Ooo
if 53 - 53: ooOoO0o * iIii1I11I1II1 . oO0o * Oo0Ooo . Oo0Ooo % iIii1I11I1II1
if 7 - 7: ooOoO0o + Ii1I
if 25 - 25: OoO0O00 * oO0o
if 29 - 29: OOooOOo - I1Ii111 - i11iIiiIii % i1IIi
if 2 - 2: i11iIiiIii % iIii1I11I1II1 * OOooOOo
if 45 - 45: oO0o + i1IIi + iII111i + o0oOOo0O0Ooo * OOooOOo + ooOoO0o
# NOTE(review): identifiers are machine-obfuscated; ``if N - N:`` lines are
# no-op filler kept verbatim.
def lisp_get_default_route_next_hops ( ) :
    """Return the system's default-route next hops as a list of
    ``[interface, gateway]`` pairs, parsed from ``route``/``ip route`` output
    (uses the Python 2 ``commands`` module)."""
    if 83 - 83: OoO0O00 - ooOoO0o / OoooooooOO % iIii1I11I1II1 - II111iiii
    if 73 - 73: Oo0Ooo + II111iiii - IiII
    if 60 - 60: i1IIi . i11iIiiIii / i1IIi . I11i % OOooOOo
    if 47 - 47: oO0o + IiII * I1Ii111 % o0oOOo0O0Ooo - O0 % IiII
    # macOS: single default route parsed from "route -n get default".
    if ( lisp_is_macos ( ) ) :
        i1i1i1I = "route -n get default"
        Oooo0O0ooOooO = commands . getoutput ( i1i1i1I ) . split ( "\n" )
        I1iI11iII11 = iIiiiIiIi = None   # gateway, interface
        for Oo0OO0o0oOO0 in Oooo0O0ooOooO :
            if ( Oo0OO0o0oOO0 . find ( "gateway: " ) != - 1 ) : I1iI11iII11 = Oo0OO0o0oOO0 . split ( ": " ) [ 1 ]
            if ( Oo0OO0o0oOO0 . find ( "interface: " ) != - 1 ) : iIiiiIiIi = Oo0OO0o0oOO0 . split ( ": " ) [ 1 ]
        if 56 - 56: Ii1I + i1IIi / II111iiii
        return ( [ [ iIiiiIiIi , I1iI11iII11 ] ] )
    if 54 - 54: O0 * IiII + i11iIiiIii - oO0o - ooOoO0o + i11iIiiIii
    if 87 - 87: I1ii11iIi11i * iIii1I11I1II1 / I1Ii111
    if 5 - 5: i1IIi * IiII / iIii1I11I1II1 * OoooooooOO . O0
    if 57 - 57: i11iIiiIii
    if 89 - 89: o0oOOo0O0Ooo . I1Ii111 * I11i + oO0o - OoooooooOO + OoO0O00
    # Linux: every "default via ..." line from "ip route", skipping
    # metric-qualified duplicates.
    i1i1i1I = "ip route | egrep 'default via'"
    ooO0oO00O = commands . getoutput ( i1i1i1I ) . split ( "\n" )
    if 25 - 25: i1IIi * I1Ii111 * iII111i . OoooooooOO
    IiiI1iiI11 = [ ]   # accumulated [device, gateway] pairs
    for OO0oOo in ooO0oO00O :
        if ( OO0oOo . find ( " metric " ) != - 1 ) : continue
        iIOoo000 = OO0oOo . split ( " " )
        # "via <gw>" and "dev <if>" tokens; skip malformed lines.
        try :
            ooOiiIiI1I = iIOoo000 . index ( "via" ) + 1
            if ( ooOiiIiI1I >= len ( iIOoo000 ) ) : continue
            O0OO0 = iIOoo000 . index ( "dev" ) + 1
            if ( O0OO0 >= len ( iIOoo000 ) ) : continue
        except :
            continue
        if 60 - 60: OoO0O00 / I1ii11iIi11i % iII111i % i11iIiiIii * OoooooooOO * iII111i
        if 92 - 92: I11i % iIii1I11I1II1 * iII111i - OoooooooOO - I11i
        IiiI1iiI11 . append ( [ iIOoo000 [ O0OO0 ] , iIOoo000 [ ooOiiIiI1I ] ] )
    if 34 - 34: I1Ii111 / i1IIi / O0 / OoooooooOO
    return ( IiiI1iiI11 )
if 55 - 55: I1Ii111 . I1IiiI * iIii1I11I1II1 / Ii1I . I1IiiI
if 63 - 63: ooOoO0o . Ii1I - I1Ii111 - oO0o * I1Ii111 + ooOoO0o
if 85 - 85: II111iiii + I1ii11iIi11i
if 33 - 33: iII111i
if 14 - 14: O0 * Oo0Ooo / i1IIi
if 95 - 95: O0 % i1IIi % ooOoO0o % oO0o - I1IiiI
if 78 - 78: II111iiii % OOooOOo
def lisp_get_host_route_next_hop ( rloc ) :
i1i1i1I = "ip route | egrep '{} via'" . format ( rloc )
OO0oOo = commands . getoutput ( i1i1i1I ) . split ( " " )
if 6 - 6: OOooOOo
try : OOOoO000 = OO0oOo . index ( "via" ) + 1
except : return ( None )
if 21 - 21: I1Ii111 - Ii1I - i1IIi % oO0o
if ( OOOoO000 >= len ( OO0oOo | |
import socketio
import json
import os
import io
import requests
import socket
import time
from flask import Flask, request, jsonify, Response
from minio import Minio
from pymongo import MongoClient
# from minio.error import S3Error
# Example of how the analysis is getting saved
#---------------------------------------------
# Metis -- Analysis-1 -- Raw-Files-1
# | |- Data-Load-1
# | |- Raw-Files-2
# | |- Data-Load-2
# | |- Analysis-1-1
# | |- Analysis-1-2
# | |- Analysis-2-1
# |
# |- Analysis-2 -- Raw-Files-1
# |- Data-Load-1
# |- Analysis-1-1
# |- Analysis-1-2
#
# BIO -- Analysis-1 -- Raw-Files-1
# |- Data-Load-1
# |- Raw-Files-2
# |- Data-Load-2
# |- Analysis-1-1
# |- Analysis-1-2
# |- Analysis-1-2-1
# |- Analysis-1-2-2
# |- Analysis-1-3
# |- Analysis-2-1
""" Initiate flask app with WSGI """
sio = socketio.Server()
app = Flask(__name__)
app.wsgi_app = socketio.WSGIApp(sio, app.wsgi_app)
""" Environment Variables """
# Flask app Host and Port
HOST = os.getenv("HOST", "0.0.0.0")
PORT = int(os.getenv("PORT", 5000))
# MinIO Host, Port and user details
MINIO_HOST = os.getenv("MINIO_HOST", "localhost")
MINIO_PORT = int(os.getenv("MINIO_PORT", 9000))
MINIO_USER = os.getenv("MINIO_USER", "diastema")
MINIO_PASS = os.getenv("MINIO_PASS", "<PASSWORD>")
# MongoDB Host and Port
MONGO_HOST = os.getenv("MONGO_HOST", "localhost")
MONGO_PORT = int(os.getenv("MONGO_PORT", 27017))
# Diastema key
DIASTEMA_KEY = os.getenv("DIASTEMA_KEY", "diastema-key")
# Diastema Front End Host and Port
DIASTEMA_FRONTEND_HOST = os.getenv("DIASTEMA_FRONTEND_HOST", "localhost")
DIASTEMA_FRONTEND_PORT = int(os.getenv("DIASTEMA_FRONTEND_PORT", 5001))
# Diastema Analytcs API Host and Port
DIASTEMA_SERVICES_HOST = os.getenv("DIASTEMA_SERVICES_HOST", "localhost")
DIASTEMA_SERVICES_PORT = int(os.getenv("DIASTEMA_SERVICES_PORT", 5001))
# Spark Cluster Details
KUBERNETES_HOST = os.getenv("KUBERNETES_HOST", "localhost")
KUBERNETES_PORT = int(os.getenv("KUBERNETES_PORT", 6006))
""" Global variables """
# Diastema Token
diastema_token = DIASTEMA_KEY
# Kubernetes component connection to call spark jobs
K8S_HEADER = 64
K8S_FORMAT = 'utf-8'
K8S_ADDR = (KUBERNETES_HOST, KUBERNETES_PORT)
# MongoDB HOST
mongo_host = MONGO_HOST+":"+str(MONGO_PORT)
mongo_client = MongoClient("mongodb://"+mongo_host+"/")
# MinIO HOST and Client
minio_host = MINIO_HOST+":"+str(MINIO_PORT)
minio_client = Minio(
minio_host,
access_key=MINIO_USER,
secret_key=MINIO_PASS,
secure=False
)
# Diastema Front End url
diastema_front_end_url = "http://"+DIASTEMA_FRONTEND_HOST+":"+str(DIASTEMA_FRONTEND_PORT)+"/messages"
# Diastema Services url
diastema_services_url = "http://"+DIASTEMA_SERVICES_HOST+":"+str(DIASTEMA_SERVICES_PORT)+"/"
""" Frequently used code """
# Make a good MinIO String
def minioString(obj):
    """
    Cast an object to ``str`` and lower-case it.

    Used to build MinIO bucket/object path components uniformly.

    Args:
    - obj (Python Object): Any object with a string representation.

    Returns:
    - Lower cased String (String): The lowercased string form of the object.
    """
    text = str(obj)
    return text.lower()
# Insert one record in mongo
def insertMongoRecord(mongo_db_client, mongo_db_analysis_collection, record):
    """
    Insert a single record into the Diastema MongoDB server.

    Args:
    - mongo_db_client (String): Name of the MongoDB database (one per user).
    - mongo_db_analysis_collection (String): Collection holding one analysis of that user.
    - record (JSON): The document to insert into the given collection.

    Returns:
    - Nothing
    """
    database = mongo_client[mongo_db_client]
    database[mongo_db_analysis_collection].insert_one(record)
    return
# Contact Diastema Front-End for the ending of a job
def diastema_call(message, update = -1, visualization_path = -1, job_name = -1, column = -1):
    """
    POST a status message to the central Diastema front-end API, informing it
    of analysis progress ("update") or of output ready for visualisation
    ("visualize"). Any other message value posts an empty form.

    Args:
    - message (String): "update" or "visualize".
    - update: Progress payload forwarded for "update" messages.
    - visualization_path (String): The path of the MinIO objects to be visualised.
    - job_name (String): A job name, or the "analysis" value.
    - column: The column of interest for the visualisation.

    Returns:
    - Nothing
    """
    if message == "update":
        payload = {
            "message": "update",
            "update": update
        }
    elif message == "visualize":
        payload = {
            "message": "visualize",
            "path": visualization_path,
            "job": job_name,
            "column": column
        }
    else:
        payload = {}
    requests.post(diastema_front_end_url, payload)
    return
# Function to start the Services of Diastema
def startService(service_name, json_body):
    """Start the named Diastema service by POSTing its JSON job description."""
    requests.post(diastema_services_url + service_name, json=json_body)
    return
# Function to view the progress of Diastema Services
def waitForService(service_name, job_id):
    """Block until the named Diastema service reports "complete" for job_id,
    polling its progress endpoint every 2 seconds."""
    url = diastema_services_url + service_name + "/progress?id=" + str(job_id)
    progress = requests.get(url)
    while True:
        # Pacing kept from the original: sleep before inspecting the last reply.
        time.sleep(2)
        if progress.text == "complete":
            return
        progress = requests.get(url)
# Function to get the results of a Diastema Service
def getServiceResults(service_name, job_id):
    """
    Fetch the results of a finished Diastema service job.

    Bug fix: the original fetched the HTTP response and then discarded it,
    always returning None; the response is now returned to the caller
    (backward compatible -- callers ignoring the return value are unaffected).

    Args:
    - service_name (String): The Diastema service endpoint name.
    - job_id: Identifier of the finished job.

    Returns:
    - requests.Response: The service's results response.
    """
    url = diastema_services_url + service_name + "/" + str(job_id)
    responce = requests.get(url)
    return responce
""" Functions to call a spark job """
# Function to send message through sockets
def kubernetes_send(msg):
    """Send ``msg`` to the Kubernetes component over a fresh TCP socket.

    Wire format: a K8S_HEADER-byte space-padded ASCII length prefix,
    followed by the UTF-8 payload; a short reply is read (and discarded)
    as an acknowledgement.
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(K8S_ADDR)
    payload = msg.encode(K8S_FORMAT)
    length_prefix = str(len(payload)).encode(K8S_FORMAT)
    length_prefix += b' ' * (K8S_HEADER - len(length_prefix))
    client.send(length_prefix)
    client.send(payload)
    client.recv(2048).decode(K8S_FORMAT)  # ack; content intentionally unused
    return
# Function assembling the json content needed to run a spark job in kubernetes
def spark_caller(call_args):
    """
    Assemble the JSON job description for a Spark job on Kubernetes and hand
    it to kubernetes_send().

    call_args layout (positional):
    0: path to the job script (sent prefixed with "local://")
    1: algorithm name
    2: MinIO input path
    3: MinIO output path
    4: target column
    """
    job_script = call_args[0]
    algorithm = call_args[1]
    minio_input = call_args[2]
    minio_output = call_args[3]
    column = call_args[4]
    job_spec = {
        "master-host" : "192.168.49.2",   # minikube default host
        "master-port" : "8443",           # minikube default port
        # NOTE(review): "distema" typo kept as-is -- other components may match on it.
        "app-name" : "distema-job",
        "minio-host" : MINIO_HOST,
        "minio-port" : str(MINIO_PORT),
        "minio-user" : MINIO_USER,
        "minio-pass" : MINIO_PASS,
        "path" : "local://" + job_script,
        "algorithm" : algorithm,
        "minio-input" : minio_input,
        "minio-output" : minio_output,
        "column" : column
    }
    kubernetes_send(json.dumps(job_spec))
    return
""" Spark Jobs And Diastema API Jobs """
# Data load job
def data_load(playbook, job, data_set_files):
    """
    Handle a Data Loading Job from the Diastema JSON playbook.

    Creates the MinIO folder the loaded data will land in, runs the
    Data Loading service and waits for it, then records both the raw and
    the loaded paths in the analysis' MongoDB collection and notifies the
    front end.

    Args:
        - playbook (JSON): The Diastema playbook.
        - job (JSON): This Data Loading Job from the Diastema playbook.
        - data_set_files (String): Path of the data set files.
          NOTE(review): currently unused by this function body.

    Returns:
        - MinIO path (String): The path that the loaded data are saved.
    """
    db = minioString(playbook["database-id"])
    analysis = "analysis-" + minioString(playbook["analysis-id"])
    step = minioString(job["step"])
    job_id = minioString(job["id"])
    # Raw bucket = User/analysis-id/raw (job id may be appended in later updates)
    raw_bucket = db + "/" + analysis + "/raw"
    # Bucket to load data into = User/analysis-id/loaded-step
    load_bucket = db + "/" + analysis + "/loaded-" + step
    # Create the (empty) load directory in MinIO.
    minio_client.put_object(db, analysis + "/loaded-" + step + "/", io.BytesIO(b""), 0,)
    # Run the Data Loading service and block until it finishes.
    startService("data-loading", {"minio-input": raw_bucket, "minio-output": load_bucket, "job-id": job_id})
    waitForService("data-loading", job["id"])
    # Record both directories in the analysis collection.
    collection = "analysis_" + minioString(playbook["analysis-id"])
    insertMongoRecord(db, collection, {"minio-path": raw_bucket, "directory-kind": "raw-data", "for-job-step": step})
    insertMongoRecord(db, collection, {"minio-path": load_bucket, "directory-kind": "loaded-data", "job-json": job})
    # Tell the front end this job is done.
    diastema_call(message="update", update=("Loaded Dataset with ID: " + job_id))
    return load_bucket
# Cleaning job
def cleaning(playbook, job, last_bucket, max_shrink=False, json_schema=False):
    """
    Handle a Data Cleaning Job from the Diastema JSON playbook.

    Creates the MinIO folder for the cleaned output, runs the Data
    Cleaning service and waits for it, then records the cleaned path in
    the analysis' MongoDB collection and notifies the front end.

    Args:
        - playbook (JSON): The Diastema playbook.
        - job (JSON): This Data Cleaning Job from the Diastema playbook.
        - last_bucket (String): The path that the raw data are saved.
        - max_shrink (float): Maximum shrinking of the data set; forwarded
          to the service only when set.
        - json_schema (JSON): JSON schema for the cleaning job.
          NOTE(review): currently unused by this function body.

    Returns:
        - MinIO path (String): The path that the cleaned data are saved.
    """
    db = minioString(playbook["database-id"])
    analysis = "analysis-" + minioString(playbook["analysis-id"])
    step = minioString(job["step"])
    # Analysis bucket = User/analysis-id/cleaned-step; input is the previous job's output.
    analysis_bucket = db + "/" + analysis + "/cleaned-" + step
    # Create the (empty) cleaned directory in MinIO.
    minio_client.put_object(db, analysis + "/cleaned-" + step + "/", io.BytesIO(b""), 0,)
    cleaning_info = {"minio-input": last_bucket, "minio-output": analysis_bucket, "job-id": minioString(job["id"])}
    # max-shrink is optional; only forward it when the caller supplied one.
    if max_shrink != False:
        cleaning_info["max-shrink"] = max_shrink
    # Run the Data Cleaning service and block until it finishes.
    startService("data-cleaning", cleaning_info)
    waitForService("data-cleaning", job["id"])
    # Record the cleaned directory in the analysis collection.
    insertMongoRecord(db, "analysis_" + minioString(playbook["analysis-id"]),
                      {"minio-path": analysis_bucket, "directory-kind": "cleaned-data", "job-json": job})
    # Tell the front end this job is done.
    diastema_call(message="update", update="Cleaning executed.")
    return analysis_bucket
# Classification job
def classification(playbook, job, last_bucket, algorithm=False, tensorfow_algorithm=False):
"""
A function to handle a Classification Analysis Job from the Diastema JSON playbook.
It will setup the folders needed for the spark jobs in the MinIO Database.
Then it will call the Spark | |
<gh_stars>1-10
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Manages WrapperFormatter objects.
WrapperFormatter objects can be used for wrapping CLI column celldata in order
for the CLI table (using prettyTable) to fit the terminal screen
The basic idea is:
Once celldata is retrieved and ready to display, first iterate through the celldata
and word wrap it so that fits programmer desired column widths. The
WrapperFormatter objects fill this role.
Once the celldata is formatted to their desired widths, then it can be passed to
the existing prettyTable code base for rendering.
"""
import copy
import re
import six
import textwrap
from fmclient.common.cli_no_wrap import is_nowrap_set
from fmclient.common.cli_no_wrap import set_no_wrap
from prettytable import _get_size
from six.moves import range
UUID_MIN_LENGTH = 36
# monkey patch (customize) how the textwrap module breaks text into chunks
wordsep_re = re.compile(r'(\s+|' # any whitespace
r',|'
r'=|'
r'\.|'
r':|'
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
textwrap.TextWrapper.wordsep_re = wordsep_re
def get_width(value):
    """Return the rendered character width of *value*; 0 when it is None."""
    # _get_size returns [width, height]; only the width matters here.
    return 0 if value is None else _get_size(six.text_type(value))[0]
def _get_terminal_width():
    """Return the current terminal width in characters."""
    # Imported inside the function (presumably to avoid an import cycle
    # at module load time -- confirm before hoisting).
    from fmclient.common.utils import get_terminal_size
    return get_terminal_size()[0]
def is_uuid_field(field_name):
    """
    :param field_name: column/field name to test; may be None
    :return: True if field_name looks like a uuid name
    """
    # Bug fix: the original wrote
    #     field_name is not None and field_name in [...] or field_name.endswith("uuid")
    # which parses as (A and B) or C, so a None field_name fell through to
    # None.endswith(...) and raised AttributeError. Parenthesize the "or".
    return field_name is not None and (
        field_name in ("uuid", "UUID") or field_name.endswith("uuid")
    )
class WrapperContext(object):
    """Shared state for the wrapping formatters of one prettyTable.

    Keeps every WrapperFormatter in play so each one can find its
    'sibling' wrappers, and offers helpers for comparing the table's
    computed width against the terminal width.
    """

    def __init__(self):
        self.wrappers = []
        self.wrappers_by_field = {}
        self.non_data_chrs_used_by_table = 0
        self.num_columns = 0
        self.terminal_width = -1  # -1 means "not measured yet"

    def set_num_columns(self, num_columns):
        """Record the column count and the non-data characters it implies."""
        self.num_columns = num_columns
        # 3 non-data chars per column plus one (table separators/padding).
        self.non_data_chrs_used_by_table = (num_columns * 3) + 1

    def add_column_formatter(self, field, wrapper):
        """Register *wrapper* as the formatter for *field*."""
        self.wrappers.append(wrapper)
        self.wrappers_by_field[field] = wrapper

    def get_terminal_width(self):
        """Measure the terminal width lazily and cache the result."""
        if self.terminal_width == -1:
            self.terminal_width = _get_terminal_width()
        return self.terminal_width

    def get_table_width(self):
        """
        Calculate total table width: the sum of every column formatter's
        actual width plus the table's non-data characters.
        :return: total table width
        """
        data_chars = sum(
            w.get_actual_column_char_len(w.get_calculated_desired_width(),
                                         check_remaining_row_chars=False)
            for w in self.wrappers)
        return self.non_data_chrs_used_by_table + data_chars

    def is_table_too_wide(self):
        """
        :return: True if calculated table width exceeds the terminal width
        """
        return self.get_table_width() > self.get_terminal_width()
def field_value_function_factory(formatter, field):
    """Build a function that extracts *field* from a table-cell data object.

    Side effect: on its first call, the returned builder inspects the data's
    type (dict vs. object), installs a specialized accessor as
    formatter.get_field_value, and answers through that accessor.

    :param formatter: the formatter to attach the 'get_field_value' accessor to
    :param field: name of the key/attribute to read
    :return: function that returns the cell's celldata
    """
    def field_value_function_builder(data):
        if isinstance(data, dict):
            accessor = lambda celldata: celldata.get(field, None)
        else:
            accessor = lambda celldata: getattr(celldata, field)
        formatter.get_field_value = accessor
        return accessor(data)
    return field_value_function_builder
class WrapperFormatter(object):
    """Base (abstract) class definition of wrapping formatters"""
    def __init__(self, ctx, field):
        # :param ctx: the shared WrapperContext this formatter belongs to
        # :param field: field/column name; falsy means "the data IS the value"
        self.ctx = ctx
        self.add_blank_line = False  # when True, append a blank line after wrapped cells
        self.no_wrap = False  # when True, text_wrap() returns its input untouched
        self.min_width = 0  # floor for the column width (typically header/longest word)
        self.field = field
        self.header_width = 0  # width of the column header text
        self.actual_column_char_len = -1  # -1 means "not yet calculated"
        self.textWrapper = None  # lazily-created textwrap.TextWrapper, reused across cells
        if self.field:
            # Accessor specializes itself (dict vs object) on first use.
            self.get_field_value = field_value_function_factory(self, field)
        else:
            # No field name: format the raw data itself.
            self.get_field_value = lambda data: data
    def get_basic_desired_width(self):
        """Desired width before header consideration; subclasses override."""
        return self.min_width
    def get_calculated_desired_width(self):
        """Desired width, never narrower than the column header."""
        basic_desired_width = self.get_basic_desired_width()
        if self.header_width > basic_desired_width:
            return self.header_width
        return basic_desired_width
    def get_sibling_wrappers(self):
        """
        :return: a list of your sibling wrappers for the other fields
        """
        others = [w for w in self.ctx.wrappers if w != self]
        return others
    def get_remaining_row_chars(self):
        """Characters left for this column after the siblings take theirs."""
        # check_remaining_row_chars=False here avoids mutual recursion
        # between siblings asking each other for remaining space.
        used = [w.get_actual_column_char_len(w.get_calculated_desired_width(),
                                             check_remaining_row_chars=False)
                for w in self.get_sibling_wrappers()]
        chrs_used_by_data = sum(used)
        remaining_chrs_in_row = (self.ctx.get_terminal_width() -
                                 self.ctx.non_data_chrs_used_by_table) - chrs_used_by_data
        return remaining_chrs_in_row
    def set_min_width(self, min_width):
        self.min_width = min_width
    def set_actual_column_len(self, actual):
        self.actual_column_char_len = actual
    def get_actual_column_char_len(self, desired_char_len, check_remaining_row_chars=True):
        """Utility method to adjust desired width to a width
        that can actually be applied based on current table width
        and current terminal width
        Will not allow actual width to be less than min_width
        min_width is typically length of the column header text
        or the longest 'word' in the celldata
        :param desired_char_len:
        :param check_remaining_row_chars:
        :return:
        """
        if self.actual_column_char_len != -1:
            return self.actual_column_char_len  # already calculated
        # Clamp the desired width to at least min_width.
        if desired_char_len < self.min_width:
            actual = self.min_width
        else:
            actual = desired_char_len
        # Optionally cap the width by whatever row space the siblings left.
        if check_remaining_row_chars and actual > self.min_width:
            remaining = self.get_remaining_row_chars()
            if actual > remaining >= self.min_width:
                actual = remaining
        if check_remaining_row_chars:
            # Cache the result, then, if the table still overflows the
            # terminal, give back width down to min_width.
            self.set_actual_column_len(actual)
            if self.ctx.is_table_too_wide():
                # Table too big can I shrink myself?
                if actual > self.min_width:
                    # shrink column
                    while actual > self.min_width:
                        actual -= 1  # TODO(jkung): fix in next sprint
                        # each column needs to share in
                        # table shrinking - but this is good
                        # enough for now - also - why the loop?
                    self.set_actual_column_len(actual)
        return actual
    def _textwrap_fill(self, s, actual_width):
        # Reuse one TextWrapper instance, just retargeting its width.
        if not self.textWrapper:
            self.textWrapper = textwrap.TextWrapper(actual_width)
        else:
            self.textWrapper.width = actual_width
        return self.textWrapper.fill(s)
    def text_wrap(self, s, width):
        """
        performs actual text wrap
        :param s:
        :param width: in characters
        :return: formatted text
        """
        if self.no_wrap:
            return s
        actual_width = self.get_actual_column_char_len(width)
        new_s = self._textwrap_fill(s, actual_width)
        wrapped = new_s != s
        # Only add the trailing blank line when wrapping actually occurred.
        if self.add_blank_line and wrapped:
            new_s += "\n".ljust(actual_width)
        return new_s
    def format(self, data):
        """Default formatting: the field value as a plain string."""
        return str(self.get_field_value(data))
    def get_unwrapped_field_value(self, data):
        """Field value without any wrapping applied."""
        return self.get_field_value(data)
    def as_function(self):
        """Expose this formatter as a plain callable, tagged so
        is_wrapper_formatter() can recognize it later."""
        def foo(data):
            return self.format(data)
        foo.WrapperFormatterMarker = True
        foo.wrapper_formatter = self
        return foo
    @staticmethod
    def is_wrapper_formatter(foo):
        """True when *foo* is a callable produced by as_function()."""
        if not foo:
            return False
        return getattr(foo, "WrapperFormatterMarker", False)
class WrapperLambdaFormatter(WrapperFormatter):
    """Adapts an arbitrary callable so it behaves like a WrapperFormatter."""

    def __init__(self, ctx, field, format_function):
        super(WrapperLambdaFormatter, self).__init__(ctx, field)
        self.format_function = format_function

    def format(self, data):
        # Pull out the cell value, then hand it to the wrapped callable.
        value = self.get_field_value(data)
        return self.format_function(value)
class WrapperFixedWidthFormatter(WrapperLambdaFormatter):
    """Forces its column's text to wrap at a fixed width (in chars)."""

    def __init__(self, ctx, field, width):
        def _wrap_at_desired_width(data):
            # Wrap at the width negotiated against the other columns.
            return self.text_wrap(str(data), self.get_calculated_desired_width())
        super(WrapperFixedWidthFormatter, self).__init__(ctx, field,
                                                         _wrap_at_desired_width)
        self.width = width

    def get_basic_desired_width(self):
        return self.width
class WrapperPercentWidthFormatter(WrapperFormatter):
    """Forces its column's text to wrap at a percentage of the current
    terminal width."""

    def __init__(self, ctx, field, width_as_decimal):
        super(WrapperPercentWidthFormatter, self).__init__(ctx, field)
        self.width_as_decimal = width_as_decimal

    def get_basic_desired_width(self):
        # The percentage applies to the space left after the table's
        # non-data characters are subtracted from the terminal width.
        usable = (self.ctx.get_terminal_width() -
                  self.ctx.non_data_chrs_used_by_table)
        return int(usable * self.width_as_decimal)

    def format(self, data):
        cell_value = self.get_field_value(data)
        return self.text_wrap(str(cell_value),
                              self.get_calculated_desired_width())
class WrapperWithCustomFormatter(WrapperLambdaFormatter):
    """A wrapper formatter that allows the programmer to have a custom
    formatter (in the form of a function) that is first applied
    and then a wrapper function is applied to the result
    See wrapperFormatterFactory for a better explanation! :-)
    """
    # noinspection PyUnusedLocal
    def __init__(self, ctx, field, custom_formatter, wrapper_formatter):
        # field is deliberately passed as None: custom_formatter receives the
        # whole row/data object, not a single extracted field.
        super(WrapperWithCustomFormatter, self).__init__(ctx, None,
                                                         lambda data: wrapper_formatter.format(custom_formatter(data)))
        self.wrapper_formatter = wrapper_formatter
        self.custom_formatter = custom_formatter
    def get_unwrapped_field_value(self, data):
        # Unwrapped value = custom formatting only, no width wrapping.
        return self.custom_formatter(data)
    def __setattr__(self, name, value):
        #
        # Some attributes set onto this class need
        # to be pushed down to the 'inner' wrapper_formatter
        #
        super(WrapperWithCustomFormatter, self).__setattr__(name, value)
        # hasattr guard: during __init__, attributes are assigned before
        # self.wrapper_formatter exists; skip the push-down until it does.
        if hasattr(self, "wrapper_formatter"):
            if name == "no_wrap":
                self.wrapper_formatter.no_wrap = value
            if name == "add_blank_line":
                self.wrapper_formatter.add_blank_line = value
            if name == "header_width":
                self.wrapper_formatter.header_width = value
    def set_min_width(self, min_width):
        # Keep outer and inner formatter minimum widths in sync.
        super(WrapperWithCustomFormatter, self).set_min_width(min_width)
        self.wrapper_formatter.set_min_width(min_width)
    def set_actual_column_len(self, actual):
        # Keep outer and inner formatter actual widths in sync.
        super(WrapperWithCustomFormatter, self).set_actual_column_len(actual)
        self.wrapper_formatter.set_actual_column_len(actual)
    def get_basic_desired_width(self):
        # Width is entirely delegated to the inner wrapper formatter.
        return self.wrapper_formatter.get_basic_desired_width()
def wrapper_formatter_factory(ctx, field, formatter):
"""
This function is a factory for building WrapperFormatter objects.
The function needs to be called for each celldata column (field)
that will be displayed in the prettyTable.
The function looks at the formatter parameter and based on its type,
determines what WrapperFormatter to construct per field (column).
ex:
formatter = 15 - type = int : Builds a WrapperFixedWidthFormatter that
will wrap at 15 chars
    formatter = .25 - type = float : Builds a WrapperPercentWidthFormatter that
will wrap at 25% terminal width
formatter = type = callable : Builds a WrapperLambdaFormatter that
will call some arbitrary function
formatter = type = dict : Builds a WrapperWithCustomFormatter that
will call some arbitrary function to format
and then apply a wrapping formatter to the result
          ex: this dict {"formatter" : capitalizeFunction,
                         "wrapperFormatter": .12}
          will apply the capitalizeFunction to the column
          celldata and then wordwrap at 12 % of terminal width
:param ctx: the WrapperContext that the built WrapperFormatter will use
:param field: name of field (column_ that the WrapperFormatter will execute on
:param formatter: specifies type and input for WrapperFormatter that will be built
:return: WrapperFormatter
"""
if isinstance(formatter, WrapperFormatter):
return formatter
if callable(formatter):
return WrapperLambdaFormatter(ctx, field, formatter)
if isinstance(formatter, int):
return WrapperFixedWidthFormatter(ctx, field, formatter)
if isinstance(formatter, float):
return WrapperPercentWidthFormatter(ctx, field, formatter)
if isinstance(formatter, dict):
if "wrapperFormatter" in formatter:
embedded_wrapper_formatter = wrapper_formatter_factory(ctx, None,
formatter["wrapperFormatter"])
elif "hard_width" in formatter:
embedded_wrapper_formatter = WrapperFixedWidthFormatter(ctx, field, formatter["hard_width"])
embedded_wrapper_formatter.min_width = formatter["hard_width"]
else:
| |
frma = "https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/FRMA/"
lio = "https://w3id.org/lio/v1#"
# treeClassQuery = """
# select *
# where{
# ?class a owl:Class.
# ?class rdfs:label ?name.
#
# OPTIONAL {
# ?class rdfs:subClassOf ?super .
# ?super a owl:Class.
# ?super rdfs:label ?super_name.
# }
# }
# """
correctQuery = "filter(?classification = ?Name)"
incorrectQuery = "filter(?classification != ?Name)"
correctQuery2 = "filter(?Correct = ?Total)"
incorrectQuery2 = "filter(?Correct < ?Total)"
treeClassQuery = """
prefix mlmo: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/MachineLearningModelOntology/>
prefix fibo-fnd-arr-arr: <http://www.omg.org/spec/EDMC-FIBO/FND/Arrangements/Arrangements/>
prefix lio: <http://purl.org/net/lio#>
prefix lcc-lr: <http://www.omg.org/spec/LCC/Languages/LanguageRepresentation/>
prefix fibo-fnd-aap-a: <http://www.omg.org/spec/EDMC-FIBO/FND/AgentsAndPeople/Agents/>
prefix img: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/ImageOntology/>
prefix frma: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/FRMA/>
prefix pfd: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/>
prefix ho: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/>
prefix wt: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/WearableThingsOntology/>
select distinct ?class ?name ?super ?super_name
where{
{
?super rdfs:subClassOf ?restriction .
?restriction rdf:type owl:Restriction .
?restriction owl:someValuesFrom ?class .
?class a owl:Class.
?class rdfs:label ?name.
bind(<https://w3id.org/lio/v1#Image> AS ?super)
bind("image" AS ?super_name)
}
union
{
?class a owl:Class.
?class rdfs:label ?name.
?class rdfs:subClassOf ?super .
bind(<https://w3id.org/lio/v1#Image> AS ?super)
bind("image" AS ?super_name)
}
union
{
values (?class ?name ?super ?super_name){
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/FRMA/MugShotPhoto> "mug shot photo" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/ImageOntology/PosedImage> "posed image")
(<https://w3id.org/lio/v1#PictorialElement> "pictorial element" <https://w3id.org/lio/v1#Image> "image")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Demographic> "demographic" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/WearableThingsOntology/WearableObject> "wearable object" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/VisualDescriptor> "visual descriptor" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<http://purl.obolibrary.org/obo/UBERON_0001567> "cheek" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<http://purl.obolibrary.org/obo/UBERON_0008199> "chin" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/Hair> "hair" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person" <https://w3id.org/lio/v1#PictorialElement> "pictorial element")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/HairColor> "hair color" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/Hair> "hair")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/HairTexture> "hair texture" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/HeadHair> "head hair")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/Haircut> "haircut" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/HeadHair> "head hair")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/FacialExpression> "facial expression" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/FaceShape> "face shape" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/SkinTone> "skin tone" <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Person> "person")
}
}
union
{
?class a owl:Class.
?class rdfs:label ?name.
OPTIONAL {
?class rdfs:subClassOf ?super .
?super a owl:Class.
?super rdfs:label ?super_name.
}
}
filter(img:ImageFile != ?class)
filter(mlmo:Datum != ?super)
filter(mlmo:Activity != ?super)
filter(mlmo:Layer != ?super)
filter(mlmo:Dataset != ?super)
filter(mlmo:Model != ?super)
filter(mlmo:NeuralNetwork != ?super)
filter(<http://purl.obolibrary.org/obo/UBERON_0000475> != ?super)
filter(<http://purl.obolibrary.org/obo/UBERON_0011676> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/ForheadVisibility> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/MouthOpenState> != ?super)
filter(<http://purl.obolibrary.org/obo/UBERON_0001444> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/ForheadVisibility> != ?class)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/MouthOpenState> != ?class)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Finish> != ?super)
filter(<http://purl.obolibrary.org/obo/UBERON_0000020> != ?super)
filter(<http://purl.obolibrary.org/obo/UBERON_0000062> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Color> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Color> != ?class)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/Size> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/VisualDescriptor> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/VisualDescriptor> != ?class)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/NoseShape> != ?super)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/HairColor> != ?class)
filter(<https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/ImageOntology/OptimalConditionImage> != ?class)
} ORDER BY ?super_name ?name
"""
# baseQuery = """
# prefix mlmo: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/MachineLearningModelOntology/>
# prefix fibo-fnd-arr-arr: <http://www.omg.org/spec/EDMC-FIBO/FND/Arrangements/Arrangements/>
# prefix lio: <http://purl.org/net/lio#>
# prefix lcc-lr: <http://www.omg.org/spec/LCC/Languages/LanguageRepresentation/>
# prefix fibo-fnd-aap-a: <http://www.omg.org/spec/EDMC-FIBO/FND/AgentsAndPeople/Agents/>
# prefix img: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/ImageOntology/>
# prefix frma: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/FRMA/>
# prefix pfd: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/>
# prefix ho: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/>
# prefix wt: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/WearableThingsOntology/>
#
# select distinct ?Image ?classification ?Name
# where {
# ?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
# ?Result lcc-lr:hasTag ?classification .
# ?Result mlmo:hasFeature ?Image .
# ?Image lio:depicts ?Person .
# ?Person fibo-fnd-aap-a:hasName ?Name .
# """
selectCount = """
select distinct ?Image ?Name ?Correct ?Total
where {
"""
countImages = """
select(count(distinct ?Image) as ?count)
where {
"""
# selectCorrect = """
# select distinct ?Image ?Name (count(distinct ?Result) as ?Correct)
# where {
# """
baseQuery = """
?Image lio:depicts ?Person .
?Person fibo-fnd-aap-a:hasName ?Name .
"""
selectTruePositive = """
select distinct ?Image ?Name (count(distinct ?Result) as ?truePositive)
where {
""" + baseQuery
endTruePositive = """
optional{
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image2 lio:depicts ?Person2 .
?Person2 fibo-fnd-aap-a:hasName ?Name2 .
?Result lcc-lr:hasTag "Match"^^xsd:string .
filter (?Name = ?Name2)
}
} GROUP BY ?Image ?Name
"""
selectTrueNegative = """
select distinct ?Image ?Name (count(distinct ?Result) as ?trueNegative)
where {
""" + baseQuery
endTrueNegative = """
optional{
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image2 lio:depicts ?Person2 .
?Person2 fibo-fnd-aap-a:hasName ?Name2 .
?Result lcc-lr:hasTag "Not a Match"^^xsd:string .
filter (?Name != ?Name2)
}
} GROUP BY ?Image ?Name
"""
# endCorrect = """
# filter(?classification = ?Name)
# } GROUP BY ?Image ?Name
# """
baseQuery = """
?Image lio:depicts ?Person .
?Person fibo-fnd-aap-a:hasName ?Name .
"""
selectTotal = """
select distinct ?Image ?Name (count(distinct ?Result) as ?Total)
where {
"""
baseGraph = """
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image lio:depicts ?Person .
?Image2 lio:depicts ?Person2 .
?Person fibo-fnd-aap-a:hasName ?Name .
"""
endTotal = """
} GROUP BY ?Image ?Name
"""
prefix = """
prefix mlmo: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/MachineLearningModelOntology/>
prefix fibo-fnd-arr-arr: <http://www.omg.org/spec/EDMC-FIBO/FND/Arrangements/Arrangements/>
prefix lio: <http://purl.org/net/lio#>
prefix lcc-lr: <http://www.omg.org/spec/LCC/Languages/LanguageRepresentation/>
prefix fibo-fnd-aap-a: <http://www.omg.org/spec/EDMC-FIBO/FND/AgentsAndPeople/Agents/>
prefix img: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/ImageOntology/>
prefix frma: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/FRMA/>
prefix pfd: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/PersonFaceAndDemographicOntology/>
prefix ho: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/HairOntology/>
prefix wt: <https://tw.rpi.edu/Courses/Ontologies/2018/FRMA/WearableThingsOntology/>
prefix olo: <http://purl.org/ontology/olo/core#>
"""
resultSetQuery = prefix + """
select distinct ?ResultSet
where{
?ResultSet a mlmo:ResultSet .
}
"""
baseCountQuery = """
select (count(distinct ?Result) as ?count)
where {
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image lio:depicts ?Person .
?Image2 lio:depicts ?Person2 .
"""
baseAccQuery = """
## select ?numCorrect ?count
select (?truePositive + ?trueNegative as ?numCorrect) ?count
where {
"""
# baseNumCorrectQuery = """
# select (count(distinct ?classification) as ?numCorrect)
# where {
# ?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
# ?Result mlmo:hasFeature ?Image .
# ?Image lio:depicts ?Person .
# ?Person fibo-fnd-aap-a:hasName ?Name .
# ?Result lcc-lr:hasTag ?classification .
# filter (?classification = ?Name)
# """
baseNumTruePositive = """
select (count(distinct ?Result) as ?truePositive)
where {
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image lio:depicts ?Person .
?Image2 lio:depicts ?Person2 .
?Person fibo-fnd-aap-a:hasName ?Name .
?Person2 fibo-fnd-aap-a:hasName ?Name2 .
?Result lcc-lr:hasTag "Match"^^xsd:string .
filter (?Name = ?Name2)
"""
baseNumTrueNegative = """
select (count(distinct ?Result) as ?trueNegative)
where {
?ResultSet fibo-fnd-arr-arr:hasConstituent ?Result .
?Result mlmo:hasFeature ?Image .
?Result mlmo:hasFeature ?Image2 .
filter(?Image != ?Image2)
?Image lio:depicts ?Person .
?Image2 lio:depicts ?Person2 .
?Person fibo-fnd-aap-a:hasName ?Name .
?Person2 fibo-fnd-aap-a:hasName ?Name2 .
?Result lcc-lr:hasTag "Not a Match"^^xsd:string .
filter (?Name != ?Name2)
"""
mugshotQuery = """
?Image a img:PosedImage.
# Indoors
?Image lio:hasDepictedBackground ?background .
?background a img:Indoors .
# No Face occlusions
OPTIONAL{
?Image frma:hasOcclusion ?occlusion.
}
minus{
?occlusion a ?FaceOcclusionClass .
?FaceOcclusionClass rdfs:subClassOf* frma:FaceOcclusion .
}
# Image Fidelity not blurry
?Image img:fidelityDescribedBy ?fidelity .
minus{
?fidelity a img:BlurryImageFidelity .
}
"""
occlusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?occlusionClass .
?occlusionClass rdfs:subClassOf* frma:Occlusion .
"""
cervicalOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?CervicalOcclusionClass .
?CervicalOcclusionClass rdfs:subClassOf* frma:CervicalOcclusion .
"""
FaceOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?FaceOcclusionClass .
?FaceOcclusionClass rdfs:subClassOf* frma:FaceOcclusion .
"""
LowerFaceOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?LowerFaceOcclusionClass .
?LowerFaceOcclusionClass rdfs:subClassOf* frma:LowerFaceOcclusion .
"""
UpperFaceOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?UpperFaceOcclusionClass .
?UpperFaceOcclusionClass rdfs:subClassOf* frma:UpperFaceOcclusion .
"""
BuccalOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?BuccalOcclusionClass .
?BuccalOcclusionClass rdfs:subClassOf* frma:BuccalOcclusion .
"""
OralOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?OralOcclusionClass .
?OralOcclusionClass rdfs:subClassOf* frma:OralOcclusion .
"""
MentalOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?MentalOcclusionClass .
?MentalOcclusionClass rdfs:subClassOf* frma:MentalOcclusion .
"""
ParotidOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?ParotidOcclusionClass .
?ParotidOcclusionClass rdfs:subClassOf* frma:ParotidOcclusion .
"""
ZygomaticOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?ZygomaticOcclusionClass .
?ZygomaticOcclusionClass rdfs:subClassOf* frma:ZygomaticOcclusion .
"""
AuricleOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?AuricleOcclusionClass .
?AuricleOcclusionClass rdfs:subClassOf* frma:AuricleOcclusion .
"""
CranialOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?CranialOcclusionClass .
?CranialOcclusionClass rdfs:subClassOf* frma:CranialOcclusion .
"""
FrontalOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?FrontalOcclusionClass .
?FrontalOcclusionClass rdfs:subClassOf* frma:FrontalOcclusion .
"""
OcularOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?OcularOcclusionClass .
?OcularOcclusionClass rdfs:subClassOf* frma:OcularOcclusion .
"""
NasalOcclusionQuery = """
?Image frma:hasOcclusion ?occlusion .
?occlusion a ?NasalOcclusionClass .
?NasalOcclusionClass rdfs:subClassOf* frma:NasalOcclusion .
"""
CheekQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?cheek .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?cheek a ?cheekClass .
?cheekClass rdfs:subClassOf* <http://purl.obolibrary.org/obo/UBERON_0001567> .
"""
HighCheekQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?cheek .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?cheek a pfd:HighCheekbones .
"""
RosyCheekQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?cheek .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?cheek a pfd:RosyCheeks .
"""
ChinQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?chin .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?chin a <http://purl.obolibrary.org/obo/UBERON_0008199> .
"""
DoubleChinQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?chin .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?chin a pfd:DoubleChin .
"""
RoundJawQuery = """
?Person <http://purl.obolibrary.org/obo/BFO_0000051> ?Face .
?Face <http://purl.obolibrary.org/obo/BFO_0000051> ?chin .
?Face a <http://purl.obolibrary.org/obo/UBERON_0001456> .
?chin a pfd:RoundJaw .
"""
DemographicQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a ?demoClass .
?demoClass rdfs:subClassOf* pfd:Demographic .
"""
AgeRangeQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a ?demoClass .
?demoClass rdfs:subClassOf* pfd:AgeRange .
"""
BabyQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a pfd:Baby .
"""
ChildQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a pfd:Child .
"""
MiddleAgedQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a pfd:MiddleAged .
"""
SeniorQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a pfd:Senior .
"""
YouthQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a pfd:Baby .
"""
EthnicityQuery = """
?Person pfd:hasDemographic ?Demo .
?Demo a ?demoClass .
?demoClass rdfs:subClassOf* pfd:Ethnicity .
"""
AsianQuery = | |
<gh_stars>1-10
# ----------------------------------------------------------------------------------------------------------------------
# Analytics API
# ----------------------------------------------------------------------------------------------------------------------
# imports
import datetime
import os
import io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as plt_dates
from collections import OrderedDict
# third party imports
from basketball_reference_web_scraper import client
# relative imports
from .constants import Vars
from .team_box_score import TeamBoxScore
def get_player_box_score(name, logger, date_obj=None, timeout=3):
    """
    Searches backwards, day by day, for the most recent box score of a player.

    :param str name: Name of the player (case-insensitive substring match).
    :param logger: Logging object.
    :param datetime.datetime date_obj: Starting day; defaults to today.
    :param int timeout: Number of days to search before giving up.
    :return: The box score dict (or None if not found) and the date last probed.
    :rtype: tuple
    """
    target = name.lower()
    if date_obj is None:
        date_obj = datetime.datetime.today()
    result = None
    remaining = timeout
    while remaining > 0:
        logger.info('Attempting date: %s' % date_obj.strftime('%y-%m-%d'))
        day_scores = client.player_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
        matches = [entry for entry in day_scores if target in entry['name'].lower()]
        if matches:
            result = matches[0]
            break
        date_obj -= datetime.timedelta(days=1)
        remaining -= 1
    else:
        # while-loop exhausted without a hit
        logger.info("Timeout reached.")
    return result, date_obj
def get_team_box_score(team, date_obj=None, timeout=3):
    """
    Gets the team box score data for a specific day.

    NOTE(review): the ``team`` argument is currently unused, and the loop
    never breaks early on success (see the todo below): every iteration
    overwrites ``team_bs``, so the value returned comes from the *oldest* day
    probed (``timeout`` days back).  Confirm whether an early break on a
    non-empty result was intended, as in ``get_teams_played_on_date``.

    :param str team: The team to search for.
    :param datetime.datetime date_obj: Datetime object for starting day to search.
    :param int timeout: Number of days to search before giving up.
    :return: team box scores from the last day queried, or None when timeout <= 0
    """
    if date_obj is None:
        date_obj = datetime.datetime.today()
    team_bs = None
    while True:
        if timeout > 0:
            team_bs = client.team_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
            # todo
            date_obj -= datetime.timedelta(days=1)
            timeout -= 1
        else:
            break
    return team_bs
def get_daily_box_scores(date_obj=None, timeout=1):
    """
    Collects every player box score for the most recent day with games,
    grouped by team name.

    :param datetime.datetime date_obj: Starting day; defaults to today.
    :param int timeout: Number of days to search before giving up.
    :return: Player box scores keyed by team, and the date they were found on.
    :rtype: tuple
    """
    grouped = OrderedDict()
    if date_obj is None:
        date_obj = datetime.datetime.today()
    remaining = timeout
    while remaining > 0:
        active_teams = get_teams_played_on_date(date_obj=date_obj)
        if active_teams:
            day_scores = client.player_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
            # pre-create one bucket per active team, then distribute players
            for team_name in active_teams:
                grouped[team_name] = []
            for player in day_scores:
                grouped[player['team'].name].append(player)
            break
        date_obj -= datetime.timedelta(days=1)
        remaining -= 1
    return grouped, date_obj
def get_teams_played_on_date(date_obj=None, timeout=1):
    """
    Lists the teams that played on the given date, scanning backwards up to
    ``timeout`` days until a day with games is found.

    :param datetime.datetime date_obj: Starting day; defaults to today.
    :param int timeout: Number of days to search before giving up.
    :return: The active teams on the date found (empty list if none).
    :rtype: list
    """
    active = []
    if date_obj is None:
        date_obj = datetime.datetime.today()
    remaining = timeout
    while remaining > 0:
        day_scores = client.team_box_scores(day=date_obj.day, month=date_obj.month, year=date_obj.year)
        # a real game day yields more than one team entry
        if len(day_scores) > 1:
            active = [entry['team'].name for entry in day_scores]
            break
        date_obj -= datetime.timedelta(days=1)
        remaining -= 1
    return active
def convert_to_minutes(seconds_played):
    """
    Converts a seconds-played figure into minutes, rounded to 2 decimals.

    :param seconds_played: Seconds played.
    :return: Minutes played
    :rtype: float
    """
    return round(seconds_played / 60.0, 2)
def get_true_shooting(points, fga, tpfga, fta):
    """
    Calculates true shooting percentage: TS% = PTS / (2 * (FGA + 0.44 * FTA)).

    Bug fix: three point attempts are already included in ``fga``; the
    previous formula added ``tpfga`` on top of ``fga``, double-counting
    threes and deflating the percentage.  The parameter is kept so existing
    callers keep working, but it no longer enters the formula.

    :param int points: Points
    :param int fga: Field goals attempted (includes three point attempts)
    :param int tpfga: Three point field goals attempted (unused; kept for compatibility)
    :param int fta: Free throws attempted
    :return: True shooting percentage rounded to 3 decimals (0 with no attempts)
    :rtype: float
    """
    try:
        ts = points / (2.0 * (fga + 0.44 * fta))
    except ZeroDivisionError:
        ts = 0
    return round(ts, 3)
def get_assist_turnover_ratio(assists, turnovers):
    """
    Calculates the ratio of assists to turnovers, rounded to 2 decimals.
    With zero turnovers the assist count itself is returned.

    :param assists: Number of assists.
    :param turnovers: Number of turnovers.
    :return: The ratio
    :rtype: float
    """
    numerator = float(assists)
    ratio = numerator / turnovers if turnovers else numerator
    return round(ratio, 2)
def check_supported_stats(stats):
    """
    Checks whether every requested stat name is supported.

    :param stats: The stat names to check.
    :return: True only if all provided stats are supported.
    :rtype: bool
    """
    # all() short-circuits on the first unsupported stat, like the old loop
    return all(stat in Vars.supported_stats for stat in stats)
def convert_team_name(team):
    """
    Converts an underscore-separated team enum name into title case,
    e.g. ``'BOSTON_CELTICS'`` -> ``'Boston Celtics'``.

    :param str team: Team enum name
    :return: Converted string
    """
    title_cased = team.title()
    return title_cased.replace('_', ' ')
# ----------------------------------------------------------------------------------------------------------------------
# Pandas interactions
# ----------------------------------------------------------------------------------------------------------------------
def get_existing_data_frame(csv_path, logger):
    """
    Loads a previously saved data frame from disk if the csv exists.

    :param str csv_path: Path of the csv file.
    :param logger: Instance of logger object.
    :return: Data frame if the file exists, None otherwise
    :rtype: pd.DataFrame
    """
    if not os.path.exists(csv_path):
        return None
    logger.info("Existing data frame found.")
    return pd.read_csv(csv_path, index_col=0)
def gather_new_on_date(date, csv, logger):
    """
    Gathers new player box score data from a specific date and updates the given csv if provided.

    Fixes: the "Dropped N duplicates" message was logged twice in a row, and
    the removed-in-pandas-2.0 ``DataFrame.append`` is replaced by the
    equivalent ``pd.concat``.

    :param datetime.datetime date: The date to search on
    :param str csv: The path to the csv
    :param logger: Logging object
    :return: The pandas.DataFrame object
    """
    team_box_scores = []
    df = get_existing_data_frame(csv, logger=logger)
    daily_box_scores, found_date = get_daily_box_scores(date_obj=date)
    for team in daily_box_scores.keys():
        team_box_scores.append(TeamBoxScore(box_scores=daily_box_scores[team],
                                            team_box_score=[],
                                            team_name=team,
                                            date=found_date))
    new_df = create_data_frame_from_team_box_scores(team_box_scores=team_box_scores, logger=logger)
    if df is None:
        logger.info('There was not an existing data frame.')
        df = new_df
    else:
        logger.info('Appending new data frame of shape: %s' % (new_df.shape,))
        # DataFrame.append was removed in pandas 2.0; concat is equivalent here
        temp_df = pd.concat([df, new_df], sort=False)
        temp_size = temp_df.shape[0]
        # add new columns with ops from existing data
        temp_df['minutes_played'] = temp_df['seconds_played'].apply(convert_to_minutes)
        temp_df['true_shooting'] = temp_df.apply(
            lambda x: get_true_shooting(x['points'],
                                        x['attempted_field_goals'],
                                        x['attempted_three_point_field_goals'],
                                        x['attempted_free_throws']),
            axis=1)
        temp_df['assist_turnover_ratio'] = temp_df.apply(
            lambda x: get_assist_turnover_ratio(x['assists'],
                                                x['turnovers']),
            axis=1)
        temp_df.drop_duplicates(inplace=True)
        temp_size = temp_size - temp_df.shape[0]
        # previously emitted twice; log once
        logger.info('Dropped %s duplicates' % temp_size)
        df = temp_df
    logger.info('Shape of DataFrame object: %s' % (df.shape,))
    df.to_csv(csv)
    return df
def create_data_frame_from_team_box_scores(team_box_scores, logger):
    """
    Creates a pandas data frame object from a list of team box score objects.

    The resulting frame is indexed by player name, with one column per stat
    listed in ``Vars.supported_stats``.

    :param list team_box_scores: Team box score objects
    :param logger: Instance of logger object
    :return: Pandas data frame
    :rtype: pd.DataFrame
    """
    logger.info(" Appending new data frame from %s teams" % len(team_box_scores))
    data = {}
    index = []
    # one empty column list per supported stat
    for stat in Vars.supported_stats:
        data[stat] = []
    # flatten each team's per-player stat lists into the column lists; the
    # player names collected here become the frame's index
    for tbs in team_box_scores:
        index.extend(tbs.get_players())
        data['points'].extend(tbs.get_points())
        data['rebounds'].extend(tbs.get_rebounds())
        data['assists'].extend(tbs.get_assists())
        data['made_field_goals'].extend(tbs.get_made_field_goals())
        data['made_three_point_field_goals'].extend(tbs.get_made_three_point_field_goals())
        data['made_free_throws'].extend(tbs.get_made_free_throws())
        data['offensive_rebounds'].extend(tbs.get_offensive_rebounds())
        data['defensive_rebounds'].extend(tbs.get_defensive_rebounds())
        data['team'].extend(tbs.get_teams())
        data['location'].extend(tbs.get_locations())
        data['opponent'].extend(tbs.get_opponents())
        data['outcome'].extend(tbs.get_outcomes())
        data['seconds_played'].extend(tbs.get_seconds_played())
        data['attempted_three_point_field_goals'].extend(tbs.get_attempted_three_point_field_goals())
        data['attempted_free_throws'].extend(tbs.get_attempted_free_throws())
        data['attempted_field_goals'].extend(tbs.get_attempted_field_goals())
        data['steals'].extend(tbs.get_steals())
        data['blocks'].extend(tbs.get_blocks())
        data['turnovers'].extend(tbs.get_turnovers())
        data['personal_fouls'].extend(tbs.get_personal_fouls())
        data['game_score'].extend(tbs.get_game_scores())
        data['date'].extend(tbs.get_dates())
    # log which teams made it into the frame
    if data['team']:
        teams = list(set(data['team']))
        for team in teams:
            logger.info('   %s' % team)
    df = pd.DataFrame(data, index=index)
    return df
def get_team_date_df(df, team, date):
    """
    Filters the data frame down to one team's player rows on one day.

    :param pandas.DataFrame df: The data frame to search.
    :param str team: The team to search for (enum-style name).
    :param datetime.datetime date: The date to search on.
    :return: Team data frame if ``date`` is a datetime, otherwise None
    """
    if not isinstance(date, datetime.datetime):
        return None
    date_key = date.strftime('%y_%m_%d')
    mask = (df['date'] == date_key) & (df['team'] == team)
    return df[mask]
def filter_df_on_team_names(df, teams):
    """
    Keeps only the rows whose team matches one of the given team names.

    :param pandas.DataFrame df: The data frame to search.
    :param list teams: Human-readable team names (e.g. 'Boston Celtics').
    :return: Team filtered data frame (empty if no team matches).
    """
    # normalize 'Boston Celtics' -> 'BOSTON_CELTICS' to match the enum names
    enum_names = [name.upper().replace(' ', '_') for name in teams]
    return df[df['team'].isin(enum_names)]
def get_most_recent_update_date(df, date_col='date'):
    """
    Gets the most recent date present in the data frame's date column.

    :param pandas.DataFrame df: The pandas.DataFrame object
    :param str date_col: The column to reference in the DataFrame object
    :return: The date found
    :rtype: datetime.datetime
    """
    parsed = pd.to_datetime(df[date_col], format='%y_%m_%d')
    latest = parsed.max()
    return datetime.datetime(year=latest.year, month=latest.month, day=latest.day)
def get_team_result_on_date(team, date, df):
    """
    Builds the final score string for a team's game on a given date.

    :param str team: Team to search for (human readable, e.g. 'Boston Celtics')
    :param datetime.datetime date: The date to search on
    :param pandas.DataFrame df: The data set to search in
    :return: The score as a string, ex: 97-88. The desired team's score is first.
    """
    team_key = team.replace(' ', '_').upper()
    date_key = date.strftime('%y_%m_%d')
    # only rows with points > 0 count towards the team totals
    scorers = df[(df['team'] == team_key) & (df['date'] == date_key) & (df['points'] > 0)]
    opponent_key = scorers['opponent'].values[0]
    opp_scorers = df[(df['team'] == opponent_key) & (df['date'] == date_key) & (df['points'] > 0)]
    own_total = int(np.sum(scorers['points']))
    opp_total = int(np.sum(opp_scorers['points']))
    return '%s-%s' % (own_total, opp_total)
def create_scatter_plot_with_trend_line(x_key, y_key, df, **kwargs):
"""
Creates a scatter plot for two different series of a pandas data frame.
:param str x_key: The column name in the data frame to use for the x axis.
| |
<filename>dss_server.py
# -*- coding: utf-8 -*-
"""
dss_server.py
Provides class DSSServer, a configuration-based master server for DSN antennae.
If this is run as a standalone program then the appropriate environment should
be activated.
On crux ``pipenv`` is the environment manager. ``/home/ops/dss-monitor-control``
has the correct environment.
On host ``kuiper``, ``conda`` is the environment manager:
```
$ source activate DSSserver
```
Examples
========
```
$ python dss_server2.py --help
usage: dss_server2.py [-h] [--verbose] [--simulated] [--flask] [--flaskio]
Fire up DSS control server.
optional arguments:
-h, --help show this help message and exit
--verbose, -v In verbose mode, the log level is DEBUG
--simulated, -s In simulated mode, DSS Server won't attempt to connect to
antenna hardware server.
--flask, -f Run server as flask server
--flaskio, -fio Run server as flask io server
```
Note that the Flask interface does not support callbacks in the server, that is,
all callback handler trying to use a callback method to return data will fail.
Example of session to test the server::
```
(DSSserver) kuiper@kuiper:~$ python
>>> hardware = {"Antenna": False,"Receiver": False,"Backend": False,"FrontEnd": False}
>>> from MonitorControl.Configurations import station_configuration
>>> observatory, equipment = station_configuration('WBDC2_K2',hardware=hardware)
>>> from MonitorControl.apps.server.dss_server2 import DSSServer
>>> server = DSSServer(observatory, equipment)
>>> server.observatory
Observatory "Canberra"
>>> server.equipment
{'FE_selector': None, 'FrontEnd': K_4ch "K",
'Telescope': None, 'Antenna': DSN_Antenna "DSS-43",
'Receiver': WBDC2 "WBDC-2", 'Rx_selector': None,
'Backend': SAOspec "SAO spectrometer", 'sampling_clock': None,
'IF_switch': IFswitch "Patch Panel"}
>>> server.info
{'project': {'name': 'TAMS', 'source_dir': '/usr/local/projects/TAMS/Observations'},
'sources': {},
'verifiers': {},
'info_save_dir': '/usr/local/RA_data/status/DSSServer',
'point': {'current_source': None},
'tsys_calibration': {'date': None,
'el': None,
'running': False,
'data_dir': '/usr/local/RA_data/tsys_calibration_data',
'tsys_factors': [ 999883083.3775496, 421958318.055633,
1374067124.697352, 705797017.1087824]},
'tip': {'running': False, 'data_dir': '/usr/local/RA_data/tipping_data'},
'boresight': {'running': False,
'data_dir': '/usr/local/RA_data/boresight_data',
'offset_xel': 0.0,
'offset_el': 0.0}}
>>> server._get_observer_info_dict()
{'lat': -35:24:14.3, 'elevation': 688.867, 'epoch': 2000/1/1 12:00:00,
'lon': -211:01:11.8, 'date': 2019/2/2 21:28:07}
>>> server.get_projects()
[u'FREGGS', u'UV_Ceti', u'ISM_RRL', u'67P', u'AUTO_PSR']
>>> server.get_activities()
['AUT1', 'BMP0', 'EGG0',
'PSR0', 'PSR1', 'PSR2', 'PSR3', 'PSR4', 'PSR5', 'PSR6',
'RRL0',
'UVC0']
>>> server.get_equipment()
{'Backend': SAOspec "SAO spectrometer", 'FrontEnd': K_4ch "K",
'IF_switch': IFswitch "Patch Panel", 'Antenna': DSN_Antenna "DSS-43",
'Receiver': WBDC2 "WBDC-2"}
```
Notes
=====
The Flask client can make two sorts of calls on the server, with arguments and
without. If no arguments are given, then the server method should have a normal
``return``. If the client passes arguments, then the method should have a
decorator ``@async_method`` and should return data to the client with callbacks
with names like this:
```
servermethod.cb(data)
```
A decorated method can still have a normal ``return`` for when it is called
from within the server program itself.
"""
import astropy
import astropy.io.fits as pyfits
import astropy.units as u
import calendar
import copy
import pickle as pickle
import datetime
import dateutil
import ephem
import logging
import h5py
import importlib
import json
import math
import numpy as np
import os
import Pyro5
import queue
import random
import threading
import time
import socket
import signal
import six
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
module_logger = logging.getLogger(__name__)
import Astronomy as A
#from Astronomy.Ephem import SerializableBody
import Astronomy.Ephem as Aeph
#from Astronomy.redshift import V_LSR
import Astronomy.redshift as Ared
import Data_Reduction.boresights.boresight_manager as BSM
import Data_Reduction.boresights.analyzer as BSA
import Data_Reduction.FITS.DSNFITS as DSNFITS
#from Data_Reduction.FITS.DSNFITS import FITSfile
import DatesTimes as DT
#from DatesTimes import UnixTime_to_datetime
import MonitorControl as MC
#from MonitorControl import ActionThread, MonitorControlError
import MonitorControl.DSS_server_cfg as DSScfg
#from MonitorControl.DSS_server_cfg import tams_config # not really TAMS
import MonitorControl.Configurations as MCcfg
#from MonitorControl.Configurations import station_configuration
from MonitorControl.Configurations.GDSCC.WVSR import station_configuration as std_configuration
import MonitorControl.Configurations.projects as projcfg # project configuration
import MonitorControl.Receivers.DSN as DSNrx
#from MonitorControl.Receivers.DSN import DSN_rx
import Physics.Radiation.Lines.recomb_lines as recomb
#from Physics.Radiation.Lines.recomb_lines import recomb_freq
import Radio_Astronomy.bands as bands
#from Radio_Astronomy.bands import frequency_to_band
import support
from support.asyncio.pyro import async_method
import support.logs # support.logs import setup_logging
import support.flask_server # from support.flask_server import FlaskServer
import support.pyro.socket_error # from support.pyro.socket_error import register_socket_error
import support.test #from support.test import auto_test
import support.text #from support.text import make_title
from local_dirs import data_dir, proj_conf_path, projects_dir
# this is needed so it looks like a package even when it is run as a program
if __name__ == "__main__" and __package__ is None:
__package__ = "MonitorControl"
__all__ = ["DSSServer"]
def nowgmt():
    """Return the current epoch time shifted by the local standard-time UTC
    offset (``time.altzone``), as used by this server for GMT-ish stamps."""
    offset = time.altzone
    return time.time() + offset
def logtime():
    """Return the current UTC time formatted as ``HH:MM:SS.mmm`` for logs."""
    stamp = datetime.datetime.utcnow()
    return stamp.strftime("%H:%M:%S.%f")[:-3]
# make socket errors serializable so Pyro can pass them back to clients
support.pyro.socket_error.register_socket_error()
# temporary; need to construct from project, dss, and band
# maps project name -> {activity code -> hardware configuration name}
configs = {
  '67P': {'67P0': 'WBDC2_K2'},
  'AUTO_PSR': {'PSR0': 'WVSR14L',
               'PSR1': 'WVSR14L',
               'PSR2': 'WVSR14L',
               'PSR3': 'WVSR14L',
               'PSR4': 'WVSR14S',
               'PSR5': 'WVSR14L',
               'PSR6': 'WVSR14L'
              },
  'FREGGS': {'EGG0': 'WVSR14X'},
  'ISM_RRL': {'RRL0': 'WBDC2_K2'},
  'TAMS': {'TMS0': 'WBDC2_K2'},
  'UV_Ceti': {'UVC0': 'WVSR14L'}
}
# defaults
obsmode = 'LINEBPSW'        # observing mode
veldef = 'RADI-OBS'         # velocity definition
equinox = 2000              # coordinate equinox
restfreq = 22235.120 # H2O maser, MHz
@Pyro5.api.expose
class DSSServer(support.flask_server.FlaskServer):
"""
Server that integrates functionality from a DSS station's hardware configuration.
Many calibration and observing routines rely on integrating monitor and control
data from various hardware subsystems in an antenna. For example, boresight,
or pointing, involves changing antenna position offsets while reading
power meter data.
Attributes by Category:
Program Data and Parameters:
activity: sub-class of project which defines the configuration
boresight_manager: (BoresightManager) post processing manager object
for retrieving old boresight results.
configs: a list of all known hardware configurations
fe_signals
#specQueue
specHandler
info: (dict) dictionary containing information about current
status of different long running calibration and
observation methods, as well as sources, and verifiers.
last_rec
last_scan
log_n_avg
lock
logger
n_scans: (int) number of scans in current sequence
rx_signals
Observing parameters:
bandwidth
beams
el_offset
elevation
humidity
location
observatory: (MonitorControl.Observatory) Observatory instance
obsfreq
obsmode
pols
pressure
record_int_time
restfreq
SB
signal_end
styles
telescope
temperature
winddirection
windspeed
xel_offset
Source Information:
activity
azimuth
dec
ordered
RA
source
Data:
bintabHDU
dims
filename
gallery
hdulist
HDUs
fitsfile
polcodes
scans
Equipment:
equipment: (dict) dictionary describing antenna/station hardware
frontend
receiver
patchpanel
backend
roachnames
num_chan
Methods:
Start-up and shut-down methods:
__init__(...)
set_info(path, val)
get_info(path=None)
save_info()
load_info()
close()
Hardware control:
configure(import_path, *args, **kwargs)
hdwr(hdwr, method_name, *args, **kwargs)
list_hdwr()
Source management:
load_sources(loader="json")
get_sources(source_names=None, when=None, filter_fn=None, formatter=None)
report_source_info(name_or_dict, units="degrees")
is_within(name_or_dict, bounds, axis="el")
_get_source_from_str_or_dict(name_or_dict, **kwargs)
_get_src_info_dict(src_dict)
_add_src_info_to_file(f_obj, src_dict)
Observatory details:
_get_observer_info_dict()
Antenna control:
point(name_or_dict)
Data acquisition}:
get_tsys(timestamp=False): return list of Tsys obtained from HP power meters
single_scan(feed, scan_time=60.0, integration_time=5.0): returns a single
spectrum
two_beam_nod(cycles=1, scan_time=60.0, integration_time=None): starts a
sequence of spectral scans in beam and position switching mode
single_beam_nodding(cycles=1, time_per_scan=60.0, integration_time=5.0,
power_meter_monitor_interval=2.0, antenna_monitor_interval=2.0): starts
a position switching sequence
Calibration:
scanning_boresight(el_previous_offset, xel_previous_offset,
limit=99.0, sample_rate=0.3, rate=3.0, settle_time=10.0,
src_name_or_dict=None,two_direction=True, additional_offsets=None,
channel=0, attrs=None)
stepping_boresight(el_previous_offset, xel_previous_offset,
n_points=9, integration_time=2, settle_time=10.0, two_direction=True,
src_name_or_dict=None, additional_offsets=None, channel=0, attrs=None)
get_boresight_analyzer_object(file_path)
get_most_recent_boresight_analyzer_object()
process_minical_calib(cal_data, Tlna=25, Tf=1, Fghz=20, TcorrNDcoupling=0)
tsys_calibration(settle_time=10.0, pm_integration_time=5.0)
stop_tsys_calibration()
tip()
File management:
_create_calibration_file_path(...):
_create_calibration_file_obj(...):
get_boresight_file_paths(...):
Miscellaneous:
server_time(): returns current time
Pyro5Server Initialization Arguments(**kwargs):
cls: a class whose methods and attribute the server accesses by
instantiating an object.
obj: an object whose methods and attributes the server accesses.
cls_args:
cls_kwargs:
name: optional; defaults to class name
logger: optional logger; defaults to Pyro5Server logger
kwargs: EventEmitter keyword arguments
"""
fillcolors = {
"antenna": "#FF4136",
"calibrator": "#B10DC9",
"catalog-priority-1": "#001F3F",
"catalog-priority-2": "#0074D9",
"catalog-priority-3": "#7FDBFF",
"catalog-priority-4": "#39CCCC",
"catalog-priority-5": "#94D6E7",
"catalog-priority-6": "#52B552",
"catalog-priority-7": "#A5DE94",
"catalog-priority-8": "#E78CC6",
"catalog-priority-9": "#D66321",
"catalog-priority-15": "#CE84C6",
"catalog-attention": "#FFD7B5",
"catalog-done": "#F7FFCE",
"catalog-far": "#E0E0E0",
"catalog-intermediate": "#A1A1A1",
"catalog-near": "#404040",
"known-HII": "#FFFF10",
"known-line-source": "#D6EF39",
"known-maser": "#FF851B"
}
order = [
"antenna", "calibrator",
"catalog-priority-1", "catalog-priority-2", "catalog-priority-3",
"catalog-priority-4", "catalog-priority-5", "catalog-priority-6",
"catalog-priority-7", "catalog-priority-8", "catalog-priority-9",
"catalog-priority-15",
"catalog-attention", "catalog-done",
"catalog-far", "catalog-intermediate", "catalog-near",
"known-HII", "known-line-source", "known-maser"]
def __init__(self, context, # required
project="TAMS",
import_path=None,
config_args=None,
config_kwargs=None,
boresight_manager_file_paths=None,
boresight_manager_kwargs=None,
**kwargs):
"""
initialize a DSSServer
Args:
observatory: see class documentation
equipment: see class documentation
import_path: (str) a path whose corresponding module
has a station_configuration function
config_args: (tuple/list) passed to station_configuration
config_kwargs: (dict) passed to station_configuration
boresight_manager_file_paths: t.b.d.
"""
super(DSSServer, self).__init__(obj=self, **kwargs)
# a valid project must be provided
if project not in projcfg.get_projects():
self.logger.error("__init__: %s not recognized", project)
raise RuntimeError("%s is invalid projecty" % project)
self.logger.debug("__init__: project is %s", project)
self.project = project
self.project_dir = projects_dir + self.project + "/"
self.project_conf_path = proj_conf_path + self.project + "/"
self.status_dir = self.project_dir+ "Status/"+ self.__class__.__name__ + "/"
# get a dict with context names and paths to their configurations
self.configs = self.get_configs()
self.logger.debug("__init__: configurations: %s", self.configs)
# allow for non-standard configurations; needed even without hardware
# this creates attributes:
# observatory
# equipment
self._config_hw(context,
import_path=import_path,
config_args=config_args,
config_kwargs=config_kwargs)
# initialize a boresight manager
self._config_bore(boresight_manager_file_paths=boresight_manager_file_paths,
boresight_manager_kwargs=boresight_manager_kwargs)
self._init_info()
self.activity = None
# categories present in the current set of sources in the correct order
self.ordered = []
# gallery of spectra by scan and record
self.gallery = {}
# convenient attributes
self.telescope = self.equipment['Antenna']
self.frontend = self.equipment['FrontEnd']
self.receiver = self.equipment['Receiver']
self.patchpanel = self.equipment['IF_switch']
self.backend = self.equipment['Backend']
self.logger.debug("__init__: backend is %s", self.backend)
self.roachnames = self.backend.roachnames
# initialize a FITS file
self.initialize_FITS()
# keep track of scans received
self.scans = [] # don't confuse with self.backend.scans
# telescope location
self.logger.debug("__init__: | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
https://docs.python.org/2/library/subprocess.html#popen-objects
http://stackoverflow.com/questions/1606795/catching-stdout-in-realtime-from-subprocess
http://askubuntu.com/questions/458041/find-x-window-name
http://stackoverflow.com/questions/9681959/how-can-i-use-xdotool-from-within-a-python-module-script
http://manpages.ubuntu.com/manpages/trusty/en/man1/avconv.1.html
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
xwininfo gives window info: xwininfo: Window id: 0x2800010 "0 A.D."
xdotool:
sudo apt-get install libx11-dev libxtst-dev libXinerama-dev
make
make install
https://github.com/nullkey/glc/wiki/Capture
glc-capture --start --fps=30 --resize=1.0 --disable-audio --out=pyro.glc ./launcher.py
glc-play pyro.glc -o - -y 1 | avconv -i - -an -y pyro.mp4
avconv -i pyro.mp4 -codec copy -ss 15 -y pyro01.mp4
qt-faststart pyro01.mp4 pyro02.mp4
mplayer pyro02.mp4
'''
VERSION = "0.2.0"
import os, sys, subprocess, time, json
from time import sleep
sys.dont_write_bytecode = True
## maps etc.
from data import data
## ANSI terminal escape codes, consumed by printc()/stdc() below
bcolors = {
  "Bold": "\033[1m",
  "Header" : "\033[95m",
  "LBlue" : "\033[94m", ## light blue
  "DBlue" : "\033[34m", ## dark blue
  "OKGreen" : "\033[32m", ## dark Green
  "Green" : "\033[92m", ## light green
  "Warn" : "\033[33m", ## orange
  "Fail" : "\033[91m",
  "End" : "\033[0m",
  # orange='\033[33m'
}
def printc(color, text):
    """Print *text* on its own line, wrapped in the ANSI color *color*."""
    colored = "%s%s%s" % (bcolors[color], text, bcolors["End"])
    print(colored)
def stdc(color, text):
    """Write *text* to stdout in the ANSI color *color*, without a newline."""
    colored = "%s%s%s" % (bcolors[color], text, bcolors["End"])
    sys.stdout.write(colored)
## filesystem roots everything below is derived from
folders = {
  "pro" : "/home/noiv/Desktop/0ad", ## project
  "rel" : "/usr/games/0ad", ## release
  "trunk" : "/Daten/Projects/Osiris/ps/trunk", ## svn
  "share" : "/home/noiv/.local/share", ## user mod
}
## the game binary
locations = {
  "rel" : folders["rel"], ## release
  "svn" : folders["trunk"] + "/binaries/system/pyrogenesis", ## svn
  "hbl" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/", ## bot folder
  "deb" : folders["share"] + "/0ad/mods/hannibal/simulation/ai/hannibal/_debug.js", ## bot folder
  "log" : folders["pro"] + "/last.log", ## log file
  "ana" : folders["pro"] + "/analysis/", ## analysis csv file
}
## Hannibal log/debug options + data, readable by JS and Python
## (serialized into _debug.js by writeDEBUG())
DEBUG = {
  ## default map
  "map": "scenarios/Arcadia 02",
  ## counter
  "counter": [],
  ## num: 0=no numerus
  ## xdo: move window, sim speed
  ## fil can use files
  ## log: 0=silent, 1+=errors, 2+=warnings, 3+=info, 4=all
  ## col: log colors
  ## sup: suppress, bot does not intialize (saves startup time)
  ## tst: activate tester
  "bots": {
    "0" : {"num": 0, "xdo": 0, "fil": 0, "log": 4, "sup": 1, "tst": 0, "col": "" },
    "1" : {"num": 1, "xdo": 1, "fil": 1, "log": 4, "sup": 0, "tst": 1, "col": "" },
    "2" : {"num": 0, "xdo": 0, "fil": 0, "log": 3, "sup": 0, "tst": 1, "col": "" },
    "3" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
    "4" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
    "5" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
    "6" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
    "7" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
    "8" : {"num": 0, "xdo": 0, "fil": 0, "log": 2, "sup": 1, "tst": 0, "col": "" },
  }
}
## keep track of open file handles
files = {}
## civs to choose from at start
civs = [
  "athen",
  "brit",
  "cart",
  "celt",
  "gaul",
  "hele",
  "iber",
  "mace",
  "maur",
  "pers",
  "ptol",
  "rome",
  "sele",
  "spart",
]
def buildCmd(typ="rel", map="Arcadia 02", bots=2):
    """Assemble the pyrogenesis command line for an auto-started match.

    See /ps/trunk/binaries/system/readme.txt for the option reference.

    typ  -- key into ``locations`` selecting the game binary ("rel"/"svn")
    map  -- map name handed to -autostart
    bots -- number of AI players; each gets the hannibal AI and the next civ
    """
    cmd = [
        locations[typ],
        "-quickstart",          ## load faster (disables audio and some system info logging)
        "-autostart=" + map,    ## enables autostart and sets MAPNAME
        "-mod=public",          ## start the game using NAME mod
        "-mod=charts",
        "-mod=hannibal",
        "-autostart-seed=0",    ## random map SEED value (default 0, -1 for random)
        "-autostart-size=192",  ## random map size in TILES (default 192)
    ]
    if typ == "svn":
        ## svn does not autoload /user
        cmd.append("-mod=user")
    ## set # of players
    cmd.append("-autostart-players=" + str(bots))
    ## add one hannibal AI per player, civs taken in order from ``civs``
    for num in range(1, bots + 1):
        cmd.append("-autostart-ai=" + str(num) + ":hannibal")
        cmd.append("-autostart-civ=" + str(num) + ":" + civs[num - 1])
    return cmd
def findWindow(title):
    """Return the X window id (bytes) of the first window matching *title*,
    via ``xdotool search --name``.

    Bug fix: the old call passed a single shell string with shell="FALSE" --
    a non-empty string is *truthy*, so the shell was in fact used.  Passing
    an argument list with the default shell=False avoids the shell entirely
    and any quoting problems in *title*, while producing the same output.
    """
    process = subprocess.Popen(["xdotool", "search", "--name", title],
                               stdout=subprocess.PIPE)
    out, _ = process.communicate()  # also reaps the child and closes the pipe
    return out.splitlines()[0].strip()
def xdotool(command):
    """Run an xdotool subcommand given as a space-separated argument string."""
    args = ("xdotool %s" % command).split(" ")
    subprocess.call(args)
def cleanup():
    """Close every file handle registered in the module-level ``files`` dict.

    Fix: the old loop used the Python-2-only ``iteritems()`` and ignored the
    keys; ``values()`` behaves identically on Python 2 and also works on
    Python 3.
    """
    for handle in files.values():
        handle.close()
def writeDEBUG():
    """Serialize the DEBUG options as a JS global into the bot's _debug.js."""
    # mode 'w' already truncates; the context manager guarantees the close
    with open(locations["deb"], 'w') as handle:
        handle.write("var HANNIBAL_DEBUG = " + json.dumps(DEBUG, indent=2) + ";")
def killDEBUG():
    """Empty the bot's _debug.js so the game starts without debug options."""
    # opening in mode 'w' truncates the file; nothing is written
    with open(locations["deb"], 'w'):
        pass
def processMaps():
    """Run every map in data["testMaps"] once under the hannibal AI; the bot
    signals completion by printing '#! terminate' on stdout.

    Fixes: the command referenced the undefined name ``pyrogenesis`` (a
    NameError as soon as this function ran) -- the release binary path from
    ``locations`` is used instead; ``-mod:hannibal`` used a colon where every
    other invocation in this script uses ``-mod=hannibal``; and the Python-2
    -only ``except KeyboardInterrupt, e`` / print-statement forms are
    replaced with syntax valid on both Python 2 and 3.
    """
    proc0AD = None
    DEBUG["OnUpdate"] = "print('#! terminate');"
    for mp in data["testMaps"]:
        DEBUG["map"] = mp
        writeDEBUG()
        cmd0AD = [locations["rel"], "-quickstart", "-autostart=" + mp,
                  "-mod=public", "-mod=hannibal", "-autostart-ai=1:hannibal"]
        proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        print(" > " + " ".join(cmd0AD))
        try:
            for line in iter(proc0AD.stdout.readline, b''):
                sline = line.strip()
                if sline.startswith("#! terminate"):
                    proc0AD.terminate()
                    sleep(2)
                    if proc0AD: proc0AD.wait()
                    if proc0AD: proc0AD.kill()
                    break
        except KeyboardInterrupt:
            if proc0AD:
                proc0AD.terminate()
            break
    print("done.")
def launch(typ="rel", map="Arcadia 02", bots=2):
    """Launch 0 A.D. with hannibal bots and relay/dispatch the bots' console protocol.

    typ  -- build type passed to buildCmd ("rel" or "svn")
    map  -- map name for -autostart
    bots -- number of AI players

    Lines printed by the game are parsed for "#!"/"##" directives (terminate,
    clear, xdotool, open/append/write/close) and otherwise colour-logged.
    Relies on the module globals: files, locations, DEBUG, buildCmd,
    writeDEBUG, findWindow, xdotool, printc, stdc.
    """
    winX = 1520; winY = 20
    ## NOTE(review): doWrite and curFileNum are written but never read in
    ## this function -- presumably leftovers of an older write protocol.
    doWrite = False
    curFileNum = None
    idWindow = None
    proc0AD = None
    def terminate() :
        ## closure reads proc0AD late, so it sees the Popen created below
        if proc0AD : proc0AD.terminate()
    files["log"] = open(locations["log"], 'w')
    files["log"].truncate()
    DEBUG['map'] = map
    writeDEBUG()
    cmd0AD = buildCmd(typ, map, bots)
    print (" cmd: %s" % " ".join(cmd0AD));
    proc0AD = subprocess.Popen(cmd0AD, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        for line in iter(proc0AD.stdout.readline, b'') :
            ## line has everything
            ## sline is stripped
            ## bline is active bot line after colon
            sline = line.strip() ## removes nl and wp
            bline = ""
            id = 0
            bot = DEBUG["bots"]["0"]
            ## detect bot id: bot lines look like "<id>::<payload>"
            if len(sline) >= 2 and sline[1:3] == "::" :
                id = sline[0]
                bot = DEBUG["bots"][id]
                ## suppress the payload entirely when this bot's log level is 0
                bline = "" if bot["log"] == 0 else sline[3:]
            files["log"].write(line)
            ## terminate everything
            if sline.startswith("#! terminate") :
                if bot["xdo"] :
                    print(sline)
                terminate()
                return
            ## clear console
            elif bline.startswith("#! clear") :
                print(sline)
                sys.stderr.write("\x1b[2J\x1b[H") ## why not ??
            ## xdo init: locate the game window and move it into place
            elif bot["xdo"] and bline.startswith("#! xdotool init") :
                idWindow = findWindow("0 A.D")
                printc("DBlue", " xdo: window id: %s" % idWindow)
                xdotool("windowmove %s %s %s" % (idWindow, winX, winY))
            ## xdo command with echo
            elif bot["xdo"] and bline.startswith("#! xdotool ") :
                params = " ".join(bline.split(" ")[2:])
                printc("DBlue", " X11: " + params)
                xdotool(params)
            ## xdo command without echo
            elif bot["xdo"] and bline.startswith("## xdotool ") : ## same, no echo
                params = " ".join(bline.split(" ")[2:])
                xdotool(params)
            ## xdo command suppress
            elif not bot["xdo"] and bline.startswith("## xdotool ") :
                pass
            ## file open: "#! open <num> <name>"
            elif bot["fil"] and bline.startswith("#! open ") :
                filenum = bline.split(" ")[2]
                filename = bline.split(" ")[3]
                files[filenum] = open(filename, 'w')
                files[filenum].truncate()
            ## file append: payload is everything after the first colon
            elif bot["fil"] and bline.startswith("#! append ") :
                filenum = bline.split(" ")[2]
                dataLine = ":".join(bline.split(":")[1:])
                files[filenum].write(dataLine + "\n")
            ## file write
            elif bot["fil"] and bline.startswith("#! write ") :
                print(bline)
                filenum = bline.split(" ")[2]
                filename = bline.split(" ")[3]
                files[filenum] = open(filename, 'w')
                files[filenum].truncate()
                curFileNum = filenum
            ## file close
            elif bot["fil"] and bline.startswith("#! close ") :
                filenum = bline.split(" ")[2]
                files[filenum].close()
                ## NOTE(review): filename here is whatever "#! open"/"#! write"
                ## last set; a close without a prior open would NameError
                print("#! closed %s at %s" % (filenum, os.stat(filename).st_size))
            ## bot output, colourised by severity prefix
            elif bot["log"] > 0 and bline :
                if bline.startswith("ERROR :") : stdc("Fail", id + "::" + bline + "\n")
                elif bline.startswith("WARN :") : stdc("Warn", id + "::" + bline + "\n")
                elif bline.startswith("INFO :") : stdc("OKGreen", id + "::" + bline + "\n")
                else : sys.stdout.write("" + bline + "\n")
            ## suppressed bots - no output
            elif bot["log"] == 0:
                pass
            ## hannibal or map or 0AD output
            elif line :
                if line.startswith("ERROR :") : stdc("Fail", line + "\n")
                elif line.startswith("WARN :") : stdc("Warn", line + "\n")
                elif line.startswith("INFO :") : stdc("OKGreen", line + "\n")
                elif line.startswith("TIMER| ") : pass ## suppress 0AD debugs
                elif line.startswith("sys_cursor_create:") : pass
                elif line.startswith("AL lib:") : pass
                elif line.startswith("Sound:") : pass
                else :
                    sys.stdout.write("" + line)
    except KeyboardInterrupt, e :
        terminate()
if __name__ == '__main__':
args = sys.argv[1:]
if args[0] == "maps" :
print (" processing maps...")
processMaps(args)
else:
typ = args[0] if len(args) > 0 else "rel"
map = args[1] if len(args) > 1 else "Arcadia 02"
bots = | |
<reponame>CHIMEFRB/ch_util
"""
Tools for point source calibration
This module contains tools for performing point-source calibration.
"""
from abc import ABCMeta, abstractmethod
import inspect
import logging
import numpy as np
import scipy.stats
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.linalg import lstsq, inv
from ch_util import ephemeris, tools
# Set up logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class FitTransit(object, metaclass=ABCMeta):
    """Base class for fitting models to point source transits.

    The `fit` method should be used to populate the `param`, `param_cov`, `chisq`,
    and `ndof` attributes.  The `predict` and `uncertainty` methods can then be used
    to obtain the model prediction for the response and uncertainty on this quantity
    at a given hour angle.

    Attributes
    ----------
    param : np.ndarray[..., nparam]
        Best-fit parameters.
    param_cov : np.ndarray[..., nparam, nparam]
        Covariance of the fit parameters.
    chisq : np.ndarray[...]
        Chi-squared of the fit.
    ndof : np.ndarray[...]
        Number of degrees of freedom.

    Abstract Methods
    ----------------
    Any subclass of FitTransit must define these methods:
        peak
        _fit
        _model
        _jacobian
    """

    # Cache of Student's-t quantiles, shared by all instances (see `tval`).
    _tval = {}
    # FIX: np.string_ was removed in NumPy 2.0; np.bytes_ is the same type.
    component = np.array(["complex"], dtype=np.bytes_)

    def __init__(self, *args, **kwargs):
        """Instantiates a FitTransit object.

        Parameters
        ----------
        param : np.ndarray[..., nparam]
            Best-fit parameters.
        param_cov : np.ndarray[..., nparam, nparam]
            Covariance of the fit parameters.
        chisq : np.ndarray[..., ncomponent]
            Chi-squared.
        ndof : np.ndarray[..., ncomponent]
            Number of degrees of freedom.
        """
        # Save keyword arguments as attributes; anything else is forwarded
        # to the subclass through model_kwargs.
        self.param = kwargs.pop("param", None)
        self.param_cov = kwargs.pop("param_cov", None)
        self.chisq = kwargs.pop("chisq", None)
        self.ndof = kwargs.pop("ndof", None)
        self.model_kwargs = kwargs

    def predict(self, ha, elementwise=False):
        """Predict the point source response.

        Parameters
        ----------
        ha : np.ndarray[nha,] or float
            The hour angle in degrees.
        elementwise : bool
            If False, then the model will be evaluated at the
            requested hour angles for every set of parameters.
            If True, then the model will be evaluated at a
            separate hour angle for each set of parameters
            (requires `ha.shape == self.N`).

        Returns
        -------
        model : np.ndarray[..., nha] or float
            Model for the point source response at the requested
            hour angles.  Complex valued.
        """
        with np.errstate(all="ignore"):
            mdl = self._model(ha, elementwise=elementwise)
        # Non-finite model values (e.g. from NaN parameters) map to 0+0j.
        return np.where(np.isfinite(mdl), mdl, 0.0 + 0.0j)

    def uncertainty(self, ha, alpha=0.32, elementwise=False):
        """Predict the uncertainty on the point source response.

        Parameters
        ----------
        ha : np.ndarray[nha,] or float
            The hour angle in degrees.
        alpha : float
            Confidence level given by 1 - alpha.
        elementwise : bool
            If False, then the uncertainty will be evaluated at
            the requested hour angles for every set of parameters.
            If True, then the uncertainty will be evaluated at a
            separate hour angle for each set of parameters
            (requires `ha.shape == self.N`).

        Returns
        -------
        err : np.ndarray[..., nha]
            Uncertainty on the point source response at the
            requested hour angles.
        """
        x = np.atleast_1d(ha)
        with np.errstate(all="ignore"):
            err = _propagate_uncertainty(
                self._jacobian(x, elementwise=elementwise),
                self.param_cov,
                self.tval(alpha, self.ndof),
            )
        return np.squeeze(np.where(np.isfinite(err), err, 0.0))

    def fit(self, ha, resp, resp_err, width=5, absolute_sigma=False, **kwargs):
        """Apply subclass defined `_fit` method to multiple transits.

        This function can be used to fit the transit for multiple inputs
        and frequencies.  Populates the `param`, `param_cov`, `chisq`, and `ndof`
        attributes.

        Parameters
        ----------
        ha : np.ndarray[nha,]
            Hour angle in degrees.
        resp : np.ndarray[..., nha]
            Measured response to the point source.  Complex valued.
        resp_err : np.ndarray[..., nha]
            Error on the measured response.
        width : np.ndarray[...]
            Initial guess at the width (sigma) of the transit in degrees.
        absolute_sigma : bool
            Set to True if the errors provided are absolute.  Set to False if
            the errors provided are relative, in which case the parameter covariance
            will be scaled by the chi-squared per degree-of-freedom.
        """
        shp = resp.shape[:-1]
        dtype = ha.dtype

        if not np.isscalar(width) and (width.shape != shp):
            # BUG FIX: the ValueError used to be constructed but never
            # raised, so an invalid `width` shape was silently accepted.
            # (Also fixes the "with"/"width" typo in the message.)
            raise ValueError("Keyword width must be scalar or have shape %s." % str(shp))

        # NOTE(review): `self.nparam` reads self.param.shape, which is not yet
        # set on a fresh fit -- subclasses are expected to override `nparam`;
        # confirm before relying on the base-class property here.
        self.param = np.full(shp + (self.nparam,), np.nan, dtype=dtype)
        self.param_cov = np.full(shp + (self.nparam, self.nparam), np.nan, dtype=dtype)
        self.chisq = np.full(shp + (self.ncomponent,), np.nan, dtype=dtype)
        # FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
        self.ndof = np.full(shp + (self.ncomponent,), 0, dtype=int)

        with np.errstate(all="ignore"):
            for ind in np.ndindex(*shp):
                wi = width if np.isscalar(width) else width[ind[: width.ndim]]

                err = resp_err[ind]
                good = np.flatnonzero(err > 0.0)

                # Require more than 2 * nparam valid samples for a stable fit.
                if (good.size // 2) <= self.nparam:
                    continue

                try:
                    param, param_cov, chisq, ndof = self._fit(
                        ha[good],
                        resp[ind][good],
                        err[good],
                        width=wi,
                        absolute_sigma=absolute_sigma,
                        **kwargs
                    )
                except Exception as error:
                    # A single failed transit should not abort the batch.
                    logger.debug("Index %s failed with error: %s" % (str(ind), error))
                    continue

                self.param[ind] = param
                self.param_cov[ind] = param_cov
                self.chisq[ind] = chisq
                self.ndof[ind] = ndof

    @property
    def parameter_names(self):
        """
        Array of strings containing the name of the fit parameters.

        Returns
        -------
        parameter_names : np.ndarray[nparam,]
            Names of the parameters.
        """
        # FIX: np.string_ -> np.bytes_ (same type; the former is gone in NumPy 2.0).
        return np.array(["param%d" % p for p in range(self.nparam)], dtype=np.bytes_)

    @property
    def param_corr(self):
        """
        Parameter correlation matrix.

        Returns
        -------
        param_corr : np.ndarray[..., nparam, nparam]
            Correlation of the fit parameters.
        """
        idiag = tools.invert_no_zero(
            np.sqrt(np.diagonal(self.param_cov, axis1=-2, axis2=-1))
        )
        return self.param_cov * idiag[..., np.newaxis, :] * idiag[..., np.newaxis]

    @property
    def N(self):
        """
        Number of independent transit fits contained in this object.

        Returns
        -------
        N : tuple
            Numpy-style shape indicating the number of
            fits that the object contains.  Is None
            if the object contains a single fit.
        """
        if self.param is not None:
            return self.param.shape[:-1] or None

    @property
    def nparam(self):
        """
        Number of parameters.

        Returns
        -------
        nparam : int
            Number of fit parameters.
        """
        return self.param.shape[-1]

    @property
    def ncomponent(self):
        """
        Number of components.

        Returns
        -------
        ncomponent : int
            Number of components (i.e, real and imag, amp and phase, complex) that have been fit.
        """
        return self.component.size

    def __getitem__(self, val):
        """Instantiates a new TransitFit object containing some subset of the fits."""
        if self.N is None:
            raise KeyError(
                "Attempting to slice TransitFit object containing single fit."
            )

        return self.__class__(
            param=self.param[val],
            param_cov=self.param_cov[val],
            ndof=self.ndof[val],
            chisq=self.chisq[val],
            **self.model_kwargs
        )

    @abstractmethod
    def peak(self):
        """Calculate the peak of the transit.

        Any subclass of FitTransit must define this method.
        """
        return

    @abstractmethod
    def _fit(self, ha, resp, resp_err, width=None, absolute_sigma=False):
        """Fit data to the model.

        Any subclass of FitTransit must define this method.

        Parameters
        ----------
        ha : np.ndarray[nha,]
            Hour angle in degrees.
        resp : np.ndarray[nha,]
            Measured response to the point source.  Complex valued.
        resp_err : np.ndarray[nha,]
            Error on the measured response.
        width : np.ndarray
            Initial guess at the width (sigma) of the transit in degrees.
        absolute_sigma : bool
            Set to True if the errors provided are absolute.  Set to False if
            the errors provided are relative, in which case the parameter covariance
            will be scaled by the chi-squared per degree-of-freedom.

        Returns
        -------
        param : np.ndarray[nparam,]
            Best-fit model parameters.
        param_cov : np.ndarray[nparam, nparam]
            Covariance of the best-fit model parameters.
        chisq : float
            Chi-squared of the fit.
        ndof : int
            Number of degrees of freedom of the fit.
        """
        return

    @abstractmethod
    def _model(self, ha):
        """Calculate the model for the point source response.

        Any subclass of FitTransit must define this method.

        Parameters
        ----------
        ha : np.ndarray
            Hour angle in degrees.
        """
        return

    @abstractmethod
    def _jacobian(self, ha):
        """Calculate the jacobian of the model for the point source response.

        Any subclass of FitTransit must define this method.

        Parameters
        ----------
        ha : np.ndarray
            Hour angle in degrees.

        Returns
        -------
        jac : np.ndarray[..., nparam, nha]
            The jacobian defined as
            jac[..., i, j] = d(model(ha)) / d(param[i]) evaluated at ha[j]
        """
        return

    @classmethod
    def tval(cls, alpha, ndof):
        """Quantile of a standardized Student's t random variable.

        This quantity is slow to compute.  Past values will be cached
        in a dictionary shared by all instances of the class.

        Parameters
        ----------
        alpha : float
            Calculate the quantile corresponding to the lower tail probability
            1 - alpha / 2.
        ndof : np.ndarray or int
            Number of degrees of freedom of the Student's t variable.

        Returns
        -------
        tval : np.ndarray or float
            Quantile of a standardized Student's t random variable.
        """
        prob = 1.0 - 0.5 * alpha

        arr_ndof = np.atleast_1d(ndof)
        tval = np.zeros(arr_ndof.shape, dtype=np.float32)

        for ind, nd in np.ndenumerate(arr_ndof):
            # Cache key quantizes prob to 1% to bound the cache size.
            key = (int(100.0 * prob), nd)
            if key not in cls._tval:
                cls._tval[key] = scipy.stats.t.ppf(prob, nd)
            tval[ind] = cls._tval[key]

        if np.isscalar(ndof):
            tval = np.squeeze(tval)

        return tval
class FitPoly(FitTransit):
"""Base class for fitting polynomials to point source transits.
Maps methods of np.polynomial to methods of the class for the
requested polynomial type.
"""
def __init__(self, poly_type="standard", *args, **kwargs):
"""Instantiates a FitPoly object.
Parameters
----------
poly_type : str
Type | |
Ps_z[:, :, 0]
theta = np.arctan2(Ps_y, Ps_x)
phi = np.arctan2(Ps_z, np.sqrt(Ps_x ** 2 + Ps_y ** 2))
a = 2 * np.pi / (Ws - 1)
b = np.pi - a * (Ws - 1)
self.map_x = (1.0 / a) * (theta - b)
a = -np.pi / (Hs - 1)
b = np.pi / 2
self.map_y = (1.0 / a) * (phi - b)
output = cv2.remap(
img,
self.map_x.astype(np.float32),
self.map_y.astype(np.float32),
cv2.INTER_CUBIC,
borderMode=cv2.BORDER_WRAP,
)
if self.f < self.fmin:
r = np.sqrt(np.abs(-(self.f ** 2) / (1 - self.xi ** 2)))
mask = np.zeros_like(output[:, :, 0])
mask = cv2.circle(
mask, (int(self.Cx), int(self.Cy)), int(r), (255, 255, 255), -1
)
output = cv2.bitwise_and(output, output, mask=mask)
return output
def equirect2Fisheye_EUCM(self,
img,
outShape,
f=50,
a_=0.5,
b_=0.5,
angles=[0, 0, 0]
):
self.Hd = outShape[0]
self.Wd = outShape[1]
self.f = f
self.a_ = a_
self.b_ = b_
Hs, Ws = img.shape[:2]
self.Cx = self.Wd / 2.0
self.Cy = self.Hd / 2.0
x = np.linspace(0, self.Wd - 1, num=self.Wd, dtype=np.float32)
y = np.linspace(0, self.Hd - 1, num=self.Hd, dtype=np.float32)
x, y = np.meshgrid(range(self.Wd), range(self.Hd))
xref = 1
yref = 1
self.fmin = (
np.lib.scimath.sqrt(
self.b_
* (2 * self.a_ - 1)
* ((xref - self.Cx) ** 2 + (yref - self.Cy) ** 2)
)
* 1.0001
)
# print(self.fmin)
if np.real(self.fmin) <= 0:
self.fmin = np.imag(self.fmin)
# print(self.f)
# print(self.fmin)
mx = (x - self.Cx) / self.f
my = (y - self.Cy) / self.f
r_2 = mx ** 2 + my ** 2
mz = np.real(
(1 - self.b_ * self.a_ * self.a_ * r_2)
/ (
self.a_ * np.lib.scimath.sqrt(1 - (2 * self.a_ - 1) *
self.b_ * r_2)
+ (1 - self.a_)
)
)
coef = 1 / np.sqrt(mx ** 2 + my ** 2 + mz ** 2)
Ps_x = mx * coef
Ps_y = my * coef
Ps_z = mz * coef
self.alpha = angles[0]
self.beta = angles[1]
self.gamma = angles[2]
R = np.matmul(
rmat(self.alpha, self.beta, self.gamma),
np.matmul(rmat(0, -90, 45), rmat(0, 90, 90)),
)
Ps = np.stack((Ps_x, Ps_y, Ps_z), -1)
Ps = np.matmul(Ps, R.T)
Ps_x, Ps_y, Ps_z = np.split(Ps, 3, axis=-1)
Ps_x = Ps_x[:, :, 0]
Ps_y = Ps_y[:, :, 0]
Ps_z = Ps_z[:, :, 0]
theta = np.arctan2(Ps_y, Ps_x)
phi = np.arctan2(Ps_z, np.sqrt(Ps_x ** 2 + Ps_y ** 2))
a = 2 * np.pi / (Ws - 1)
b = np.pi - a * (Ws - 1)
self.map_x = (1.0 / a) * (theta - b)
a = -np.pi / (Hs - 1)
b = np.pi / 2
self.map_y = (1.0 / a) * (phi - b)
output = cv2.remap(
img,
self.map_x.astype(np.float32),
self.map_y.astype(np.float32),
cv2.INTER_CUBIC,
borderMode=cv2.BORDER_WRAP,
)
if self.f < self.fmin:
r = np.sqrt(np.abs((self.f ** 2) / (self.b_ * (2 * self.a_ - 1))))
mask = np.zeros_like(output[:, :, 0])
mask = cv2.circle(
mask, (int(self.Cx), int(self.Cy)), int(r), (255, 255, 255), -1
)
output = cv2.bitwise_and(output, output, mask=mask)
return output
def equirect2Fisheye_FOV(self,
img,
outShape,
f=50,
w_=0.5,
angles=[0, 0, 0]
):
self.Hd = outShape[0]
self.Wd = outShape[1]
self.f = f
self.w_ = w_
Hs, Ws = img.shape[:2]
self.Cx = self.Wd / 2.0
self.Cy = self.Hd / 2.0
x = np.linspace(0, self.Wd - 1, num=self.Wd, dtype=np.float32)
y = np.linspace(0, self.Hd - 1, num=self.Hd, dtype=np.float32)
x, y = np.meshgrid(range(self.Wd), range(self.Hd))
mx = (x - self.Cx) / self.f
my = (y - self.Cy) / self.f
rd = np.sqrt(mx ** 2 + my ** 2)
Ps_x = mx * np.sin(rd * self.w_) / (2 * rd * np.tan(self.w_ / 2))
Ps_y = my * np.sin(rd * self.w_) / (2 * rd * np.tan(self.w_ / 2))
Ps_z = np.cos(rd * self.w_)
self.alpha = angles[0]
self.beta = angles[1]
self.gamma = angles[2]
R = np.matmul(
rmat(self.alpha, self.beta, self.gamma),
np.matmul(rmat(0, -90, 45), rmat(0, 90, 90)),
)
Ps = np.stack((Ps_x, Ps_y, Ps_z), -1)
Ps = np.matmul(Ps, R.T)
Ps_x, Ps_y, Ps_z = np.split(Ps, 3, axis=-1)
Ps_x = Ps_x[:, :, 0]
Ps_y = Ps_y[:, :, 0]
Ps_z = Ps_z[:, :, 0]
theta = np.arctan2(Ps_y, Ps_x)
phi = np.arctan2(Ps_z, np.sqrt(Ps_x ** 2 + Ps_y ** 2))
a = 2 * np.pi / (Ws - 1)
b = np.pi - a * (Ws - 1)
self.map_x = (1.0 / a) * (theta - b)
a = -np.pi / (Hs - 1)
b = np.pi / 2
self.map_y = (1.0 / a) * (phi - b)
output = cv2.remap(
img,
self.map_x.astype(np.float32),
self.map_y.astype(np.float32),
cv2.INTER_CUBIC,
borderMode=cv2.BORDER_WRAP,
)
return output
def equirect2Fisheye_DS(self,
img,
outShape,
f=50,
a_=0.5,
xi_=0.5,
angles=[0, 0, 0]
):
self.Hd = outShape[0]
self.Wd = outShape[1]
self.f = f
self.a_ = a_
self.xi_ = xi_
Hs, Ws = img.shape[:2]
self.Cx = self.Wd / 2.0
self.Cy = self.Hd / 2.0
x = np.linspace(0, self.Wd - 1, num=self.Wd, dtype=np.float32)
y = np.linspace(0, self.Hd - 1, num=self.Hd, dtype=np.float32)
x, y = np.meshgrid(range(self.Wd), range(self.Hd))
xref = 1
yref = 1
self.fmin = np.sqrt(np.abs((2 * self.a_ - 1) *
((xref - self.Cx) ** 2 + (yref - self.Cy) ** 2))
)
mx = (x - self.Cx) / self.f
my = (y - self.Cy) / self.f
r_2 = mx ** 2 + my ** 2
mz = np.real(
(1 - self.a_ * self.a_ * r_2)
/ (self.a_ * np.lib.scimath.sqrt(1 - (2 * self.a_ - 1) * r_2) +
1 - self.a_)
)
omega = np.real(
(mz * self.xi_ + np.lib.scimath.sqrt(mz ** 2 +
(1 - self.xi_ ** 2) * r_2))
/ (mz ** 2 + r_2)
)
Ps_x = omega * mx
Ps_y = omega * my
Ps_z = omega * mz - self.xi_
self.alpha = angles[0]
self.beta = angles[1]
self.gamma = angles[2]
R = np.matmul(
rmat(self.alpha, self.beta, self.gamma),
np.matmul(rmat(0, -90, 45), rmat(0, 90, 90)),
)
Ps = np.stack((Ps_x, Ps_y, Ps_z), -1)
Ps = np.matmul(Ps, R.T)
Ps_x, Ps_y, Ps_z = np.split(Ps, 3, axis=-1)
Ps_x = Ps_x[:, :, 0]
Ps_y = Ps_y[:, :, 0]
Ps_z = Ps_z[:, :, 0]
theta = np.arctan2(Ps_y, Ps_x)
phi = np.arctan2(Ps_z, np.sqrt(Ps_x ** 2 + Ps_y ** 2))
a = 2 * np.pi / (Ws - 1)
b = np.pi - a * (Ws - 1)
self.map_x = (1.0 / a) * (theta - b)
a = -np.pi / (Hs - 1)
b = np.pi / 2
self.map_y = (1.0 / a) * (phi - b)
output = cv2.remap(
img,
self.map_x.astype(np.float32),
self.map_y.astype(np.float32),
cv2.INTER_CUBIC,
borderMode=cv2.BORDER_WRAP,
)
if self.f < self.fmin:
r = np.sqrt(np.abs((self.f ** 2) / (2 * self.a_ - 1)))
mask = np.zeros_like(output[:, :, 0])
mask = cv2.circle(
mask, (int(self.Cx), int(self.Cy)), int(r), (255, 255, 255), -1
)
output = cv2.bitwise_and(output, output, mask=mask)
return output
def applyMap(self,
map,
srcFrame
):
if map == 0:
return cv2.remap(
srcFrame,
self.map_x,
self.map_y,
interpolation=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
)
if map == 1:
dstFrame = cv2.remap(
srcFrame,
self.map_x,
self.map_y,
interpolation=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
)
if self.dice:
line1 = np.hstack(
(
dstFrame[:, 4 * self.side: 5 * self.side, :] * 0,
cv2.flip(dstFrame[:, 4 *
self.side: 5 * self.side, :], 0),
dstFrame[:, 4 * self.side: 5 * self.side, :] * 0,
dstFrame[:, 4 * self.side: 5 * self.side, :] * 0,
)
)
line2 = np.hstack(
(
dstFrame[:, 3 * self.side: 4 * self.side, :],
dstFrame[:, 0 * self.side: 1 * self.side, :],
cv2.flip(dstFrame[:, 1 *
self.side: 2 * self.side, :], 1),
cv2.flip(dstFrame[:, 2 *
self.side: 3 * self.side, :], 1),
)
)
line3 = np.hstack(
(
dstFrame[:, 5 * self.side: 6 * self.side, :] * 0,
dstFrame[:, 5 * self.side: 6 * self.side, :],
dstFrame[:, 5 * self.side: 6 * self.side, :] * 0,
dstFrame[:, 5 * self.side: 6 * self.side, :] * 0,
)
)
dstFrame = np.vstack((line1, line2, line3))
return dstFrame
if map == 2:
h, w = srcFrame.shape[:2]
if h / w == 3 / 4:
l1, l2, l3 = np.split(srcFrame, 3, axis=0)
_, pY, _, _ = np.split(l1, 4, axis=1)
nX, pZ, pX, nZ = np.split(l2, 4, axis=1)
_, nY, _, _ = np.split(l3, 4, axis=1)
srcFrame = np.hstack(
(pZ, cv2.flip(pX, 1), cv2.flip(nZ, 1),
| |
<filename>pygeom/geom3d/cubicspline.py
from .line import Line
from .vector import Vector
from numpy.matlib import zeros
from numpy.linalg import solve
from matplotlib.pyplot import figure
from mpl_toolkits.mplot3d import Axes3D
class CubicSpline(object):
u"""This class stores a 3D parametric cubic spline."""
pnts = None
npnts = None
clsd = False
pnls = None
npnls = None
d2r = None
dr = None
tanA = None
tanB = None
R = None
def __init__(self, pnts, clsd=False, tanA=None, tanB=None):
u"""This function initialises the object."""
self.pnts = pnts
self.clsd = clsd
self.tanA = tanA
self.tanB = tanB
self.update()
def update(self):
u"""This function calculates the other parameters of the object."""
self.npnts = len(self.pnts)
if self.clsd:
indas = [i for i in range(self.npnts)]
indbs = [i+1 for i in range(self.npnts)]
indbs[-1] = 0
self.npnls = self.npnts
else:
indas = [i for i in range(self.npnts-1)]
indbs = [i+1 for i in range(self.npnts-1)]
self.npnls = self.npnts-1
self.pnls = []
for i in range(self.npnls):
inda = indas[i]
indb = indbs[i]
self.pnls.append(Line(self.pnts[inda], self.pnts[indb]))
if self.clsd:
self.d2r = self.calc_d2r_closed()
self.dr = self.calc_dr_closed()
else:
self.d2r = self.calc_d2r_open(tanA=self.tanA, tanB=self.tanB)
self.dr = self.calc_dr_open()
# self.th = self.calc_th()
self.R = self.calc_R()
    def calc_d2r_open(self, tanA=None, tanB=None):
        u"""This function calculates the curvature of an open ended spline.

        Solves the cubic-spline tridiagonal system for the second-derivative
        vectors d2r at every point using the Thomas algorithm.  With no
        tangent supplied, an end row stays (a=0, b=1, r=0), i.e. a natural
        end condition (zero curvature); supplying tanA/tanB clamps the
        corresponding end slope instead.
        """
        # Unit chord direction of each panel, split per component.
        pnl_dx = [pnl.vec.x/pnl.length for pnl in self.pnls]
        pnl_dy = [pnl.vec.y/pnl.length for pnl in self.pnls]
        pnl_dz = [pnl.vec.z/pnl.length for pnl in self.pnls]
        # Change of direction across each interior point (RHS of the system).
        del_dx = [0.]*self.npnts
        del_dy = [0.]*self.npnts
        del_dz = [0.]*self.npnts
        if tanA != None:
            # Clamped start: mismatch between first chord and given tangent.
            utA = tanA.to_unit()
            dxA = utA.x
            dyA = utA.y
            dzA = utA.z
            del_dx[0] = pnl_dx[0]-dxA
            del_dy[0] = pnl_dy[0]-dyA
            del_dz[0] = pnl_dz[0]-dzA
        for i in range(1, self.npnts-1):
            del_dx[i] = pnl_dx[i]-pnl_dx[i-1]
            del_dy[i] = pnl_dy[i]-pnl_dy[i-1]
            del_dz[i] = pnl_dz[i]-pnl_dz[i-1]
        if tanB != None:
            # Clamped end: mismatch between given tangent and last chord.
            utB = tanB.to_unit()
            dxB = utB.x
            dyB = utB.y
            dzB = utB.z
            del_dx[-1] = dxB-pnl_dx[-1]
            del_dy[-1] = dyB-pnl_dy[-1]
            del_dz[-1] = dzB-pnl_dz[-1]
        # Tridiagonal coefficients: a = sub-, b = main, c = super-diagonal.
        a = [0.]*self.npnts
        b = [1.]*self.npnts
        c = [0.]*self.npnts
        rx = [0.]*self.npnts
        ry = [0.]*self.npnts
        rz = [0.]*self.npnts
        if tanA != None:
            sB = self.pnls[0].length
            b[0] = sB/3
            c[0] = sB/6
            rx[0] = del_dx[0]
            ry[0] = del_dy[0]
            rz[0] = del_dz[0]
        for i in range(1, self.npnts-1):
            sA = self.pnls[i-1].length
            sB = self.pnls[i].length
            a[i] = sA/6
            b[i] = (sA+sB)/3
            c[i] = sB/6
            rx[i] = del_dx[i]
            ry[i] = del_dy[i]
            rz[i] = del_dz[i]
        if tanB != None:
            sA = self.pnls[-1].length
            a[-1] = sA/6
            b[-1] = sA/3
            rx[-1] = del_dx[-1]
            ry[-1] = del_dy[-1]
            rz[-1] = del_dz[-1]
        # Thomas algorithm: forward elimination ...
        Γ = [0.]*self.npnts
        d2x = [0.]*self.npnts
        d2y = [0.]*self.npnts
        d2z = [0.]*self.npnts
        β = b[0]
        d2x[0] = rx[0]/β
        d2y[0] = ry[0]/β
        d2z[0] = rz[0]/β
        for i in range(1, self.npnts):
            Γ[i] = c[i-1]/β
            β = b[i]-a[i]*Γ[i]
            d2x[i] = (rx[i]-a[i]*d2x[i-1])/β
            d2y[i] = (ry[i]-a[i]*d2y[i-1])/β
            d2z[i] = (rz[i]-a[i]*d2z[i-1])/β
        # ... then back substitution.
        for i in range(self.npnts-2, -1, -1):
            d2x[i] -= Γ[i+1]*d2x[i+1]
            d2y[i] -= Γ[i+1]*d2y[i+1]
            d2z[i] -= Γ[i+1]*d2z[i+1]
        d2r = [Vector(d2x[i], d2y[i], d2z[i]) for i in range(self.npnts)]
        return d2r
    def calc_d2r_closed(self):
        u"""This function calculates the curvature of a closed spline.

        Builds the full cyclic (periodic) spline system -- tridiagonal plus
        corner terms -- and solves it with a dense linear solve for the
        second-derivative vectors d2r at every point.
        """
        n = self.npnts
        # inda/indb: incoming and outgoing panel index at each point,
        # with wrap-around at point 0.
        inda = [i-1 for i in range(n)]
        indb = [i for i in range(n)]
        inda[0] = n-1
        # Unit chord direction of each panel, split per component.
        pnl_dx = [pnl.vec.x/pnl.length for pnl in self.pnls]
        pnl_dy = [pnl.vec.y/pnl.length for pnl in self.pnls]
        pnl_dz = [pnl.vec.z/pnl.length for pnl in self.pnls]
        # Change of direction across each point (RHS of the system).
        del_dx = [0.]*n
        del_dy = [0.]*n
        del_dz = [0.]*n
        for i in range(n):
            del_dx[i] = pnl_dx[indb[i]]-pnl_dx[inda[i]]
            del_dy[i] = pnl_dy[indb[i]]-pnl_dy[inda[i]]
            del_dz[i] = pnl_dz[indb[i]]-pnl_dz[inda[i]]
        # Cyclic tridiagonal matrix: the wrap terms land at (0, n-1) and (n-1, 0).
        A = zeros((n, n))
        B = zeros((n, 3))
        for i in range(n):
            sA = self.pnls[inda[i]].length
            sB = self.pnls[indb[i]].length
            if i-1 < 0:
                A[i, n-1] = sA/6
            else:
                A[i, i-1] = sA/6
            A[i, i] = (sA+sB)/3
            if i+1 > n-1:
                A[i, 0] = sB/6
            else:
                A[i, i+1] = sB/6
            B[i, 0] = del_dx[i]
            B[i, 1] = del_dy[i]
            B[i, 2] = del_dz[i]
        # Solve all three components at once (columns of B).
        X = solve(A, B)
        d2x = [X[i, 0] for i in range(n)]
        d2y = [X[i, 1] for i in range(n)]
        d2z = [X[i, 2] for i in range(n)]
        d2r = [Vector(d2x[i], d2y[i], d2z[i]) for i in range(self.npnts)]
        return d2r
def calc_dr_open(self):
u"""This function calculates the gradient of an open ended spline."""
dx = []
dy = []
dz = []
for i in range(self.npnls):
xA = self.pnts[i].x
xB = self.pnts[i+1].x
d2xA = self.d2r[i].x
d2xB = self.d2r[i+1].x
yA = self.pnts[i].y
yB = self.pnts[i+1].y
d2yA = self.d2r[i].y
d2yB = self.d2r[i+1].y
zA = self.pnts[i].z
zB = self.pnts[i+1].z
d2zA = self.d2r[i].z
d2zB = self.d2r[i+1].z
sP = self.pnls[i].length
dxA = (xB-xA)/sP-sP/3*d2xA-sP/6*d2xB
dyA = (yB-yA)/sP-sP/3*d2yA-sP/6*d2yB
dzA = (zB-zA)/sP-sP/3*d2zA-sP/6*d2zB
dx.append(dxA)
dy.append(dyA)
dz.append(dzA)
dxB = (xB-xA)/sP+sP/6*d2xA+sP/3*d2xB
dyB = (yB-yA)/sP+sP/6*d2yA+sP/3*d2yB
dzB = (zB-zA)/sP+sP/6*d2zA+sP/3*d2zB
dx.append(dxB)
dy.append(dyB)
dz.append(dzB)
dr = [Vector(dx[i], dy[i], dz[i]) for i in range(self.npnts)]
return dr
def calc_dr_closed(self):
u"""This function calculates the gradient of a closed spline."""
n = self.npnts
inda = [i for i in range(n)]
indb = [i+1 for i in range(n)]
indb[-1] = 0
dx = []
dy = []
dz = []
for i in range(self.npnls):
ia = inda[i]
ib = indb[i]
xA = self.pnts[ia].x
xB = self.pnts[ib].x
d2xA = self.d2r[ia].x
d2xB = self.d2r[ib].x
yA = self.pnts[ia].y
yB = self.pnts[ib].y
d2yA = self.d2r[ia].y
d2yB = self.d2r[ib].y
zA = self.pnts[ia].z
zB = self.pnts[ib].z
d2zA = self.d2r[ia].z
d2zB = self.d2r[ib].z
sP = self.pnls[i].length
dxA = (xB-xA)/sP-sP/3*d2xA-sP/6*d2xB
dyA = (yB-yA)/sP-sP/3*d2yA-sP/6*d2yB
dzA = (zB-zA)/sP-sP/3*d2zA-sP/6*d2zB
dx.append(dxA)
dy.append(dyA)
dz.append(dzA)
dr = [Vector(dx[i], dy[i], dz[i]) for i in range(self.npnts)]
return dr
def calc_R(self):
u"""This function calculates the radius of curvature of the spline."""
R = []
for i in range(self.npnts):
dri = self.dr[i]
d2ri = self.d2r[i]
k = (dri**d2ri).return_magnitude()/(dri.return_magnitude())**3
if k == 0.:
R.append(float('inf'))
else:
R.append(1/k)
return R
    def spline_points(self, num=5):
        u"""This function interpolates the spline with a number of points.

        Evaluates the cubic spline at `num` equally spaced parameter values
        per panel (plus the final point) and returns three parallel lists
        (x, y, z) of the interpolated coordinates.
        """
        x = []
        y = []
        z = []
        for i in range(self.npnls):
            ia = i
            xA = self.pnts[ia].x
            d2xA = self.d2r[ia].x
            yA = self.pnts[ia].y
            d2yA = self.d2r[ia].y
            zA = self.pnts[ia].z
            d2zA = self.d2r[ia].z
            ib = i+1
            # wrap the panel end back to point 0 on a closed spline
            if ib == self.npnts:
                ib = 0
            xB = self.pnts[ib].x
            d2xB = self.d2r[ib].x
            yB = self.pnts[ib].y
            d2yB = self.d2r[ib].y
            zB = self.pnts[ib].z
            d2zB = self.d2r[ib].z
            sP = self.pnls[i].length
            for j in range(num):
                # standard cubic-spline basis: A, B blend the end values,
                # C, D blend the end curvatures
                s = j*sP/num
                A = (sP-s)/sP
                B = s/sP
                C = (A**3-A)*sP**2/6
                D = (B**3-B)*sP**2/6
                x.append(A*xA+B*xB+C*d2xA+D*d2xB)
                y.append(A*yA+B*yB+C*d2yA+D*d2yB)
                z.append(A*zA+B*zB+C*d2zA+D*d2zB)
        # close the curve back to the first point, or finish at the last one
        if self.clsd:
            x.append(self.pnts[0].x)
            y.append(self.pnts[0].y)
            z.append(self.pnts[0].z)
        else:
            x.append(self.pnts[-1].x)
            y.append(self.pnts[-1].y)
            z.append(self.pnts[-1].z)
        return x, y, z
    def spline_gradient(self, num=5):
        u"""This function interpolates the gradient of the spline.

        Evaluates the first derivative of the cubic spline at `num` equally
        spaced parameter values per panel (plus the final point) and returns
        three parallel lists (dx, dy, dz).
        """
        dx = []
        dy = []
        dz = []
        for i in range(self.npnls):
            ia = i
            xA = self.pnts[ia].x
            d2xA = self.d2r[ia].x
            yA = self.pnts[ia].y
            d2yA = self.d2r[ia].y
            zA = self.pnts[ia].z
            d2zA = self.d2r[ia].z
            ib = i+1
            # wrap the panel end back to point 0 on a closed spline
            if ib == self.npnts:
                ib = 0
            xB = self.pnts[ib].x
            d2xB = self.d2r[ib].x
            yB = self.pnts[ib].y
            d2yB = self.d2r[ib].y
            zB = self.pnts[ib].z
            d2zB = self.d2r[ib].z
            sP = self.pnls[i].length
            for j in range(num):
                # derivative of the cubic basis with respect to arc parameter s
                s = j*sP/num
                A = (sP-s)/sP
                B = s/sP
                dx.append((xB-xA)/sP-(3*A**2-1)*sP/6*d2xA+(3*B**2-1)*sP/6*d2xB)
                dy.append((yB-yA)/sP-(3*A**2-1)*sP/6*d2yA+(3*B**2-1)*sP/6*d2yB)
                dz.append((zB-zA)/sP-(3*A**2-1)*sP/6*d2zA+(3*B**2-1)*sP/6*d2zB)
        # append the stored end-point gradient to close/finish the curve
        if self.clsd:
            dx.append(self.dr[0].x)
            dy.append(self.dr[0].y)
            dz.append(self.dr[0].z)
        else:
            dx.append(self.dr[-1].x)
            dy.append(self.dr[-1].y)
            dz.append(self.dr[-1].z)
        return dx, dy, dz
def spline_curvature(self, num=1):
u"""This function interpolates the curvature of the spline."""
d2x = []
d2y = []
d2z = []
for i in range(self.npnls):
ia = i
d2xA = self.d2r[ia].x
d2yA = self.d2r[ia].y
d2zA = self.d2r[ia].z
ib = i+1
if ib == self.npnts:
ib = 0
d2xB = self.d2r[ib].x
d2yB = self.d2r[ib].y
d2zB = self.d2r[ib].z
sP = self.pnls[i].length
for j in range(num):
s = j*sP/num
A = (sP-s)/sP
B = s/sP
d2x.append(A*d2xA+B*d2xB)
d2y.append(A*d2yA+B*d2yB)
d2z.append(A*d2zA+B*d2zB)
if self.clsd:
d2x.append(self.d2r[0].x)
d2y.append(self.d2r[0].y)
d2z.append(self.d2r[0].z)
else:
d2x.append(self.d2r[-1].x)
d2y.append(self.d2r[-1].y)
d2z.append(self.d2r[-1].z)
return d2x, d2y, d2z
def scatter(self, ax=None, label=False):
u"""This function plots the points of the spline."""
if ax == None:
fig = figure()
ax = Axes3D(fig)
ax.grid(True)
x = []
y = []
z = []
for i in range(self.npnts):
x.append(self.pnts[i].x)
y.append(self.pnts[i].y)
z.append(self.pnts[i].z)
ax.scatter(x, y, z)
if label:
for i in range(self.npnts):
ax.text(x[i], y[i], z[i], i)
return ax
def plot_spline(self, num=5, ax=None, color='blue'):
u"""This function plots the spline using the interpolated points."""
if ax == None:
fig = figure()
ax = Axes3D(fig)
ax.grid(True)
x, y, z = self.spline_points(num)
ax.plot(x, y, z, color=color)
return ax
def arc_length(self, num=1):
u"""This function calculates the arc length of the spline."""
s = []
sc = 0.
for i in | |
待启用的直播域名。
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class EnableLiveDomainResponse(AbstractModel):
    """EnableLiveDomain response body.
    """

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ForbidLiveDomainRequest(AbstractModel):
    """ForbidLiveDomain request body.
    """

    def __init__(self):
        r"""
        :param DomainName: Live streaming domain name to disable.
        :type DomainName: str
        """
        self.DomainName = None

    def _deserialize(self, params):
        self.DomainName = params.get("DomainName")
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ForbidLiveDomainResponse(AbstractModel):
    """ForbidLiveDomain response body.
    """

    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; supply it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class ForbidLiveStreamRequest(AbstractModel):
    """ForbidLiveStream request body.
    """

    def __init__(self):
        r"""
        :param AppName: Push path, matching the AppName in the push and playback URLs; defaults to `live`.
        :type AppName: str
        :param DomainName: Your push domain name.
        :type DomainName: str
        :param StreamName: Stream name.
        :type StreamName: str
        :param ResumeTime: Time at which the stream is un-banned, in UTC format, e.g. 2018-11-29T19:00:00Z.
            Notes:
            1. The default ban lasts 7 days; the maximum is 90 days.
            2. Beijing time is UTC + 8 hours; the format follows ISO 8601.
        :type ResumeTime: str
        :param Reason: Reason for the ban.
            Note: always record a reason to guard against accidental bans.
            Maximum length: 2048 bytes.
        :type Reason: str
        """
        self.AppName = None
        self.DomainName = None
        self.StreamName = None
        self.ResumeTime = None
        self.Reason = None

    def _deserialize(self, params):
        self.AppName = params.get("AppName")
        self.DomainName = params.get("DomainName")
        self.StreamName = params.get("StreamName")
        self.ResumeTime = params.get("ResumeTime")
        self.Reason = params.get("Reason")
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class ForbidLiveStreamResponse(AbstractModel):
    """ForbidLiveStream response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        """Copy the RequestId field from the raw API response dict *params*."""
        self.RequestId = params.get("RequestId")
class ForbidStreamInfo(AbstractModel):
    """Entry in the list of banned streams.
    """

    def __init__(self):
        r"""
        :param StreamName: Stream name.
        :type StreamName: str
        :param CreateTime: Creation time of the ban.
        :type CreateTime: str
        :param ExpireTime: Expiry time of the ban.
        :type ExpireTime: str
        """
        self.StreamName = None
        self.CreateTime = None
        self.ExpireTime = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.StreamName = params.get("StreamName")
        self.CreateTime = params.get("CreateTime")
        self.ExpireTime = params.get("ExpireTime")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class GroupProIspDataInfo(AbstractModel):
    """Bandwidth, traffic, request count and concurrency for one province/ISP over a period.
    """

    def __init__(self):
        r"""
        :param ProvinceName: Province.
        :type ProvinceName: str
        :param IspName: ISP (carrier).
        :type IspName: str
        :param DetailInfoList: Minute-granularity detail data.
        :type DetailInfoList: list of CdnPlayStatData
        """
        self.ProvinceName = None
        self.IspName = None
        self.DetailInfoList = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.ProvinceName = params.get("ProvinceName")
        self.IspName = params.get("IspName")
        # Hoist the lookup so the key is only fetched once.
        detail_items = params.get("DetailInfoList")
        if detail_items is not None:
            self.DetailInfoList = []
            for item in detail_items:
                obj = CdnPlayStatData()
                obj._deserialize(item)
                self.DetailInfoList.append(obj)
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class HlsSpecialParam(AbstractModel):
    """HLS-specific recording parameters.
    """

    def __init__(self):
        r"""
        :param FlowContinueDuration: HLS stream-resume timeout.
        Value range: [0, 1800].
        :type FlowContinueDuration: int
        """
        self.FlowContinueDuration = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.FlowContinueDuration = params.get("FlowContinueDuration")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class HttpCodeInfo(AbstractModel):
    """HTTP return code and its statistics.
    """

    def __init__(self):
        r"""
        :param HttpCode: HTTP return code class.
        e.g. "2xx", "3xx", "4xx", "5xx".
        :type HttpCode: str
        :param ValueList: Statistics; time points without data are zero-filled.
        :type ValueList: list of HttpCodeValue
        """
        self.HttpCode = None
        self.ValueList = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.HttpCode = params.get("HttpCode")
        # Hoist the lookup so the key is only fetched once.
        value_items = params.get("ValueList")
        if value_items is not None:
            self.ValueList = []
            for item in value_items:
                obj = HttpCodeValue()
                obj._deserialize(item)
                self.ValueList.append(obj)
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class HttpCodeValue(AbstractModel):
    """HTTP return code data point.
    """

    def __init__(self):
        r"""
        :param Time: Time, format: yyyy-mm-dd HH:MM:SS.
        :type Time: str
        :param Numbers: Number of occurrences.
        :type Numbers: int
        :param Percentage: Proportion.
        :type Percentage: float
        """
        self.Time = None
        self.Numbers = None
        self.Percentage = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Time = params.get("Time")
        self.Numbers = params.get("Numbers")
        self.Percentage = params.get("Percentage")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class HttpStatusData(AbstractModel):
    """Playback error-code information for one time point.
    """

    def __init__(self):
        r"""
        :param Time: Data time point,
        format: yyyy-mm-dd HH:MM:SS.
        :type Time: str
        :param HttpStatusInfoList: Playback status-code details.
        :type HttpStatusInfoList: list of HttpStatusInfo
        """
        self.Time = None
        self.HttpStatusInfoList = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Time = params.get("Time")
        # Hoist the lookup so the key is only fetched once.
        status_items = params.get("HttpStatusInfoList")
        if status_items is not None:
            self.HttpStatusInfoList = []
            for item in status_items:
                obj = HttpStatusInfo()
                obj._deserialize(item)
                self.HttpStatusInfoList.append(obj)
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class HttpStatusInfo(AbstractModel):
    """Playback error-code count.
    """

    def __init__(self):
        r"""
        :param HttpStatus: Playback HTTP status code.
        :type HttpStatus: str
        :param Num: Number of occurrences.
        :type Num: int
        """
        self.HttpStatus = None
        self.Num = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.HttpStatus = params.get("HttpStatus")
        self.Num = params.get("Num")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class LivePackageInfo(AbstractModel):
    """Live streaming package information.
    """

    def __init__(self):
        r"""
        :param Id: Package ID.
        :type Id: str
        :param Total: Total amount.
        Note: bytes for traffic packages,
        minutes for transcoding packages.
        :type Total: int
        :param Used: Amount used.
        Note: bytes for traffic packages,
        minutes for transcoding packages,
        hours for co-streaming packages.
        :type Used: int
        :param Left: Amount remaining.
        Note: bytes for traffic packages,
        minutes for transcoding packages,
        hours for co-streaming packages.
        :type Left: int
        :param BuyTime: Purchase time.
        :type BuyTime: str
        :param ExpireTime: Expiry time.
        :type ExpireTime: str
        :param Type: Package type. Valid values:
        0: traffic package.
        1: standard transcoding package.
        2: top-speed codec package.
        3: co-streaming package.
        :type Type: int
        :param Status: Package status. Valid values:
        0: unused.
        1: in use.
        2: expired.
        3: frozen.
        4: exhausted.
        5: refunded.
        :type Status: int
        """
        self.Id = None
        self.Total = None
        self.Used = None
        self.Left = None
        self.BuyTime = None
        self.ExpireTime = None
        self.Type = None
        self.Status = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Id = params.get("Id")
        self.Total = params.get("Total")
        self.Used = params.get("Used")
        self.Left = params.get("Left")
        self.BuyTime = params.get("BuyTime")
        self.ExpireTime = params.get("ExpireTime")
        self.Type = params.get("Type")
        self.Status = params.get("Status")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class LogInfo(AbstractModel):
    """Log URL information.
    """

    def __init__(self):
        r"""
        :param LogName: Log name.
        :type LogName: str
        :param LogUrl: Log URL.
        :type LogUrl: str
        :param LogTime: Log generation time.
        :type LogTime: str
        :param FileSize: File size.
        :type FileSize: int
        """
        self.LogName = None
        self.LogUrl = None
        self.LogTime = None
        self.FileSize = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.LogName = params.get("LogName")
        self.LogUrl = params.get("LogUrl")
        self.LogTime = params.get("LogTime")
        self.FileSize = params.get("FileSize")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyLiveCallbackTemplateRequest(AbstractModel):
    """ModifyLiveCallbackTemplate request structure.
    """

    def __init__(self):
        r"""
        :param TemplateId: Template ID returned by the DescribeLiveCallbackTemplates API.
        :type TemplateId: int
        :param TemplateName: Template name.
        :type TemplateName: str
        :param Description: Description.
        :type Description: str
        :param StreamBeginNotifyUrl: Stream-start callback URL.
        :type StreamBeginNotifyUrl: str
        :param StreamEndNotifyUrl: Stream-end callback URL.
        :type StreamEndNotifyUrl: str
        :param RecordNotifyUrl: Recording callback URL.
        :type RecordNotifyUrl: str
        :param SnapshotNotifyUrl: Snapshot callback URL.
        :type SnapshotNotifyUrl: str
        :param PornCensorshipNotifyUrl: Porn-detection callback URL.
        :type PornCensorshipNotifyUrl: str
        :param CallbackKey: Callback key shared by the callback URLs; see the event
        notification documentation for callback signing.
        [Event message notification](/document/product/267/32744).
        :type CallbackKey: str
        """
        self.TemplateId = None
        self.TemplateName = None
        self.Description = None
        self.StreamBeginNotifyUrl = None
        self.StreamEndNotifyUrl = None
        self.RecordNotifyUrl = None
        self.SnapshotNotifyUrl = None
        self.PornCensorshipNotifyUrl = None
        self.CallbackKey = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TemplateId = params.get("TemplateId")
        self.TemplateName = params.get("TemplateName")
        self.Description = params.get("Description")
        self.StreamBeginNotifyUrl = params.get("StreamBeginNotifyUrl")
        self.StreamEndNotifyUrl = params.get("StreamEndNotifyUrl")
        self.RecordNotifyUrl = params.get("RecordNotifyUrl")
        self.SnapshotNotifyUrl = params.get("SnapshotNotifyUrl")
        self.PornCensorshipNotifyUrl = params.get("PornCensorshipNotifyUrl")
        self.CallbackKey = params.get("CallbackKey")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyLiveCallbackTemplateResponse(AbstractModel):
    """ModifyLiveCallbackTemplate response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        """Copy the RequestId field from the raw API response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifyLiveCertRequest(AbstractModel):
    """ModifyLiveCert request structure.
    """

    def __init__(self):
        r"""
        :param CertId: Certificate ID.
        :type CertId: str
        :param CertType: Certificate type. 0: user-added certificate; 1: Tencent Cloud-hosted certificate.
        :type CertType: int
        :param CertName: Certificate name.
        :type CertName: str
        :param HttpsCrt: Certificate content, i.e. the public key.
        :type HttpsCrt: str
        :param HttpsKey: Private key.
        :type HttpsKey: str
        :param Description: Description.
        :type Description: str
        """
        self.CertId = None
        self.CertType = None
        self.CertName = None
        self.HttpsCrt = None
        self.HttpsKey = None
        self.Description = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.CertId = params.get("CertId")
        self.CertType = params.get("CertType")
        self.CertName = params.get("CertName")
        self.HttpsCrt = params.get("HttpsCrt")
        self.HttpsKey = params.get("HttpsKey")
        self.Description = params.get("Description")
        # Keys with no matching attribute indicate API/SDK version drift.
        # (Typo "fileds" in the warning message fixed to "fields".)
        unknown = set(params.keys()) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown))
class ModifyLiveCertResponse(AbstractModel):
    """ModifyLiveCert response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        """Copy the RequestId field from the raw API response dict *params*."""
        self.RequestId = params.get("RequestId")
class ModifyLiveDomainCertRequest(AbstractModel):
"""ModifyLiveDomainCert请求参数结构体
"""
def | |
# Repository: oliviersultan/qlik-py-tools (dataset artifact converted to a comment; the raw "<reponame>" tag was not valid Python)
import os
import sys
import ast
import time
import string
import locale
import pickle
import warnings
import numpy as np
import pandas as pd
from pathlib import Path
import ServerSideExtension_pb2 as SSE
# Suppress warnings unless the user explicitly enabled them via the -W flag
# (sys.warnoptions is empty when no -W option was passed on the command line).
if not sys.warnoptions:
    warnings.simplefilter("ignore")
from sklearn import preprocessing
# Add Generated folder to module path so the generated protobuf stubs
# (e.g. ServerSideExtension_pb2) can be imported from the 'generated' directory.
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
def request_df(request_list, row_template, col_headers):
"""
This function takes in a SSE request as a list together with a row template and column headers as lists of strings.
Returns a Data Frame for the request.
e.g. request_df(request_list, ['strData', 'numData', 'strData'], ['dim1', 'measure', 'kwargs'])
"""
rows = [row for request_rows in request_list for row in request_rows.rows]
outer = []
for i in range(len(rows)):
inner = []
for j in range(len(row_template)):
inner.append(getattr(rows[i].duals[j], row_template[j]))
outer.append(inner)
return pd.DataFrame(outer, columns=col_headers)
def get_response_rows(response, template):
    """
    Convert a list of response values into SSE.Rows.

    The template lists each column's type: "str" for strings, "num" for
    numerics and "dual" for dual values, e.g. ["str", "num", "dual", ...].
    """
    def _as_dual(value, col_type):
        # Wrap one value in an SSE.Dual according to its declared type.
        # Unknown types yield None and are silently skipped, matching the
        # behaviour of the original if/elif chain.
        if col_type == "str":
            if value is None:
                value = "\x00"
            elif type(value) is not str:
                value = "{0:.5f}".format(value)
            return SSE.Dual(strData=value)
        if col_type == "num":
            return SSE.Dual(numData=value)
        if col_type == "dual":
            return SSE.Dual(strData=value, numData=value)
        return None

    dual_rows = []
    for row in response:
        duals = []
        if len(template) > 1:
            # Multi-column response: each element of the row maps to the
            # template entry at the same position.
            for idx, col in enumerate(row):
                dual = _as_dual(col, template[idx])
                if dual is not None:
                    duals.append(dual)
        else:
            # Single-column response: the row itself is the value.
            dual = _as_dual(row, template[0])
            if dual is not None:
                duals.append(dual)
        dual_rows.append(iter(duals))
    # Structure the grouped duals as SSE.Rows.
    return [SSE.Row(duals=grouped) for grouped in dual_rows]
def fillna(df, method="zeros"):
    """
    Replace missing values in a DataFrame with the chosen strategy.

    Supported methods: zeros (default), mean, median, mode, none.
    "none" returns the frame untouched; any unrecognized method falls
    back to zero-filling, exactly like the original implementation.
    """
    if method == "none":
        return df
    if method == "mean":
        filler = df.mean()
    elif method == "median":
        filler = df.median()
    elif method == "mode":
        filler = df.mode().iloc[0]
    else:
        filler = 0
    return df.fillna(filler)
def get_scaler(df, missing="zeros", scaler="StandardScaler", **kwargs):
    """
    Return a sklearn scaler fitted on the given DataFrame.

    Valid scaler names: StandardScaler, MinMaxScaler, MaxAbsScaler,
    RobustScaler, QuantileTransformer.
    Missing values are imputed first via fillna; valid options for the
    missing parameter are: zeros, mean, median, mode.
    """
    filled = fillna(df, method=missing)
    scaler_obj = getattr(preprocessing, scaler)(**kwargs)
    return scaler_obj.fit(filled)
def scale(df, missing="zeros", scaler="robust", **kwargs):
    """
    Scale a DataFrame with the requested sklearn preprocessing transformer.

    Valid scaler names: standard, minmax, maxabs, robust, quantile.
    Missing values are imputed first via fillna; valid options for the
    missing parameter are: zeros, mean, median, mode.
    """
    class_names = {
        'standard': 'StandardScaler',
        'minmax': 'MinMaxScaler',
        'maxabs': 'MaxAbsScaler',
        'robust': 'RobustScaler',
        'quantile': 'QuantileTransformer',
    }
    transformer = getattr(preprocessing, class_names[scaler])(**kwargs)
    filled = fillna(df, method=missing)
    return pd.DataFrame(transformer.fit_transform(filled),
                        index=filled.index, columns=filled.columns)
def count_placeholders(series):
    """
    Return how many trailing values of *series* are null or zero.

    Counting starts from the end and stops at the first real value.
    """
    tail = 0
    # Walk the values from the end; positional order is what matters,
    # so the original index is irrelevant.
    for value in reversed(series.tolist()):
        if pd.isnull(value) or value == 0:
            tail += 1
        else:
            break
    return tail
def get_kwargs(str_kwargs):
    """
    Parse a string of keyword arguments into a dict of key/value pairs.

    The string should be in the form: 'arg1=value1,arg2=value2'.
    An empty or whitespace-only string yields an empty dict.
    """
    # Strip surrounding whitespace FIRST, then test for emptiness: the
    # original checked len(str_kwargs) before stripping, so a whitespace-only
    # input ("   ") fell through and raised IndexError at args[-1].
    args = str_kwargs.strip()
    if not args:
        return dict()
    # Drop a trailing comma so the split below yields no empty entry.
    if args[-1] == ',':
        args = args[:-1]
    # Remove all remaining whitespace and split into key=value tokens.
    args = args.translate(str.maketrans('', '', string.whitespace)).split(",")
    return dict(arg.split("=") for arg in args)
def get_kwargs_by_type(dict_kwargs):
    """
    Take in a dictionary of keyword arguments where values are converted to the specified data type.
    The values in the dictionary should be a string of the form: "value|type"
    e.g. {"arg1": "2|int", "arg2": "2.0|float", "arg3": "True|bool", "arg4": "string|str"}
    Dictionaries, lists and arrays are allowed with the following format:
    "x:1;y:2|dict|str|int" where str is the type for keys and int is the type for values
    "x;y;z|array|str" where str is the type of values in the array
    "1;2;3|list|int" where int is the type of the values in the list
    "0;1|tuple|int" where int is the type of the values in the tuple
    """
    # Dictionary used to convert argument values to the correct type
    # NOTE(review): atoi, atof and atonone are not defined in this function —
    # presumably module-level helpers (locale-aware numeric parsing and a
    # None-converter); confirm they are in scope elsewhere in this module.
    types = {"boolean":ast.literal_eval, "bool":ast.literal_eval, "integer":atoi, "int":atoi,\
        "float":atof, "string":str, "str":str, "none":atonone, "None":atonone}
    result_dict = {}
    # Fill up the dictionary with the keyword arguments
    # NOTE(review): entries whose value has no "|type" suffix (len(split) == 1)
    # are silently dropped from the result.
    for k, v in dict_kwargs.items():
        # Split the value and type
        split = v.split("|")
        try:
            if len(split) == 2:
                # Handle conversion from string to boolean
                if split[1] in ("boolean", "bool"):
                    split[0] = split[0].capitalize()
                # Convert the value based on the correct type
                result_dict[k] = types[split[1]](split[0])
            elif split[1] == "dict":
                # If the argument is a dictionary convert keys and values according to the correct types
                items = split[0].split(";")
                d = {}
                for i in items:
                    a,b = i.split(":")
                    # Handle conversion from string to boolean
                    if split[2] in ("boolean", "bool"):
                        a = a.capitalize()
                    if split[3] in ("boolean", "bool"):
                        b = b.capitalize()
                    # Handle None as an item in the dictionary
                    if b in ("None", "none"):
                        d[types[split[2]](a)] = None
                    else:
                        d[types[split[2]](a)] = types[split[3]](b)
                result_dict[k] = d
            elif split[1] in ("list", "array", "tuple"):
                # If the argument is a list, array or tuple convert keys and values according to the correct types
                items = split[0].split(";")
                l = []
                for i in items:
                    # Handle conversion from string to boolean
                    if split[2] in ("boolean", "bool"):
                        i = i.capitalize()
                    # Handle None as an item
                    if i in ("None", "none"):
                        l.append(None)
                    else:
                        l.append(types[split[2]](i))
                if split[1] == "array":
                    l = np.array(l)
                elif split[1] == "tuple":
                    l = tuple(l)
                result_dict[k] = l
        except IndexError:
            err = "List index out of range. This is most likely due to incorrect syntax of keyword arguments."
            raise Exception(err)
    return result_dict
def get_args_by_type(str_args):
"""
Take in a string of positional arguments and types and convert them to a list of values of the correct type.
The string should be in the form: 'value1|type1,value2|type2'.
e.g. '8|int, 0.5|float' would return [8, 0.5]
"""
# Dictionary used to convert argument values to the correct type
types = {"boolean":ast.literal_eval, "bool":ast.literal_eval, "integer":atoi, "int":atoi,\
"float":atof, "string":str, "str":str}
result_list = []
# If the argument string is empty return an empty list
if len(str_args) == 0:
return list()
for arg in str_args.split(","):
# Split the value and type
split = arg.strip().split("|")
try:
if len(split) == 2:
# Handle conversion from string to boolean
if split[1] in ("boolean", "bool"):
split[0] = split[0].capitalize()
# Convert the value based on the correct type
result_list.append(types[split[1]](split[0]))
elif split[1] == "dict":
# If the argument is a dictionary convert keys and values according to the correct types
items = split[0].split(";")
d = {}
for i in items:
a,b = i.split(":")
# Handle conversion from string to boolean
if split[2] in ("boolean", "bool"):
a = a.capitalize()
if split[3] in ("boolean", "bool"):
b = b.capitalize()
# Handle None as an item in the dictionary
if b == "None":
d[types[split[2]](a)] = None
else:
d[types[split[2]](a)] = types[split[3]](b)
result_list.append(d)
elif split[1] in ("list", "array", "tuple"):
# If the argument is a list, array or tuple convert keys and values according to the correct types
items = | |
dtype=float, sep=';', count=-1) for delaystr in delays_str]
# delay_settings = NP.asarray(delays_list)
# delay_settings *= 435e-12
# delays = NP.copy(delay_settings)
h = 0.7 # Hubble constant coefficient
# Two cosmologies: cosmodel100 (H0 = 100 km/s/Mpc) keeps distances in Mpc/h
# units; cosmodel uses the dimensionful H0 = h * 100 km/s/Mpc.
cosmodel100 = CP.FlatLambdaCDM(H0=100.0, Om0=0.27) # Using H0 = 100 km/s/Mpc
cosmodel = CP.FlatLambdaCDM(H0=h*100.0, Om0=0.27) # Using H0 = h * 100 km/s/Mpc
# Comoving radial depth subtended by bandwidth bw at this redshift, and the
# comoving transverse distance to it. NOTE(review): bw, redshift, wavelength
# and A_eff are assumed to be set earlier in this script — confirm.
dr_z = (FCNST.c/1e3) * bw * (1+redshift)**2 / CNST.rest_freq_HI / cosmodel100.H0.value / cosmodel100.efunc(redshift) # in Mpc/h
r_z = cosmodel100.comoving_transverse_distance(redshift).value # in Mpc/h
# Volume normalization factors used when converting power spectra.
volfactor1 = A_eff / wavelength**2 / bw
volfactor2 = r_z**2 * dr_z / bw
# Flux density <-> brightness temperature conversion factors (Rayleigh-Jeans).
Jy2K = wavelength**2 * CNST.Jy / (2*FCNST.k)
mJy2mK = NP.copy(Jy2K)  # same numeric factor applies for mJy -> mK
Jy2mK = 1e3 * Jy2K
mK2Jy = 1/Jy2mK
mK2mJy = 1/mJy2mK
K2Jy = 1/Jy2K
# Colour-scale limits for delay-spectrum plots; None lets the plots autoscale.
dspec_min = None
dspec_max = None
def kprll(eta, z):
    """Line-of-sight cosmological wavenumber k_parallel for delay *eta* at redshift *z*.
    NOTE(review): relies on module-level cosmodel100 (H0 = 100 km/s/Mpc), CNST and
    FCNST; with that cosmology the result is presumably in h/Mpc — confirm units,
    including the trailing 1e3 factor.
    """
    return 2 * NP.pi * eta * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z) / FCNST.c / (1+z)**2 * 1e3
def kperp(u, z):
    """Transverse cosmological wavenumber k_perp for baseline length *u* (in wavelengths)
    at redshift *z*. Uses module-level cosmodel100, whose comoving transverse distance
    is in Mpc/h (H0 = 100 km/s/Mpc), so the result is presumably in h/Mpc — confirm.
    """
    return 2 * NP.pi * u / cosmodel100.comoving_transverse_distance(z).value
def ha(ra, lst):
    """Return the hour angle ``lst - ra`` (degrees), element-wise on arrays.

    Values above 180 are reduced by 360; note that no wrapping is applied
    on the negative side (values below -180 are returned unchanged).
    """
    hour_angle = lst - ra
    wrap_mask = hour_angle > 180.0
    hour_angle[wrap_mask] = hour_angle[wrap_mask] - 360.0
    return hour_angle
antenna_temperature_file = rootdir+project_dir+'antenna_power_'+telescope_str+ground_plane_str+latitude_str+anttemp_snapshot_type_str+anttemp_duration_str+'_'+fg_str+'_sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(anttemp_nside)+'{0}_{1:.1f}_MHz'.format(anttemp_bandpass_str, freq/1e6)+'.fits'
hdulist = fits.open(antenna_temperature_file)
antpower_K = hdulist['Antenna Temperature'].data
anttemp_pointing_coords = hdulist['PRIMARY'].header['pointing_coords']
anttemp_pointing_center = hdulist['POINTINGS'].data['pointing_center']
anttemp_lst = hdulist['POINTINGS'].data['LST']
if (anttemp_pointing_coords == 'RADEC') or (anttemp_pointing_coords == 'radec'):
anttemp_pointings_radec = NP.copy(anttemp_pointing_center)
anttemp_pointings_hadec = NP.hstack(((anttemp_lst - anttemp_pointings_radec[:,0]).reshape(-1,1), anttemp_pointings_radec[:,1].reshape(-1,1)))
anttemp_pointings_altaz = GEOM.hadec2altaz(anttemp_pointings_hadec, latitude, units='degrees')
elif (anttemp_pointing_coords == 'HADEC') or (anttemp_pointing_coords == 'hadec'):
anttemp_pointings_hadec = NP.copy(anttemp_pointing_center)
anttemp_pointings_radec = NP.hstack(((anttemp_lst - anttemp_pointings_hadec[:,0]).reshape(-1,1), anttemp_pointings_hadec[:,1].reshape(-1,1)))
anttemp_pointings_altaz = GEOM.hadec2altaz(anttemp_pointings_hadec, latitude, units='degrees')
elif (anttemp_pointing_coords == 'ALTAZ') or (anttemp_pointing_coords == 'altaz'):
anttemp_pointings_altaz = NP.copy(anttemp_pointing_center)
anttemp_pointings_hadec = GEOM.altaz2hadec(anttemp_pointings_altaz, latitude, units='degrees')
anttemp_pointings_radec = NP.hstack(((anttemp_lst - anttemp_pointings_hadec[:,0]).reshape(-1,1), anttemp_pointings_hadec[:,1].reshape(-1,1)))
##################
if filenaming_convention == 'old':
asm_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+pfb_instr
asm_CLEAN_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+pfb_outstr+bpass_shape
else:
asm_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz'.format(Tsys, bandpass_str, freq/1e6)+pfb_instr
asm_CLEAN_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_outstr+bpass_shape
dsm_CLEAN_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'dsm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_outstr+bpass_shape
csm_CLEAN_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'csm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_outstr+bpass_shape
eor_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'HI_cube'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz'.format(Tsys, eor_bandpass_str, freq/1e6)+pfb_instr
ia = RI.InterferometerArray(None, None, None, init_file=asm_infile+'.fits')
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
simdata_bl_orientation[simdata_neg_bl_orientation_ind] -= 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
# PDB.set_trace()
# mwdt = ia.multi_window_delay_transform([4e6, 8e6], freq_center=[145e6, 160e6], shape='bhw')
hdulist = fits.open(asm_infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
hdulist = fits.open(asm_CLEAN_infile+'.fits')
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
clean_lags_orig = NP.copy(clean_lags)
asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(dsm_CLEAN_infile+'.fits')
dsm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
dsm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
dsm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
dsm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(csm_CLEAN_infile+'.fits')
csm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
csm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
csm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
csm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
hdulist.close()
eor_ia = RI.InterferometerArray(None, None, None, init_file=eor_infile+'.fits')
if NP.sum(simdata_neg_bl_orientation_ind) > 0:
eor_ia.conjugate(ind=NP.where(simdata_neg_bl_orientation_ind)[0])
eor_ia.delay_transform(oversampling_factor-1.0)
asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = asm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = dsm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis[simdata_neg_bl_orientation_ind,:,:].conj()
csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:] = csm_cc_vis_res[simdata_neg_bl_orientation_ind,:,:].conj()
asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(asm_cc_skyvis_res, axis=1),axes=1) * asm_cc_skyvis.shape[1] * freq_resolution
asm_cc_skyvis_lag = asm_cc_skyvis_lag + asm_ccres_sky
asm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_ccres = NP.fft.fftshift(NP.fft.ifft(asm_cc_vis_res, axis=1),axes=1) * asm_cc_vis.shape[1] * freq_resolution
asm_cc_vis_lag = asm_cc_vis_lag + asm_ccres
dsm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(dsm_cc_skyvis_res, axis=1),axes=1) * dsm_cc_skyvis.shape[1] * freq_resolution
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag + dsm_ccres_sky
dsm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_ccres = NP.fft.fftshift(NP.fft.ifft(dsm_cc_vis_res, axis=1),axes=1) * dsm_cc_vis.shape[1] * freq_resolution
dsm_cc_vis_lag = dsm_cc_vis_lag + dsm_ccres
csm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(csm_cc_skyvis_res, axis=1),axes=1) * csm_cc_skyvis.shape[1] * freq_resolution
csm_cc_skyvis_lag = csm_cc_skyvis_lag + csm_ccres_sky
csm_cc_vis_lag = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_ccres = NP.fft.fftshift(NP.fft.ifft(csm_cc_vis_res, axis=1),axes=1) * csm_cc_vis.shape[1] * freq_resolution
csm_cc_vis_lag = csm_cc_vis_lag + csm_ccres
eor_skyvis_lag = eor_ia.skyvis_lag
asm_cc_skyvis_lag = DSP.downsampler(asm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
asm_cc_vis_lag = DSP.downsampler(asm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
dsm_cc_skyvis_lag = DSP.downsampler(dsm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
dsm_cc_vis_lag = DSP.downsampler(dsm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
csm_cc_skyvis_lag = DSP.downsampler(csm_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
csm_cc_vis_lag = DSP.downsampler(csm_cc_vis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
# eor_skyvis_lag = DSP.downsampler(eor_skyvis_lag, 1.0*clean_lags.size/eor_ia.lags.size, axis=1)
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
vis_noise_lag = NP.copy(ia.vis_noise_lag)
vis_noise_lag = vis_noise_lag[truncated_ref_bl_ind,:,:]
asm_cc_skyvis_lag = asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
asm_cc_vis_lag = asm_cc_vis_lag[truncated_ref_bl_ind,:,:]
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
dsm_cc_vis_lag = dsm_cc_vis_lag[truncated_ref_bl_ind,:,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
csm_cc_vis_lag = csm_cc_vis_lag[truncated_ref_bl_ind,:,:]
eor_skyvis_lag = eor_skyvis_lag[truncated_ref_bl_ind,:,:]
delaymat = DLY.delay_envelope(ia.baselines[truncated_ref_bl_ind,:], pc, units='mks')
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, ia.baselines[truncated_ref_bl_ind,:].shape[0], axis=0)
EoR_window = NP.logical_or(clags > max_delay+1/bw, clags < min_delay-1/bw)
strict_EoR_window = NP.copy(EoR_window)
if coarse_channel_resolution is not None:
strict_EoR_window = NP.logical_and(EoR_window, NP.abs(clags) < 1/coarse_channel_resolution)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
# vis_rms_lag = OPS.rms(asm_cc_vis_lag.reshape(-1,n_snaps), mask=NP.logical_not(NP.repeat(thermal_noise_window.reshape(-1,1), n_snaps, axis=1)), axis=0)
# vis_rms_freq = NP.abs(vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
# T_rms_freq = vis_rms_freq / (2.0 * FCNST.k) * NP.mean(ia.A_eff) * NP.mean(ia.eff_Q) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,-1)) * CNST.Jy
# vis_rms_lag_theory = OPS.rms(vis_noise_lag.reshape(-1,n_snaps), mask=NP.logical_not(NP.repeat(EoR_window.reshape(-1,1), n_snaps, axis=1)), axis=0)
# vis_rms_freq_theory = NP.abs(vis_rms_lag_theory) / NP.sqrt(nchan) / freq_resolution
# T_rms_freq_theory = vis_rms_freq_theory / (2.0 * FCNST.k) * NP.mean(ia.A_eff) * NP.mean(ia.eff_Q) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,-1)) * CNST.Jy
vis_rms_lag = OPS.rms(asm_cc_vis_lag, mask=NP.logical_not(NP.repeat(thermal_noise_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq = NP.abs(vis_rms_lag) / NP.sqrt(nchan) / freq_resolution
T_rms_freq = vis_rms_freq / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
vis_rms_lag_theory = OPS.rms(vis_noise_lag, mask=NP.logical_not(NP.repeat(EoR_window[:,:,NP.newaxis], n_snaps, axis=2)), axis=1)
vis_rms_freq_theory = NP.abs(vis_rms_lag_theory) / NP.sqrt(nchan) / freq_resolution
T_rms_freq_theory = vis_rms_freq_theory / (2.0 * FCNST.k) * NP.mean(ia.A_eff[truncated_ref_bl_ind,:]) * NP.mean(ia.eff_Q[truncated_ref_bl_ind,:]) * NP.sqrt(2.0*freq_resolution*NP.asarray(ia.t_acc).reshape(1,1,-1)) * CNST.Jy
if max_abs_delay is not None:
small_delays_ind = NP.abs(clean_lags) <= max_abs_delay * 1e-6
eor_small_delays_ind = NP.abs(eor_ia.lags) <= max_abs_delay * 1e-6
clean_lags = clean_lags[small_delays_ind]
asm_cc_vis_lag = asm_cc_vis_lag[:,small_delays_ind,:]
asm_cc_skyvis_lag = asm_cc_skyvis_lag[:,small_delays_ind,:]
dsm_cc_vis_lag = dsm_cc_vis_lag[:,small_delays_ind,:]
dsm_cc_skyvis_lag = dsm_cc_skyvis_lag[:,small_delays_ind,:]
csm_cc_vis_lag = csm_cc_vis_lag[:,small_delays_ind,:]
csm_cc_skyvis_lag = csm_cc_skyvis_lag[:,small_delays_ind,:]
eor_skyvis_lag = eor_skyvis_lag[:,eor_small_delays_ind,:]
if (dspec_min is None) or (dspec_max is None):
# dspec_min = NP.abs(asm_cc_vis_lag).min()
dspec_min = NP.abs(eor_skyvis_lag).min()
dspec_max = NP.abs(asm_cc_vis_lag).max()
dspec_min = dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
dspec_max = dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
cardinal_blo = 180.0 / n_bins_baseline_orientation * (NP.arange(n_bins_baseline_orientation)-1).reshape(-1,1)
cardinal_bll = 100.0
cardinal_bl = cardinal_bll * NP.hstack((NP.cos(NP.radians(cardinal_blo)), NP.sin(NP.radians(cardinal_blo)), NP.zeros_like(cardinal_blo)))
small_delays_EoR_window = EoR_window.T
small_delays_strict_EoR_window = strict_EoR_window.T
small_delays_wedge_window = wedge_window.T
if max_abs_delay is not None:
small_delays_EoR_window = small_delays_EoR_window[small_delays_ind,:]
small_delays_strict_EoR_window = small_delays_strict_EoR_window[small_delays_ind,:]
small_delays_wedge_window = small_delays_wedge_window[small_delays_ind,:]
small_delays_non_wedge_window = NP.logical_not(small_delays_wedge_window)
backdrop_xsize = 500
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmax, xmin, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
pb_snapshots = []
pbx_MWA_snapshots = []
pby_MWA_snapshots = []
src_ind_csm_snapshots = []
dsm_snapshots = []
m1, m2, d12 = GEOM.spherematch(pointings_radec[:,0], pointings_radec[:,1], anttemp_pointings_radec[:,0], anttemp_pointings_radec[:,1], nnearest=1)
# Construct the bright point source catalog
freq_SUMSS = 0.843 # in GHz
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
if spindex_seed is None:
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
else:
NP.random.seed(spindex_seed)
spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = NP.copy(fint)
nvss_file = '/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits'
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
if spindex_seed is None:
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
else:
NP.random.seed(2*spindex_seed)
spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]
freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + | |
import random
import numpy as np
from chessbot_utils import fast_predict, create_piece_decoder, flatten_board
from math import inf
import chess
import chess.engine
# import cProfile
# import pstats
class Player:
    """Random-move chess player and base class for the other player types.

    Encodes board states with ``encoder``, scores material with
    ``score_encoder`` and records [pre-move board, post-move board, move]
    triples in ``self.history`` so rewards can be assigned after the game.
    """

    def __init__(
        self, team: int, encoder: dict, score_encoder: dict, boosted_rewards: bool
    ):
        # Normalize team to +1 (white) or -1 (black).
        self.team = 1 if team == 1 else -1
        self.encoder = encoder
        self.score_encoder = score_encoder
        self.decoder = create_piece_decoder()
        self.boosted_rewards = boosted_rewards
        # Discount factor chosen so a reward halves every 10 plies: 0.5**(1/10).
        self.gamma = np.exp(np.log(0.5) / 10)
        self.history = []
        self.ignore_types = [".", "1", "-1"]
        self.pieces = ["P", "N", "B", "R", "Q", "K"]
        # Lower-case piece letters represent the black side.
        if self.team != 1:
            self.pieces = [piece.lower() for piece in self.pieces]

    def generate_move(self, env):
        """Pick a uniformly random legal move, push it onto ``env``,
        record the transition in ``self.history`` and return the move."""
        starting_board = self.encode_env(env, self.team)
        action = random.sample(list(env.legal_moves), 1)[0]
        env.push(action)
        chosen_board = self.encode_env(env, self.team)
        #print(chosen_board[-1])
        self.history.append([starting_board, chosen_board, action])
        return action

    def convert_board(self, board: list):
        """Flip a square-list board to the opponent's perspective:
        reverse the square order and swap piece case (upper <-> lower).
        Non-letter squares (e.g. ".") are unaffected by the case swap."""
        _board = list(reversed(board))
        for i in range(len(_board)):
            if _board[i] == _board[i].upper():
                _board[i] = _board[i].lower()
            elif _board[i] == _board[i].lower():
                _board[i] = _board[i].upper()
        return _board

    def encode_env(self, env, team) -> list:
        """Encode ``env`` as a list of per-square encoder vectors (from
        ``team``'s perspective), with a final one-element list holding the
        material ratio my_score/enemy_score appended as an extra feature.

        NOTE(review): divides by the enemy score -- assumes it can never be
        0 (i.e. score_encoder gives the king, which is always on the board,
        a nonzero value). Confirm, otherwise this can raise ZeroDivisionError.
        """
        env_list= str(env).split()
        if team != 1:
            env_list = self.convert_board(env_list)
        temp = [self.encoder[x] for x in env_list]
        _scores = self.get_board_score(temp, team)
        temp.append([float(_scores[0])/float(_scores[1])])
        return temp

    def repeat_move_adjuster(self, action, past_index) -> float:
        """Return a penalty (<= 0) for repeating a recently played move.

        Recurses from ``past_index`` (e.g. -4) up to the base case -2,
        adding -0.3/|k| for each history slot k whose recorded move equals
        ``action``. Returns 0 when the history is too short.
        """
        if len(self.history) < abs(past_index):
            return 0
        if past_index == -2:
            # Base case: the move two plies back carries the full -0.3 penalty.
            if self.history[past_index][2] == action:
                return -0.3
            return 0
        else:
            result = 0
            if self.history[past_index][2] == action:
                result -= (0.3 / abs(past_index))
            return result + self.repeat_move_adjuster(action, past_index + 1)

    def get_board_score(self, encoded_board, team, reverse=False) -> tuple: # returns piece score per player
        """Sum ``score_encoder`` values over the 64 board squares and
        return (my_score, enemy_score) as absolute magnitudes.

        ``remainder`` strips the extra feature entries appended by
        encode_env. ``team`` (relative to self.team) and ``reverse``
        together decide which sign of the encoded score counts as "mine".
        """
        my_score = 0
        enemy_score = 0
        # Extra appended entries (e.g. the material-ratio feature) beyond 64 squares.
        remainder = len(encoded_board)-64
        if team == self.team:
            scan_team = 1
        else:
            scan_team = -1
        if reverse:
            scan_team *= -1
        if scan_team == 1:
            for i in range(len(encoded_board)-remainder):
                _score = self.score_encoder[tuple(encoded_board[i])]
                if abs(_score) > 0:
                    # Positive encoded scores belong to "my" side in this orientation.
                    if _score > 0:
                        my_score += abs(_score)
                    else:
                        enemy_score += abs(_score)
        else:
            for i in range(len(encoded_board)-remainder):
                _score = self.score_encoder[tuple(encoded_board[i])]
                if abs(_score) > 0:
                    # Orientation flipped: negative encoded scores are "mine".
                    if _score < 0:
                        my_score += abs(_score)
                    else:
                        enemy_score += abs(_score)
        return my_score, enemy_score

    def get_enemy_greedy_move(self, env):
        """One-ply greedy search over the opponent's legal moves; returns
        (best_score, best_move).

        NOTE(review): ``pred`` subtracts a score computed from
        ``starting_board`` (the pre-move position), identical to
        ``player_score``, so the material term is always 0 and only the
        terminal adjustments differentiate moves -- this likely was meant
        to score the post-move ``encoded_board`` (the debug print below
        suggests the author suspected the same). Confirm before relying on it.
        NOTE: terminal_balancer is defined on ML_Player, so this method
        only works on subclasses that provide it.
        """
        starting_board = self.encode_env(env, self.team)
        player_score = self.get_board_score(starting_board, -self.team, reverse=True)[1]
        best_score = -inf
        best_move = None
        #print("===================")
        for action in env.legal_moves:
            env.push(action)
            encoded_board = self.encode_env(env, self.team*-1)
            pred = player_score - self.get_board_score(starting_board, -self.team, reverse=True)[1]
            if pred != 0:
                print(f"{player_score=}, {pred=}")
            result = self.terminal_balancer(env, encoded_board, self.team * -1, reverse=True)
            if result >= 1:
                # Immediate win for the mover dominates everything else.
                pred += 100
            elif result <= -0.5:
                pred -= 10
            env.pop()
            if pred > best_score:
                best_score = pred
                best_move = action
        return best_score, best_move

    def move_reward_generator(self, index: int) -> float:
        """Reward in [-1, 1] for history entry ``index``: the net material
        swing (enemy loss minus own loss) between this move's pre-board and
        the next move's pre-board, divided by 9 (presumably a queen's
        value -- confirm) and clipped. Returns 0 for the last entry."""
        if index < len(self.history) - 1:
            my_starting_points, enemy_starting_points = self.get_board_score(self.history[index][0], self.team)
            my_ending_points, enemy_ending_points = self.get_board_score(self.history[index+1][0], self.team)
            my_loss = my_starting_points - my_ending_points
            enemy_loss = enemy_starting_points - enemy_ending_points
            points = enemy_loss - my_loss
            p = min([1, points / 9])
            p = max([-1, p])
            return p
        return 0

    def finalize_data(self, reward: float, draw=False) -> list:
        """Turn ``self.history`` into [flattened_board, reward] rows.

        On a draw, only the final position is kept and only when ahead on
        material (a draw while behind yields an empty list). Otherwise
        every intermediate position gets ``base_reward`` (optionally
        boosted by per-move material rewards) and the final position gets
        ``reward``; distribute_rewards then discounts/normalizes in place.
        NOTE(review): ``enemy_team`` is computed but never used.
        """
        labeled_data = []
        enemy_team = 1 if self.team != 1 else -1
        base_reward = 0
        if draw:
            my_score, enemy_score = self.get_board_score(self.history[-1][1], self.team)
            if my_score > enemy_score:
                labeled_data.append([flatten_board(self.history[-1][1]), reward])
        else:
            for i in range(len(self.history)-1):
                turn_reward = base_reward
                if self.boosted_rewards:
                    turn_reward += self.move_reward_generator(i)*0.33334
                labeled_data.append([flatten_board(self.history[i][1]), turn_reward])
            labeled_data.append([flatten_board(self.history[-1][1]), reward])
        self.distribute_rewards(labeled_data)
        return labeled_data

    def distribute_rewards(self, labeled_data):
        """Propagate each entry's reward backwards through earlier entries
        with exponential discount ``gamma``, then normalize all rewards so
        the largest magnitude is 1.

        NOTE(review): entry ``index`` spreads its reward to positions
        index..1 only -- position 0 never receives anything, and each
        entry's own position gets gamma**1 rather than the undiscounted
        reward (and an index-0 entry contributes nothing at all). Confirm
        this off-by-one is intended.
        """
        if len(labeled_data) > 0:
            distributed_rewards = [0]*len(labeled_data)
            for index, entry in enumerate(labeled_data):
                reward = entry[1]*1
                for i in range(index):
                    reward *= self.gamma
                    distributed_rewards[index-i] += reward
            highest = abs(max(distributed_rewards, key=abs))
            if highest != 0:
                for i in range(len(distributed_rewards)):
                    labeled_data[i][1] = distributed_rewards[i] / highest

    def quit(self):
        """No-op; subclasses that own engine resources override this."""
        pass

    def __repr__(self):
        return f"Random mover {'WHITE' if self.team == 1 else 'BLACK'} "
class chess_engine(Player):
    """Player backed by an external UCI chess engine.

    Moves come from the engine subprocess instead of the random picker in
    ``Player``; transitions are still recorded in ``self.history``.
    """

    def __init__(
        self,
        team: int,
        encoder: dict,
        score_encoder: dict,
        boosted_rewards: bool,
        engine_path: str,
        time_limit: float,
        engine_name: str,
    ):
        super().__init__(team, encoder, score_encoder, boosted_rewards)
        # Spawn the UCI engine subprocess; it is released again in quit().
        self.engine = chess.engine.SimpleEngine.popen_uci(engine_path)
        self.time_limit = time_limit
        self.name = engine_name

    def __repr__(self):
        side = 'BLACK' if self.team == -1 else 'WHITE'
        return f"{self.name} chess engine {side}"

    def generate_move(self, env):
        """Ask the engine for its move (within ``time_limit`` seconds),
        apply it to ``env`` and log the transition. Returns the move."""
        pre_board = self.encode_env(env, self.team)
        move = self.engine.play(env, chess.engine.Limit(time=self.time_limit)).move
        env.push(move)
        post_board = self.encode_env(env, self.team)
        self.history.append([pre_board, post_board, move])
        return move

    def quit(self):
        """Shut down the engine subprocess."""
        self.engine.quit()
class ML_Player(Player):
    """Model-driven player: scores candidate moves with ``fast_predict``
    on the flattened encoded board, with epsilon-random exploration and an
    optional one-ply greedy material mode (``self.greedy``)."""

    def __init__(
        self,
        team: int,
        encoder: dict,
        score_encoder: dict,
        boosted_rewards: bool,
        model,
        epsilon=0.98,
    ):
        super().__init__(team, encoder, score_encoder, boosted_rewards)
        self.model = model
        # Probability threshold for exploiting; random.random() >= epsilon explores.
        self.epsilon = epsilon
        # Moves scoring within this margin of the best are sampling candidates.
        self.max_varience = 0.05
        self.random_count = 0
        self.greedy = False

    def terminal_balancer(self, env, encoded_board, team, reverse=False) -> float:
        """Score terminal (or imminently terminal) positions for ``team``:
        +1 win, -1 loss, +/-0.5 for draws depending on the material ratio.

        For an ongoing game ("*"), looks one ply ahead: returns -1 if the
        side to move can immediately win against ``team``, and -0.5 if it
        can force a draw while ``team`` is ahead on material. Returns 0
        otherwise.
        """
        result = env.result()
        my_score, enemy_score = self.get_board_score(encoded_board, team, reverse=reverse)
        if result != "*":
            if result == "1-0":
                if team == 1:
                    return 1
                else:
                    return -1
            elif result == "0-1":
                if team == -1:
                    return 1
                else:
                    return -1
            elif result == "1/2-1/2":
                # A draw is good (+0.5) when clearly behind on material.
                ratio = my_score / enemy_score
                return 0.5 if ratio <= 0.85 else -0.5
        else:
            legal_moves = list(env.legal_moves)
            ratio = enemy_score / my_score
            for action in legal_moves:
                env.push(action)
                result = env.result()
                env.pop()
                if result == "1-0" and team != 1:
                    return -1
                elif result == "0-1" and team != -1:
                    return -1
                elif result == "1/2-1/2":
                    # Opponent can force a draw; bad if we hold more material.
                    if ratio < 1:
                        return -0.5
        return 0

    def get_enemy_move(self, env) -> float:
        """Return the model's best prediction over the opponent's legal
        replies (score only, no move).

        NOTE(review): ``starting_board`` is computed but never used.
        """
        starting_board = self.encode_env(env, self.team*-1)
        best_score = -inf
        for action in env.legal_moves:
            env.push(action)
            encoded_board = self.encode_env(env, self.team * -1)
            pred_board = flatten_board(encoded_board)
            pred_board = np.array([pred_board])
            pred = fast_predict(pred_board, self.model)
            #pred += self.terminal_balancer(env, encoded_board, self.team * -1, reverse=True)
            env.pop()
            if pred > best_score:
                best_score = pred
        return best_score

    def get_greedy_move(self, env):
        """One-ply material search: return (best_move, difference,
        chosen_board), preferring moves that win material net of the
        opponent's greedy reply.

        NOTE(review): ``enemy_score`` is computed from the board and then
        immediately overwritten with 0, and is reassigned to
        ``enemy_attack`` whenever a new best move is found -- so later
        iterations compare against the previous best's reply score rather
        than a fixed baseline. ``starting``/``ending`` look like debug
        leftovers (unused). Confirm intent before relying on this search.
        """
        starting_board = self.encode_env(env, self.team)
        best_move = None
        chosen_board = None
        enemy_team = self.team * -1
        enemy_score = self.get_board_score(starting_board, self.team)[1]
        best_score = -inf
        difference = 0
        enemy_score = 0
        starting = str(env)
        for action in env.legal_moves:
            env.push(action)
            encoded_board = self.encode_env(env, self.team)
            resulting_score = self.get_board_score(encoded_board, self.team)[1]
            player_attack = enemy_score - resulting_score
            enemy_attack = self.get_enemy_greedy_move(env)[0]
            pred = player_attack - enemy_attack
            # Tie-break equal predictions by the total amount of action on the board.
            if pred > best_score or (pred == best_score and (abs(player_attack) + abs(enemy_attack)) > difference):
                best_score = pred
                best_move = action
                chosen_board = encoded_board
                difference = player_attack - enemy_attack
                enemy_score = enemy_attack
            ending = str(env)
            env.pop()
        return best_move, difference, chosen_board

    def generate_move(self, env):
        """Select and play a move on ``env``.

        Order of strategies: (1) with probability 1-epsilon (and not in
        greedy mode) a uniformly random move; (2) in greedy mode, the
        material-greedy move when it wins material; (3) otherwise score
        every legal move with the model (plus a repeat-move penalty) and
        pick among the top-scoring candidates. The transition is recorded
        in ``self.history``.

        NOTE(review): because ``moves_list`` is sorted descending, the
        first element always satisfies best_score - m[0] == 0 <
        max_varience and breaks immediately, so only the single best move
        is ever sampled -- the comparison looks inverted (probably meant
        to break once the gap *exceeds* max_varience). Confirm.
        """
        starting_board = self.encode_env(env, self.team)
        best_move = None
        chosen_board = None
        best_score = -inf
        legal_moves = list(env.legal_moves)
        if not self.greedy and random.random() >= self.epsilon:
            # Exploration branch: play (and record) a random legal move.
            move = random.sample(list(env.legal_moves), 1)[0]
            env.push(move)
            encoded_board = self.encode_env(env, self.team)
            best_move = move
            chosen_board = encoded_board
            self.random_count += 1
            env.pop()
        if best_move is None:
            if self.greedy:
                greedy_move, diff, greedy_board = self.get_greedy_move(env)
            else:
                diff = 0
            if diff != 0:
                best_move = greedy_move
                chosen_board = greedy_board
            else:
                moves_list = []
                for action in legal_moves:
                    env.push(action)
                    encoded_board = self.encode_env(env, self.team)
                    pred_board = flatten_board(encoded_board)
                    pred_board = np.array([pred_board])
                    pred = fast_predict(pred_board, self.model)
                    pred += self.repeat_move_adjuster(action, -4)
                    if pred > best_score:
                        best_score = pred
                    moves_list.append([pred, action, encoded_board])
                    env.pop()
                moves_list.sort(key=lambda x: float(x[0]), reverse=True)
                highest_index = 0
                for m in moves_list:
                    highest_index += 1
                    if best_score - m[0] < self.max_varience:
                        break
                move = random.choice(moves_list[:highest_index])
                best_move = move[1]
                chosen_board = move[2]
        if best_move is None:
            # Fallback: should be unreachable while legal moves exist.
            best_move = legal_moves[0]
        if chosen_board is not None:
            self.history.append([starting_board, chosen_board, best_move])
        return best_move

    def __repr__(self):
        return f"ML bot {'WHITE' if self.team == 1 else 'BLACK'} "
class ML_Player_Trainer(ML_Player):
    """ML_Player variant used as a training opponent.

    Identical to ``ML_Player`` except greedy play is forced off and the
    default epsilon is 1, so it never takes the random-exploration branch
    (random.random() is always < 1).
    """

    def __init__(
        self,
        team: int,
        encoder: dict,
        score_encoder: dict,
        boosted_rewards: bool,
        model,
        epsilon=1,
    ):
        super().__init__(team, encoder, score_encoder, boosted_rewards, model, epsilon)
        self.greedy = False

    def __repr__(self):
        side = 'WHITE' if self.team == 1 else 'BLACK'
        return f"ML trainer bot {side} "
class ML_Distiller(ML_Player):
    def __init__(
        self,
        team: int,
        encoder: dict,
        score_encoder: dict,
        boosted_rewards: bool,
        model,
        epsilon=1,
        depth=2,
        width=100,
        speed_hacks=False,
    ):
        """Set up a look-ahead ML player.

        ``depth`` and ``width`` bound the recursive move search (how deep
        to recurse and how many top-ranked moves to expand per level).
        ``speed_hacks`` presumably enables search shortcuts tracked via
        ``speed_barrier`` -- their use is defined elsewhere; confirm.
        """
        super().__init__(team, encoder, score_encoder, boosted_rewards, model, epsilon)
        self.depth = depth
        self.width = width
        self.speed_hacks = speed_hacks
        # Counter used together with speed_hacks (consumers not shown here).
        self.speed_barrier = 0
def recursive_move_selector(self, env, team, legal_moves, depth):
_env = env.copy(stack=False)
starting_board = self.encode_env(_env, team)
if depth <= 1:
temp = self._generate_move(_env, team)
return temp[1], temp[0]
best_score = -999999
best_move = None
shallow_results = []
for action in env.legal_moves:
env.push(action)
encoded_board = self.encode_env(env, team)
pred_board = np.array([starting_board+encoded_board])
pred = fast_predict(pred_board, self.model)
#pred += self.terminal_balancer(env, encoded_board, team)
pred += self.repeat_move_adjuster(action, -4)
env.pop()
shallow_results.append([pred, action])
sorted_results = sorted(shallow_results, key=lambda x: x[0], reverse=True)
projected_data = []
for i in range(min(len(sorted_results), self.width)):
move = sorted_results[i][1]
_env.push(move)
pred = sorted_results[i][0]
enemy_move = self._generate_move(_env, team * -1)[0]
if enemy_move != None:
_env.push(enemy_move)
pred += self.recursive_move_selector(
_env, team, list(_env.legal_moves), depth - 1
)[0]
_env.pop()
_env.pop()
if pred > best_score:
best_score = pred
best_move = move
if best_move != None:
return best_score, best_move
else:
return 0, legal_moves[0] if len(legal_moves) > | |
if the artifact has existing aliases.
"""
mutation = gql(
"""
mutation DeleteArtifact($artifactID: ID!, $deleteAliases: Boolean) {
deleteArtifact(input: {
artifactID: $artifactID
deleteAliases: $deleteAliases
}) {
artifact {
id
}
}
}
"""
)
self.client.execute(
mutation,
variable_values={"artifactID": self.id, "deleteAliases": delete_aliases,},
)
return True
def new_file(self, name, mode=None):
raise ValueError("Cannot add files to an artifact once it has been saved")
def add_file(self, local_path, name=None, is_tmp=False):
raise ValueError("Cannot add files to an artifact once it has been saved")
def add_dir(self, path, name=None):
raise ValueError("Cannot add files to an artifact once it has been saved")
def add_reference(self, uri, name=None, checksum=True, max_objects=None):
raise ValueError("Cannot add files to an artifact once it has been saved")
def add(self, obj, name):
raise ValueError("Cannot add files to an artifact once it has been saved")
def _add_download_root(self, dir_path):
"""Adds `dir_path` as one of the known directories which this
artifact treated as a root"""
self._download_roots.add(os.path.abspath(dir_path))
def _is_download_root(self, dir_path):
"""Determines if `dir_path` is a directory which this artifact as
treated as a root for downloading"""
return dir_path in self._download_roots
def _local_path_to_name(self, file_path):
"""Converts a local file path to a path entry in the artifact"""
abs_file_path = os.path.abspath(file_path)
abs_file_parts = abs_file_path.split(os.sep)
for i in range(len(abs_file_parts) + 1):
if self._is_download_root(os.path.join(os.sep, *abs_file_parts[:i])):
return os.path.join(*abs_file_parts[i:])
return None
def _get_obj_entry(self, name):
"""
When objects are added with `.add(obj, name)`, the name is typically
changed to include the suffix of the object type when serializing to JSON. So we need
to be able to resolve a name, without tasking the user with appending .THING.json.
This method returns an entry if it exists by a suffixed name.
Args:
name: (str) name used when adding
"""
self._load_manifest()
type_mapping = WBValue.type_mapping()
for artifact_type_str in type_mapping:
wb_class = type_mapping[artifact_type_str]
wandb_file_name = wb_class.with_suffix(name)
entry = self._manifest.entries.get(wandb_file_name)
if entry is not None:
return entry, wb_class
return None, None
def get_path(self, name):
manifest = self._load_manifest()
entry = manifest.entries.get(name)
if entry is None:
entry = self._get_obj_entry(name)[0]
if entry is None:
raise KeyError("Path not contained in artifact: %s" % name)
else:
name = entry.path
return _DownloadedArtifactEntry(name, entry, self)
    def get(self, name):
        """Deserialize and return the object stored under `name` via `.add()`.

        Follows cross-artifact references transparently. Returns the
        reconstructed WBValue with its artifact source attached, or None
        (implicitly) when no entry matches `name`.
        """
        entry, wb_class = self._get_obj_entry(name)
        if entry is not None:
            # If the entry is a reference from another artifact, then get it directly from that artifact
            if self._manifest_entry_is_artifact_reference(entry):
                artifact = self._get_ref_artifact_from_entry(entry)
                return artifact.get(util.uri_from_path(entry.ref))

            # Special case for wandb.Table. This is intended to be a short term optimization.
            # Since tables are likely to download many other assets in artifact(s), we eagerly download
            # the artifact using the parallelized `artifact.download`. In the future, we should refactor
            # the deserialization pattern such that this special case is not needed.
            if wb_class == wandb.Table:
                self.download(recursive=True)

            # Get the ArtifactEntry
            item = self.get_path(entry.path)
            item_path = item.download()

            # Load the object from the JSON blob
            result = None
            json_obj = {}
            with open(item_path, "r") as file:
                json_obj = json.load(file)
            result = wb_class.from_json(json_obj, self)
            result._set_artifact_source(self, name)
            return result
    def download(self, root=None, recursive=False):
        """Download the artifact's contents into `root`.

        Files are fetched on a 32-thread pool; with `recursive=True`, each
        referenced (dependent) artifact is downloaded as well. A progress
        message is printed only for "large" artifacts (>5000 files or >50MB).

        Returns:
            (str) the directory path containing the downloaded files.
        """
        dirpath = root or self._default_root()
        self._add_download_root(dirpath)
        manifest = self._load_manifest()
        nfiles = len(manifest.entries)
        size = sum(e.size for e in manifest.entries.values())
        log = False
        if nfiles > 5000 or size > 50 * 1024 * 1024:
            log = True
            termlog(
                "Downloading large artifact %s, %.2fMB. %s files... "
                % (self._artifact_name, size / (1024 * 1024), nfiles),
                newline=False,
            )
            start_time = datetime.datetime.now()

        # Force all the files to download into the same directory.
        # Download in parallel
        import multiprocessing.dummy  # this uses threads, not processes

        pool = multiprocessing.dummy.Pool(32)
        # manifest.entries iterates entry names (keys); each name is fetched
        # into dirpath via the cache-aware _download_file.
        pool.map(partial(self._download_file, root=dirpath), manifest.entries)
        if recursive:
            pool.map(lambda artifact: artifact.download(), self._dependent_artifacts)
        pool.close()
        pool.join()

        self._is_downloaded = True

        if log:
            delta = relativedelta(datetime.datetime.now() - start_time)
            termlog(
                "Done. %s:%s:%s" % (delta.hours, delta.minutes, delta.seconds),
                prefix=False,
            )
        return dirpath
def checkout(self, root=None):
dirpath = root or self._default_root(include_version=False)
for root, _, files in os.walk(dirpath):
for file in files:
full_path = os.path.join(root, file)
artifact_path = util.to_forward_slash_path(
os.path.relpath(full_path, start=dirpath)
)
try:
self.get_path(artifact_path)
except KeyError:
# File is not part of the artifact, remove it.
os.remove(full_path)
return self.download(root=dirpath)
def verify(self, root=None):
dirpath = root or self._default_root()
manifest = self._load_manifest()
ref_count = 0
for root, _, files in os.walk(dirpath):
for file in files:
full_path = os.path.join(root, file)
artifact_path = util.to_forward_slash_path(
os.path.relpath(full_path, start=dirpath)
)
try:
self.get_path(artifact_path)
except KeyError:
raise ValueError(
"Found file {} which is not a member of artifact {}".format(
full_path, self.name
)
)
for entry in manifest.entries.values():
if entry.ref is None:
if (
artifacts.md5_file_b64(os.path.join(dirpath, entry.path))
!= entry.digest
):
raise ValueError("Digest mismatch for file: %s" % entry.path)
else:
ref_count += 1
if ref_count > 0:
print("Warning: skipped verification of %s refs" % ref_count)
def file(self, root=None):
"""Download a single file artifact to dir specified by the <root>
Arguments:
root: (str, optional) The root directory in which to place the file. Defaults to './artifacts/<self.name>/'.
Returns:
(str): The full path of the downloaded file.
"""
if root is None:
root = os.path.join(".", "artifacts", self.name)
manifest = self._load_manifest()
nfiles = len(manifest.entries)
if nfiles > 1:
raise ValueError(
"This artifact contains more than one file, call `.download()` to get all files or call "
'.get_path("filename").download()'
)
return self._download_file(list(manifest.entries)[0], root=root)
def _download_file(self, name, root):
# download file into cache and copy to target dir
return self.get_path(name).download(root)
def _default_root(self, include_version=True):
root = (
os.path.join(".", "artifacts", self.name)
if include_version
else os.path.join(".", "artifacts", self._sequence_name)
)
if platform.system() == "Windows":
head, tail = os.path.splitdrive(root)
root = head + tail.replace(":", "-")
return root
def json_encode(self):
return util.artifact_to_json(self)
    @normalize_exceptions
    def save(self):
        """
        Persists artifact changes to the wandb backend.

        Sends the current description, metadata and aliases via a GraphQL
        `updateArtifact` mutation. Returns True on success (failures are
        surfaced by `normalize_exceptions` / the client).
        """
        mutation = gql(
            """
        mutation updateArtifact(
            $artifactID: ID!,
            $description: String,
            $metadata: JSONString,
            $aliases: [ArtifactAliasInput!]
        ) {
            updateArtifact(input: {
                artifactID: $artifactID,
                description: $description,
                metadata: $metadata,
                aliases: $aliases
            }) {
                artifact {
                    id
                }
            }
        }
        """
        )
        self.client.execute(
            mutation,
            variable_values={
                "artifactID": self.id,
                "description": self.description,
                # metadata must cross the wire as a JSON string, not a dict
                "metadata": util.json_dumps_safer(self.metadata),
                "aliases": [
                    {"artifactCollectionName": self._sequence_name, "alias": alias,}
                    for alias in self._aliases
                ],
            },
        )
        return True
def wait(self):
return self
# TODO: not yet public, but we probably want something like this.
def _list(self):
manifest = self._load_manifest()
return manifest.entries.keys()
def __repr__(self):
return "<Artifact {}>".format(self.id)
    def _load(self):
        """Fetch this artifact's attributes from the backend.

        Populates and returns `self._attrs` from a GraphQL query. Raises
        ValueError when the name lacks an alias (e.g. ":v3" / ":latest")
        or when the project does not contain the artifact.
        """
        query = gql(
            """
        query Artifact(
            $entityName: String!,
            $projectName: String!,
            $name: String!
        ) {
            project(name: $projectName, entityName: $entityName) {
                artifact(name: $name) {
                    ...ArtifactFragment
                }
            }
        }
        %s
        """
            % ARTIFACT_FRAGMENT
        )
        response = None
        try:
            response = self.client.execute(
                query,
                variable_values={
                    "entityName": self.entity,
                    "projectName": self.project,
                    "name": self._artifact_name,
                },
            )
        except Exception:
            # we check for this after doing the call, since the backend supports raw digest lookups
            # which don't include ":" and are 32 characters long
            if ":" not in self._artifact_name and len(self._artifact_name) != 32:
                raise ValueError(
                    'Attempted to fetch artifact without alias (e.g. "<artifact_name>:v3" or "<artifact_name>:latest")'
                )
        # A swallowed exception above leaves response as None, which falls
        # into the generic "not found" error below.
        if (
            response is None
            or response.get("project") is None
            or response["project"].get("artifact") is None
        ):
            raise ValueError(
                'Project %s/%s does not contain artifact: "%s"'
                % (self.entity, self.project, self._artifact_name)
            )
        self._attrs = response["project"]["artifact"]
        return self._attrs
# The only file should be wandb_manifest.json
def _files(self, names=None, per_page=50):
return ArtifactFiles(self.client, self, names, per_page)
    def _load_manifest(self):
        """Lazily fetch, parse and cache the artifact manifest.

        On first call, queries the backend for the manifest file's direct
        URL, downloads it, parses it into an ArtifactManifest, and loads
        the manifests of any referenced artifacts. Subsequent calls return
        the cached manifest.
        """
        if self._manifest is None:
            query = gql(
                """
            query ArtifactManifest(
                $entityName: String!,
                $projectName: String!,
                $name: String!
            ) {
                project(name: $projectName, entityName: $entityName) {
                    artifact(name: $name) {
                        currentManifest {
                            id
                            file {
                                id
                                directUrl
                            }
                        }
                    }
                }
            }
            """
            )
            response = self.client.execute(
                query,
                variable_values={
                    "entityName": self.entity,
                    "projectName": self.project,
                    "name": self._artifact_name,
                },
            )
            index_file_url = response["project"]["artifact"]["currentManifest"]["file"][
                "directUrl"
            ]
            with requests.get(index_file_url) as req:
                req.raise_for_status()
                self._manifest = artifacts.ArtifactManifest.from_manifest_json(
                    self, json.loads(six.ensure_text(req.content))
                )

            self._load_dependent_manifests()

        return self._manifest
def _load_dependent_manifests(self):
"""Helper function to interrogate entries and ensure we have loaded their manifests"""
# Make sure dependencies are avail
for entry_key in self._manifest.entries:
entry = self._manifest.entries[entry_key]
if self._manifest_entry_is_artifact_reference(entry):
dep_artifact = self._get_ref_artifact_from_entry(entry)
if dep_artifact not in self._dependent_artifacts:
dep_artifact._load_manifest()
self._dependent_artifacts.append(dep_artifact)
@staticmethod
def _manifest_entry_is_artifact_reference(entry):
"""Helper function determines if an ArtifactEntry in manifest is an artifact reference"""
return (
entry.ref is not None
and urllib.parse.urlparse(entry.ref).scheme == "wandb-artifact"
)
def _get_ref_artifact_from_entry(self, entry):
"""Helper function returns the referenced artifact from an entry"""
artifact_id = util.host_from_path(entry.ref)
return Artifact.from_id(util.hex_to_b64_id(artifact_id), self.client)
def used_by(self):
"""Retrieves the runs which use this artifact directly
Returns:
[Run]: a list of Run | |
or
signal_v_field not in radar.fields):
warn('Unable to obtain spectral differential reflectivity. ' +
'Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
lag = dscfg.get('lag', 0)
zdr = pyart.retrieve.compute_differential_reflectivity_iq(
radar, subtract_noise=subtract_noise, lag=lag,
signal_h_field=signal_h_field, signal_v_field=signal_v_field,
noise_h_field=noise_h_field, noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field('differential_reflectivity', zdr)
return new_dataset, ind_rad
def process_mean_phase_iq(procstatus, dscfg, radar_list=None):
    """
    Computes the mean phase from the horizontal or vertical IQ data

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted configuration keywords::

        datatype : list of string. Dataset keyword
            The input data types
    radar_list : list of spectra objects
        Optional. list of spectra objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
    signal_field = get_fieldname_pyart(datatype)
    ind_rad = int(radarnr[5:8]) - 1

    if radar_list is None or radar_list[ind_rad] is None:
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if signal_field not in radar.fields:
        warn('Unable to obtain MPH. Missing fields')
        return None, None

    mph = pyart.retrieve.compute_mean_phase_iq(
        radar, signal_field=signal_field)

    # vertical-channel input produces a '_vv' suffixed output field
    out_field = 'mean_phase_vv' if signal_field == 'IQ_vv_ADU' else 'mean_phase'

    new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
    new_dataset['radar_out'].add_field(out_field, mph)

    return new_dataset, ind_rad
def process_differential_phase_iq(procstatus, dscfg, radar_list=None):
    """
    Computes the differential phase from the horizontal and vertical IQ data

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted configuration keywords::

        datatype : list of string. Dataset keyword
            The input data types
        phase_offset : float. Dataset keyword
            The system differential phase offset to remove
    radar_list : list of spectra objects
        Optional. list of spectra objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    signal_h_field = None
    signal_v_field = None
    for descr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(descr)
        if datatype == 'IQhhADU':
            signal_h_field = get_fieldname_pyart(datatype)
        elif datatype == 'IQvvADU':
            signal_v_field = get_fieldname_pyart(datatype)

    ind_rad = int(radarnr[5:8]) - 1
    if radar_list is None or radar_list[ind_rad] is None:
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if not (signal_h_field in radar.fields and
            signal_v_field in radar.fields):
        warn('Unable to obtain PhiDP. Missing fields')
        return None, None

    uphidp = pyart.retrieve.compute_differential_phase_iq(
        radar, phase_offset=dscfg.get('phase_offset', 0.),
        signal_h_field=signal_h_field, signal_v_field=signal_v_field)

    new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
    new_dataset['radar_out'].add_field(
        'uncorrected_differential_phase', uphidp)

    return new_dataset, ind_rad
def process_rhohv_iq(procstatus, dscfg, radar_list=None):
    """
    Computes RhoHV from the horizontal and vertical IQ data

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted configuration keywords::

        datatype : list of string. Dataset keyword
            The input data types
        subtract_noise : Bool
            If True noise will be subtracted from the signal
        lag : int
            Time lag used in the computation
    radar_list : list of spectra objects
        Optional. list of spectra objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    # collect the pyart field name for each recognized input datatype
    fields = {'IQhhADU': None, 'IQvvADU': None, 'IQNADUh': None,
              'IQNADUv': None}
    for descr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(descr)
        if datatype in fields:
            fields[datatype] = get_fieldname_pyart(datatype)
    signal_h_field = fields['IQhhADU']
    signal_v_field = fields['IQvvADU']
    noise_h_field = fields['IQNADUh']
    noise_v_field = fields['IQNADUv']

    ind_rad = int(radarnr[5:8]) - 1
    if radar_list is None or radar_list[ind_rad] is None:
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if not (signal_h_field in radar.fields and
            signal_v_field in radar.fields):
        warn('Unable to obtain RhoHV. Missing fields')
        return None, None

    rhohv = pyart.retrieve.compute_rhohv_iq(
        radar, subtract_noise=dscfg.get('subtract_noise', False),
        lag=dscfg.get('lag', 0),
        signal_h_field=signal_h_field, signal_v_field=signal_v_field,
        noise_h_field=noise_h_field, noise_v_field=noise_v_field)

    new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
    new_dataset['radar_out'].add_field('cross_correlation_ratio', rhohv)

    return new_dataset, ind_rad
def process_Doppler_velocity_iq(procstatus, dscfg, radar_list=None):
    """
    Compute the Doppler velocity from the spectral reflectivity

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted configuration keywords::

        datatype : list of string. Dataset keyword
            The input data types
        direction : str
            The convention used in the Doppler mean field. Can be
            negative_away or negative_towards
    radar_list : list of spectra objects
        Optional. list of spectra objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    for descr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(descr)
        if datatype in ('IQhhADU', 'IQvvADU'):
            signal_field = get_fieldname_pyart(datatype)

    ind_rad = int(radarnr[5:8]) - 1
    if radar_list is None or radar_list[ind_rad] is None:
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if signal_field not in radar.fields:
        warn('Unable to obtain Doppler velocity. ' +
             'Missing field '+signal_field)
        return None, None

    vel = pyart.retrieve.compute_Doppler_velocity_iq(
        radar, signal_field=signal_field,
        direction=dscfg.get('direction', 'negative_away'))

    # vertical-channel input produces a '_vv' suffixed output field
    vel_field = 'velocity_vv' if signal_field == 'IQ_vv_ADU' else 'velocity'

    new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
    new_dataset['radar_out'].add_field(vel_field, vel)

    return new_dataset, ind_rad
def process_Doppler_width_iq(procstatus, dscfg, radar_list=None):
    """
    Compute the Doppler spectrum width from the IQ data

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted configuration keywords::

        datatype : list of string. Dataset keyword
            The input data types
        subtract_noise : Bool
            If True noise will be subtracted from the signals
        lag : int
            Time lag used in the denominator of the computation
    radar_list : list of spectra objects
        Optional. list of spectra objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    noise_field = None
    for datatypedescr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
        if datatype in ('IQhhADU', 'IQvvADU'):
            signal_field = get_fieldname_pyart(datatype)
        elif datatype in ('IQNADUh', 'IQNADUv'):
            noise_field = get_fieldname_pyart(datatype)

    ind_rad = int(radarnr[5:8])-1
    if (radar_list is None) or (radar_list[ind_rad] is None):
        warn('ERROR: No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if signal_field not in radar.fields:
        warn('Unable to obtain Doppler spectrum width. ' +
             'Missing field '+signal_field)
        return None, None

    subtract_noise = dscfg.get('subtract_noise', False)
    lag = dscfg.get('lag', 1)

    # BUG FIX: subtract_noise was previously hard-coded to True, silently
    # ignoring the 'subtract_noise' configuration keyword read above.
    width = pyart.retrieve.compute_Doppler_width_iq(
        radar, subtract_noise=subtract_noise, signal_field=signal_field,
        noise_field=noise_field, lag=lag)

    width_field = 'spectrum_width'
    if signal_field == 'IQ_vv_ADU':
        width_field += '_vv'

    # prepare for exit
    new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
    new_dataset['radar_out'].add_field(width_field, width)

    return new_dataset, ind_rad
def process_fft(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler spectra form the IQ data with a Fourier transform
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
window : list of str
Parameters of the window used to obtain the spectra. The
parameters are the ones corresponding to function
scipy.signal.windows.get_window. It can also be ['None'].
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
wind_params = dscfg.get('window', ['None'])
if len(wind_params) == 1:
window = wind_params[0]
if window == 'None':
window = None
else:
try:
window = float(window)
except ValueError:
pass
else:
window = wind_params
for i in range(1, len(window)):
window[i] = float(window[i])
window = tuple(window)
fields_in_list = []
fields_out_list = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn(field_name+' not in radar')
continue
if field_name == 'IQ_hh_ADU':
fields_out_list.append('unfiltered_complex_spectra_hh_ADU')
elif field_name == 'IQ_vv_ADU':
fields_out_list.append('unfiltered_complex_spectra_vv_ADU')
elif field_name == 'IQ_noise_power_hh_ADU':
fields_out_list.append('spectral_noise_power_hh_ADU')
elif field_name == 'IQ_noiseADU_vv':
fields_out_list.append('spectral_noise_power_vv_ADU')
else:
warn(field_name+' can not be Fourier transformed')
fields_in_list.append(field_name)
radar_out = pyart.retrieve.compute_spectra(
radar, fields_in_list, fields_out_list, window=window)
# prepare for exit
new_dataset = {'radar_out': | |
filename = './' + filename
dirname = os.path.join(filebase, os.path.split(filename)[0])
try:
split_path = re.split(r'[/\\]', dirname)
subpaths = [os.path.sep.join(split_path[:i]) for i in range(1, len(split_path)+1)]
for subpath in subpaths:
if not os.path.exists(subpath):
os.makedirs(dirname)
if self.made_dir is None:
self.made_dir = subpath
if TRACE:
print(f'self.made_dir = {self.made_dir}')
break
except FileExistsError:
pass
with open(os.path.join(filebase, filename), 'wb') as fp:
cte = part.get('content-transfer-encoding', '').lower()
if cte == 'quoted-printable':
payload = part.get_payload(decode=False)
try:
bpayload = payload.encode('ascii')
except UnicodeError:
# Excel saves "quoted-printable" files that contain non-ascii Unicode
# characters, so we need to appropriately encode them in either
# css encoding or html encoding style
if filename.endswith('css'):
payload = CSSStyle.css_escape_unicode(payload)
else:
payload = CSSStyle.html_escape_unicode(payload)
bpayload = payload.encode('ascii')
payload = quopri.decodestring(bpayload)
else:
payload = part.get_payload(decode=True)
if not first_contents:
first_contents = payload
first_filename = filename
fp.write(payload)
if first_contents:
# The first file is the "workbook" that contains a frameset and frames for each sheet.
#self.text = str(first_contents, 'utf-8')
self.text = UnicodeDammit(first_contents, is_html=True).unicode_markup
self.dirname = os.path.split(os.path.join(filebase, first_filename))[0]
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
warnings.filterwarnings("ignore", category=UserWarning, module='cssutils')
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
# the newlines are seen as bogus text elements, so get rid of them: (except this messes up <pre>)
#self.text = ' '.join(self.text.split())
#self.text = re.sub(r'''<span\s+style=["']mso-spacerun:yes["']>(\s+)</span>''', lambda m: ' ' * len(m.group(1)), self.text)
self.text = re.sub(r'>[ \n\t\r\f\v]+<', '><', self.text)
#self.text = self.text.replace('> <', '><')
stl = self.text.lower()
if '<table' not in stl and '<frame' not in stl:
if isinstance(f, str):
raise ValueError(f'No <table> tags found in {f} - maybe this is Excel 5.0/95 format? If so, try using XLS2XLSX instead.')
else:
raise ValueError('No <table> tags found - maybe this is Excel 5.0/95 format? If so, try using XLS2XLSX instead.')
self.url_soup = BeautifulSoup(self.text)
def to_xlsx(self, filename=None, workbook=None, worksheet=None, sheet_name=None):
"""Convert to xlsx using openpyxl. If filename is not None, then the result
is written to that file, and the filename is returned, else the workbook is returned.
If workbook is passed, then the worksheet is written to the given workbook"""
if workbook:
wb = workbook
ws = worksheet
else:
wb = Workbook() # Creates one worksheet
ws = wb.active
# Handle a modern "save as htm" file from excel (including converted mht files)
# <frame src="Styles_files/tabstrip.htm" name="frTabs" marginwidth=0 marginheight=0>
frames_html = self.url_soup.find_all('frame')
if frames_html:
tabstrip = frames_html[-1]
src = tabstrip['src']
tabstrip_hx2x = self.__class__(CSSStyle.join(self.dirname, src))
# <a href="sheet001.htm" target="frSheet"><font face="Arial" color="#000000">Sheet1</font></a>
a_html = tabstrip_hx2x.url_soup.find_all('a')
bn = os.path.split(CSSStyle.join(self.dirname, src))[0]
for a in a_html:
href = a['href']
sn = a.get_text().strip()
fn = CSSStyle.join(bn, href)
a_hx2x = self.__class__(fn)
a_hx2x.to_xlsx(workbook=wb, worksheet=ws, sheet_name=sn)
ws = None
if self.made_dir: # If we made the directory from a mht file, then remove it when we're done with it
try:
shutil.rmtree(self.made_dir)
if TRACE:
print(f'removed {self.made_dir}')
except Exception as e:
if TRACE:
print(f'Exception removing {self.made_dir}: {e}')
if filename:
wb.save(filename=filename)
return filename
return wb
css_style = CSSStyle()
font_utils = FontUtils()
styles_html = self.url_soup.find_all(['link', 'style'])
for s in styles_html:
if s.name == 'link':
rel = s.get('rel')
if not rel or (isinstance(rel, list) and rel[0].lower() != 'stylesheet') or \
(isinstance(rel, str) and rel.lower() != 'stylesheet'):
continue
try:
fn = s.get('href')
cs = CSSStyle.read(CSSStyle.join(self.dirname, fn), quiet=True)
if TRACE:
print(f'Writing TRACE_{fn}')
with open(f'TRACE_{fn}', 'w', encoding='utf-8') as tr:
tr.write(cs)
css_style.add_style_sheet(cs)
except Exception:
pass
else:
css_style.add_style_sheet(str(s.encode_contents(), 'utf-8'))
# Put elements that are not in a table in a table so we can handle them properly below
new_row_elems = {'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'div', 'pre', 'ol', 'ul', 'dl', 'hr', 'blockquote', 'footer'}
if TRACE:
print('Looking for non-table content')
body = self.url_soup.find('body')
if not body:
body = self.url_soup.new_tag('body')
while self.url_soup.contents:
elem = self.url_soup.contents[0]
body.append(elem.extract())
if 'html' in self.url_soup:
self.html.append(body)
else:
self.url_soup = body
table = None
cell = None
col_widths = {} # Measured in units, key is column letter
col_widths_no_wrap = {} # Measured in units, key is column letter
col_max_widths = {}
while True:
for elem in body.contents:
if elem == table:
continue
if isinstance(elem, (CData, Comment, ProcessingInstruction, Declaration, Doctype)):
continue
if isinstance(elem, NavigableString) and not str(elem.string).strip():
continue # Ignore whitespace between elements
if elem.name == 'table':
# If we have an extra table row at the bottom like this one, then use it to set the column
# widths, then zap it.
""" <![if supportMisalignedColumns]>
<tr height=0 style='display:none'>
<td width=87 style='width:65pt'></td>
<td width=0></td>
</tr>
<![endif]>"""
for e in elem.contents:
if isinstance(e, Declaration) and e.string == 'if supportMisalignedColumns':
r = e.find_next_sibling()
if r.name == 'tr' and 'style' in r.attrs and r.attrs['style'] == 'display:none':
cc = 0
for c in r.contents:
if c.name != 'td':
continue
cc += 1
if 'width' not in c.attrs:
continue
cl = get_column_letter(cc)
width = CSSStyle.px_to_units(CSSStyle.get_px(c.attrs['width']))
col_widths[cl] = width
col_max_widths[cl] = width
col_widths_no_wrap[cl] = width
r.extract() # Zap it!
break
table = None # We need to make a new one if needed
continue
if elem.name in ('link', 'style', 'script'):
continue
if elem.name == 'div' and 'style' in elem.attrs and 'mso-element:comment-list' in elem.attrs['style']:
continue # Ignore the cell comments which are stuck at the end of the file in a div
if TRACE:
print(f'Processing {elem}')
if not table:
if TRACE:
print('Creating table')
table = self.url_soup.new_tag('table')
table.attrs = body.attrs # Copy bgcolor, style, etc
elem.insert_after(table)
if elem.name in new_row_elems:
cell = None
if not cell:
if TRACE:
print('Creating new row and cell')
row = self.url_soup.new_tag('tr')
cell = self.url_soup.new_tag('td')
row.append(cell)
table.append(row)
if TRACE:
print('Adding element to cell')
extracted = elem.extract()
cell.append(extracted)
break # Start over if we made a change
else:
break
if TRACE:
print(f'Styles = \n{css_style}')
        def str_size(s, font, alignment, max_width, max_height, keep_newlines=False, fully_merged=False):
            """Get approximate width, height of a string, expressed in pixels.

            `font` / `alignment` are openpyxl-style objects (reads font.b,
            font.sz, alignment.wrap_text, alignment.textRotation,
            alignment.shrink_to_fit). For near-vertical rotations the
            returned (width, height) pair is swapped. `fully_merged` skips
            the 2:1 aspect-ratio optimization for wrapped text.
            """
            is_bold = font.b
            font_size = font.sz
            if not font_size:
                # assumes 11pt is the document default font size — TODO confirm
                font_size = 11
            wrap_text = alignment.wrap_text
            rotation = alignment.textRotation
            # height of one text line in pixels
            height = FontUtils.pt_to_px(font_size) * FontUtils.LINE_HEIGHT_FACTOR
            def almost_vertical(rotation):
                # 180 means -90 and 255 means vertical with each letter rotated also
                return 75 <= rotation <= 90 or 165 <= rotation <= 180 or rotation == 255
            if alignment.shrink_to_fit:
                # shrink-to-fit: approximate with the width of a single char
                width, _ = font_utils.get_font_size(font, 'a')
            elif wrap_text:
                lines = s.split('\n')
                # Compute the width of the widest line
                mx = 0
                for line in lines:
                    w, h = str_size(line, font, Alignment(wrap_text=False, textRotation=rotation), max_width, max_height)
                    mx = max(mx, w)
                mx = min(mx, max_width)
                my = font_utils.lines_needed(mx, s, font) * height
                # Attempt to optimize the cell size
                width = mx
                height1 = height
                height = my
                if not fully_merged: # If we don't span the whole width, try to optimize the size
                    if mx/my >= 2:
                        # Speed this up by pre-computing the value based on the area of the rectangle
                        area = width * height
                        # The area of the desired result with a 2:1 aspect ratio is mx * my = (2*my)*my = 2*my**2
                        # so we solve for my.
                        my = math.sqrt(area / 2.0)
                        if my < max_height:
                            mx = 2 * my
                            width = mx
                            my = font_utils.lines_needed(mx, s, font) * height1
                            height = my
                    else:
                        my = height
                        while my < max_height and mx/my >= 2: # Go for a 2:1 aspect ratio if possible
                            width = mx
                            height = my
                            # shrink the width 10% at a time and re-measure
                            mx = mx * 0.9
                            my = font_utils.lines_needed(mx, s, font) * height1
                            if TRACE:
                                print(f'str_size: trying {mx, my}')
            elif keep_newlines:
                # size each line independently; total height is the sum
                height = 0
                width = 0
                for line in s.split('\n'):
                    w, h = font_utils.get_font_size(font, line)
                    width = max(width, w)
                    height += h
            else:
                width, height = font_utils.get_font_size(font, s.replace('\n', ''))
                if width > max_width: # This line will be split
                    splits = math.ceil(width / max_width)
                    width = max_width
                    height *= splits
                    if height > max_height:
                        height = max_height
            if TRACE:
                print(f'str_size({s}, is_bold={is_bold}, font_size={font_size}, font_name={font.name}, rotation={rotation}, wrap_text={wrap_text}, max_width={max_width}, max_height={max_height}, keep_newlines={keep_newlines}) = {width, height}')
            if almost_vertical(rotation):
                return (height, width)
            return (width, height)
tables_html = self.url_soup.find_all("table")
if sheet_name is None:
m = re.search(r'<x:Name>([^<]+?)</x:Name>', self.text)
if m:
sheet_name = m.group(1)
else:
sheet_name = 'Sheet1'
if ws:
ws.title = sheet_name
else:
ws = wb.create_sheet(sheet_name)
ws.sheet_view.showGridLines = False
def type_it(value, number_format=None):
if not value:
| |
elif type == kThreadWaitPThreadRWLockRead:
s += "pthread rwlock %x for reading" % context
elif type == kThreadWaitPThreadRWLockWrite:
s += "pthread rwlock %x for writing" % context
elif type == kThreadWaitPThreadCondVar:
s += "pthread condvar %x" % context
elif type == kThreadWaitWorkloopSyncWait:
s += "workloop sync wait"
if owner == STACKSHOT_WAITOWNER_SUSPENDED:
s += ", suspended"
elif owner == STACKSHOT_WAITOWNER_THREQUESTED:
s += ", thread requested"
elif owner != 0:
s += ", owned by thread %u" % owner
else:
s += ", unknown owner"
s += ", workloop id %x" % context
elif type == kThreadWaitOnProcess:
if owner == 2**64-1:
s += "waitpid, for any children"
elif 2**32 <= owner and owner < 2**64-1:
s += "waitpid, for process group %d" % abs(owner - 2**64)
else:
s += "waitpid, for pid %d" % owner
else:
s += "unknown type %d (owner %d, context %x)" % (type, owner, context)
return s
def formatTurnstileInfo(ti):
    """Render a one-line summary of a thread's turnstile record.

    Returns " [no turnstile]" when no record was captured; otherwise the
    turnstile_flags bits select which description is produced.
    """
    if ti is None:
        return " [no turnstile]"
    flags = int(ti['turnstile_flags'])
    context = int(ti['turnstile_context'])
    hops = int(ti['number_of_hops'])
    priority = int(ti['turnstile_priority'])
    # Check the status bits in fixed priority order; first match wins.
    if flags & STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ:
        return " [turnstile was in process of being updated]"
    if flags & STACKSHOT_TURNSTILE_STATUS_WORKQUEUE:
        return " [blocked on workqueue: 0x%x, hops: %x, priority: %d]" % (context, hops, priority)
    if flags & STACKSHOT_TURNSTILE_STATUS_THREAD:
        return " [blocked on: %d, hops: %x, priority: %d]" % (context, hops, priority)
    if flags & STACKSHOT_TURNSTILE_STATUS_UNKNOWN:
        return " [turnstile with unknown inheritor]"
    return " [unknown turnstile status!]"
def formatWaitInfoWithTurnstiles(waitinfos, tsinfos):
    """Pair each wait-info record with the turnstile record of the same
    waiter thread (None when absent) and format each pair as one string."""
    paired = []
    for wi in waitinfos:
        match = None
        for ti in tsinfos:
            # Records are matched on the waiting thread's id.
            if int(wi['waiter']) == int(ti['waiter']):
                match = ti
                break
        paired.append((wi, match))
    return [formatWaitInfo(wi) + formatTurnstileInfo(ti) for wi, ti in paired]
def SaveStackshotReport(j, outfile_name, incomplete):
    """Convert a parsed kcdata stackshot dict into a CrashReporter-style
    JSON report and write it to outfile_name ('-' means stdout).

    params:
        j            - dict : parsed kcdata with a 'kcdata_stackshot' key
        outfile_name - str  : destination path, or '-' for stdout
        incomplete   - bool : mark the report as built from truncated data
    """
    import time
    from operator import itemgetter, attrgetter
    ss = j.get('kcdata_stackshot')
    if not ss:
        print "No KCDATA_BUFFER_BEGIN_STACKSHOT object found. Skipping writing report."
        return
    # Fall back to "now" when the snapshot carries no epoch time.
    timestamp = ss.get('usecs_since_epoch', int(time.time()))
    try:
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S %z",time.gmtime(timestamp))
    except ValueError, e:
        print "couldn't convert timestamp:", str(e)
        timestamp = None
    os_version = ss.get('osversion', 'Unknown')
    timebase = ss.get('mach_timebase_info', {"denom": 1, "numer": 1})
    # Shared dyld cache ("S") catalog entry, when the snapshot has one.
    dsc_common = None
    shared_cache_info = ss.get('shared_cache_dyld_load_info')
    if shared_cache_info:
        shared_cache_base_addr = shared_cache_info['imageSlidBaseAddress']
        dsc_common = [format_uuid(shared_cache_info['imageUUID']), shared_cache_info['imageSlidBaseAddress'], "S" ]
        print "Shared cache UUID found from the binary data is <%s> " % str(dsc_common[0])
    # In-memory shared cache layout contributes one "C" entry per image.
    dsc_layout = ss.get('system_shared_cache_layout')
    dsc_libs = []
    if dsc_layout:
        print "Found in memory system shared cache layout with {} images".format(len(dsc_layout))
        slide = ss.get('shared_cache_dyld_load_info')['imageLoadAddress']
        for image in dsc_layout:
            dsc_libs.append([format_uuid(image['imageUUID']), image['imageLoadAddress'] + slide, "C"])
    AllImageCatalog = []
    # Skeleton of the report; incident/crashReporterKey are placeholders.
    obj = {}
    obj["kernel"] = os_version
    if timestamp is not None:
        obj["date"] = timestamp
    obj["reason"] = "kernel panic stackshot"
    obj["incident"] = "ABCDEFGH-1234-56IJ-789K-0LMNOPQRSTUV"
    obj["crashReporterKey"] = "<KEY>"
    obj["bootArgs"] = ss.get('boot_args','')
    obj["frontmostPids"] = [0]
    obj["exception"] = "0xDEADF157"
    obj["processByPid"] = {}
    if incomplete:
        obj["reason"] = "!!!INCOMPLETE!!! kernel panic stackshot"
        obj["notes"] = "This stackshot report generated from incomplete data! Some information is missing! "
    processByPid = obj["processByPid"]
    ssplist = ss.get('task_snapshots', {})
    # Kernelcache ("U") / kernel ("K") images come from the pid-0 snapshot.
    kern_load_info = []
    if "0" in ssplist:
        kc_uuid = ssplist["0"].get('kernelcache_load_info', None)
        if kc_uuid:
            kernelcache_uuid = [format_uuid(kc_uuid['imageUUID']), kc_uuid['imageLoadAddress'], "U" ]
            kern_load_info.append(kernelcache_uuid)
        kl_infos = ssplist["0"].get("dyld_load_info", [])
        for dlinfo in kl_infos:
            kern_load_info.append([format_uuid(dlinfo['imageUUID']), dlinfo['imageLoadAddress'], "K"])
    for pid,piddata in ssplist.iteritems():
        processByPid[str(pid)] = {}
        tsnap = processByPid[str(pid)]
        pr_lib_dsc = dsc_common
        # NOTE(review): tsnap was just created as an empty dict, so this
        # membership test can never succeed; it likely should inspect
        # piddata instead -- confirm against the kcdata schema.
        if 'shared_cache_dyld_load_info' in tsnap:
            if 'imageSlidBaseAddress' in tsnap.get('shared_cache_dyld_load_info'):
                shared_cache_base_addr = tsnap.get('shared_cache_dyld_load_info')['imageSlidBaseAddress']
            else:
                print "Specific task shared cache format does not include slid shared cache base address. Skipping writing report."
                return
            pr_lib_dsc = [format_uuid(tsnap.get('shared_cache_dyld_load_info')['imageUUID']),
                          tsnap.get('shared_cache_dyld_load_info')['imageSlidBaseAddress'],
                          "S"]
        pr_libs = []
        if len(dsc_libs) == 0 and pr_lib_dsc:
            pr_libs.append(pr_lib_dsc)
        _lib_type = "P"
        if int(pid) == 0:
            # Kernel task: only the kernel images gathered above apply.
            _lib_type = "K"
            pr_libs = []
        else:
            for dlinfo in piddata.get('dyld_load_info',[]):
                pr_libs.append([format_uuid(dlinfo['imageUUID']), dlinfo['imageLoadAddress'], _lib_type])
        pr_libs.extend(kern_load_info)
        pr_libs.extend(dsc_libs)
        # Sort by load address so frame symbolication can bisect.
        pr_libs.sort(key=itemgetter(1))
        if 'task_snapshot' not in piddata:
            continue
        tasksnap = piddata['task_snapshot']
        tsnap["pid"] = tasksnap["ts_pid"]
        if 'ts_asid' in piddata:
            tsnap["asid"] = piddata["ts_asid"]
        if 'ts_pagetable' in piddata:
            pagetables = []
            for tte in piddata["ts_pagetable"]:
                pagetables.append(tte)
            tsnap["pageTables"] = pagetables
        # Map kcdata task fields onto the CrashReporter report names.
        tsnap["residentMemoryBytes"] = tasksnap["ts_task_size"]
        tsnap["timesDidThrottle"] = tasksnap["ts_did_throttle"]
        tsnap["systemTimeTask"] = GetSecondsFromMATime(tasksnap["ts_system_time_in_terminated_th"], timebase)
        tsnap["pageIns"] = tasksnap["ts_pageins"]
        tsnap["pageFaults"] = tasksnap["ts_faults"]
        tsnap["userTimeTask"] = GetSecondsFromMATime(tasksnap[ "ts_user_time_in_terminated_thre"], timebase)
        tsnap["procname"] = tasksnap["ts_p_comm"]
        tsnap["copyOnWriteFaults"] = tasksnap["ts_cow_faults"]
        tsnap["timesThrottled"] = tasksnap["ts_was_throttled"]
        tsnap["threadById"] = {}
        threadByID = tsnap["threadById"]
        thlist = piddata.get('thread_snapshots', {})
        for tid,thdata in thlist.iteritems():
            threadByID[str(tid)] = {}
            thsnap = threadByID[str(tid)]
            if "thread_snapshot" not in thdata:
                print "Found broken thread state for thread ID: %s." % tid
                break
            threadsnap = thdata["thread_snapshot"]
            thsnap["userTime"] = GetSecondsFromMATime(threadsnap["ths_user_time"], timebase)
            thsnap["id"] = threadsnap["ths_thread_id"]
            thsnap["basePriority"] = threadsnap["ths_base_priority"]
            # NOTE(review): unlike userTime this is not run through
            # GetSecondsFromMATime, so it stays in Mach time units -- confirm.
            thsnap["systemTime"] = threadsnap["ths_sys_time"]
            thsnap["schedPriority"] = threadsnap["ths_sched_priority"]
            thsnap["state"] = GetStateDescription(threadsnap['ths_state'])
            thsnap["qosEffective"] = threadsnap["ths_eqos"]
            thsnap["qosRequested"] = threadsnap["ths_rqos"]
            if "pth_name" in thdata:
                thsnap["name"] = thdata["pth_name"];
            if threadsnap['ths_continuation']:
                thsnap["continuation"] = GetSymbolInfoForFrame(AllImageCatalog, pr_libs, threadsnap['ths_continuation'])
            if "kernel_stack_frames" in thdata:
                kuserframes = []
                for f in thdata["kernel_stack_frames"]:
                    kuserframes.append(GetSymbolInfoForFrame(AllImageCatalog, pr_libs, f['lr']))
                thsnap["kernelFrames"] = kuserframes
            if "user_stack_frames" in thdata:
                uframes = []
                for f in thdata["user_stack_frames"]:
                    uframes.append(GetSymbolInfoForFrame(AllImageCatalog, pr_libs, f['lr']))
                thsnap["userFrames"] = uframes
            if "user_stacktop" in thdata:
                # Reassemble the 8 captured bytes into a little-endian pointer.
                (address,) = struct.unpack("<Q", struct.pack("B"*8, *thdata["user_stacktop"]["stack_contents"]))
                thsnap["userStacktop"] = GetSymbolInfoForFrame(AllImageCatalog, pr_libs, address)
            if threadsnap['ths_wait_event']:
                thsnap["waitEvent"] = GetSymbolInfoForFrame(AllImageCatalog, pr_libs, threadsnap['ths_wait_event'])
        if 'thread_waitinfo' in piddata and 'thread_turnstileinfo' in piddata:
            tsnap['waitInfo'] = formatWaitInfoWithTurnstiles(piddata['thread_waitinfo'] , piddata['thread_turnstileinfo'])
        elif 'thread_waitinfo' in piddata:
            tsnap['waitInfo'] = map(formatWaitInfo, piddata['thread_waitinfo'])
    obj['binaryImages'] = AllImageCatalog
    if outfile_name == '-':
        fh = sys.stdout
    else:
        fh = open(outfile_name, "w")
    # ips-style layout: a one-line JSON header, then the report body.
    header = {}
    header['bug_type'] = 288
    if timestamp is not None:
        header['timestamp'] = timestamp
    header['os_version'] = os_version
    fh.write(json.dumps(header))
    fh.write("\n")
    fh.write(json.dumps(obj, sort_keys=False, indent=2, separators=(',', ': ')))
    fh.close()
## Base utils for interacting with shell ##
def RunCommand(bash_cmd_string, get_stderr = True):
"""
returns: (int,str) : exit_code and output_str
"""
print "RUNNING: %s" % bash_cmd_string
cmd_args = shlex.split(bash_cmd_string)
output_str = ""
exit_code = 0
try:
if get_stderr:
output_str = subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)
else:
output_str = subprocess.check_output(cmd_args, stderr=None)
except subprocess.CalledProcessError, e:
exit_code = e.returncode
finally:
return (exit_code, output_str)
# Command-line interface. Note that -s takes a file-path value (stored in
# stackshot_file); its False default doubles as "report not requested".
parser = argparse.ArgumentParser(description="Decode a kcdata binary file.")
parser.add_argument("-l", "--listtypes", action="store_true", required=False, default=False,
                    help="List all known types",
                    dest="list_known_types")
parser.add_argument("-s", "--stackshot", required=False, default=False,
                    help="Generate a stackshot report file",
                    dest="stackshot_file")
parser.add_argument("--multiple", help="look for multiple stackshots in a single file", action='store_true')
parser.add_argument("-p", "--plist", required=False, default=False,
                    help="output as plist", action="store_true")
parser.add_argument("-S", "--sdk", required=False, default="", help="sdk property passed to xcrun command to find the required tools. Default is empty string.", dest="sdk")
parser.add_argument("--pretty", default=False, action='store_true', help="make the output a little more human readable")
parser.add_argument("--incomplete", action='store_true', help="accept incomplete data")
parser.add_argument("kcdata_file", type=argparse.FileType('r'), help="Path to a kcdata binary file.")
class VerboseAction(argparse.Action):
    """argparse action for -v/--verbose: raises the log level to INFO on
    stderr the moment the flag is parsed (nargs=0, consumes no value)."""
    def __call__(self, parser, namespace, values, option_string=None):
        logging.basicConfig(level=logging.INFO, stream=sys.stderr, format='%(message)s')
parser.add_argument('-v', "--verbose", action=VerboseAction, nargs=0)
@contextlib.contextmanager
def data_from_stream(stream):
    """Yield the contents of *stream* as a buffer, zero-copy when possible.

    Tries to mmap the stream's file descriptor read-only; streams without a
    usable descriptor (pipes, BytesIO, ...) fall back to read() instead.
    The mmap is closed when the context exits.
    """
    try:
        fmap = mmap.mmap(stream.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
    except Exception:
        # Fallback for non-mmappable streams. Narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit are no longer swallowed.
        yield stream.read()
    else:
        try:
            yield fmap
        finally:
            fmap.close()
def iterate_kcdatas(kcdata_file):
    """Yield every KCBufferObject found in *kcdata_file*.

    Tries progressively more forgiving interpretations of the input:
    raw kcdata, kcdata with a 16-byte header skipped, base64-encoded
    kcdata, and finally gzip-compressed kcdata. Raises when none of
    them yields a recognizable buffer.
    """
    with data_from_stream(kcdata_file) as data:
        iterator = kcdata_item_iterator(data)
        kcdata_buffer = KCObject.FromKCItem(iterator.next())
        if not isinstance(kcdata_buffer, KCBufferObject):
            # Retry with a possible 16-byte file header skipped.
            iterator = kcdata_item_iterator(data[16:])
            kcdata_buffer = KCObject.FromKCItem(iterator.next())
        if not isinstance(kcdata_buffer, KCBufferObject):
            # Maybe the payload is base64-encoded; decode and retry.
            try:
                decoded = base64.b64decode(data)
            except:
                pass
            else:
                iterator = kcdata_item_iterator(decoded)
                kcdata_buffer = KCObject.FromKCItem(iterator.next())
        if not isinstance(kcdata_buffer, KCBufferObject):
            # Last resort: treat the payload as gzip-compressed kcdata.
            import gzip
            from io import BytesIO
            try:
                # data[:] copies into bytes, since data may be an mmap.
                decompressed = gzip.GzipFile(fileobj=BytesIO(data[:])).read()
            except:
                pass
            else:
                iterator = kcdata_item_iterator(decompressed)
                kcdata_buffer = KCObject.FromKCItem(iterator.next())
        if not isinstance(kcdata_buffer, KCBufferObject):
            raise Exception, "unknown file type"
        kcdata_buffer.ReadItems(iterator)
        yield kcdata_buffer
        # A single file may contain several concatenated stackshots;
        # keep consuming buffers until the iterator is exhausted.
        for magic in iterator:
            kcdata_buffer = KCObject.FromKCItem(magic)
            if kcdata_buffer.i_type == 0:
                # Padding between buffers; skip it.
                continue
            if not isinstance(kcdata_buffer, KCBufferObject):
                raise Exception, "unknown file type"
            kcdata_buffer.ReadItems(iterator)
            yield kcdata_buffer
def prettify(data):
    """Recursively rewrite a decoded kcdata object for human consumption.

    16-byte UUID lists become canonical UUID strings, addresses and frame
    pointers become hex strings, wait-info records become formatted text,
    and saved stack bytes are folded into a symbolicated-style pointer.
    Non-container values are returned unchanged.
    """
    if isinstance(data, list):
        return map(prettify, data);
    elif isinstance(data, dict):
        newdata = dict()
        for key, value in data.items():
            if 'uuid' in key.lower() and isinstance(value, list) and len(value) == 16:
                value = '%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X' % tuple(value)
            elif 'address' in key.lower() and isinstance(value, (int, long)):
                value = '0x%X' % value
            elif key == 'lr':
                # Link-register frame values print as hex.
                value = '0x%X' % value
            elif key == 'thread_waitinfo':
                value = map(formatWaitInfo, value)
            elif key == 'stack_contents':
                print value
                # Reassemble the 8 saved bytes into a little-endian pointer.
                (address,) = struct.unpack("<Q", struct.pack("B"*8, *value))
                value = '0x%X' % address
            else:
                # Recurse into nested containers.
                value = prettify(value);
            newdata[key] = value
        return newdata
    else:
        return data
if __name__ == '__main__':
args = parser.parse_args()
if args.multiple and args.stackshot_file:
raise NotImplementedError
if args.list_known_types:
for (n, t) in KNOWN_TYPES_COLLECTION.items():
print "%d : %s " % (n, str(t))
sys.exit(1)
if args.incomplete or args.stackshot_file:
G.accept_incomplete_data = True
for i,kcdata_buffer in enumerate(iterate_kcdatas(args.kcdata_file)):
if i > 0 and not args.multiple:
break
str_data = "{" + kcdata_buffer.GetJsonRepr() + "}"
str_data = str_data.replace("\t", " ")
try:
json_obj = json.loads(str_data)
except:
print >>sys.stderr, "JSON reparsing failed! Printing string data!\n"
import textwrap
print textwrap.fill(str_data, 100)
raise
if args.pretty:
json_obj = prettify(json_obj)
if args.stackshot_file:
SaveStackshotReport(json_obj, args.stackshot_file, G.data_was_incomplete)
elif args.plist:
import Foundation
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding, TransformerDecoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .levenshtein_utils import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_get_del_targets,
_get_ins_targets,
_skip,
_skip_encoder_out,
)
@register_model("levenshtein_transformer")
class LevenshteinTransformerModel(FairseqNATModel):
    """Levenshtein Transformer: a non-autoregressive model that refines a
    hypothesis with three learned edit heads -- word deletion, placeholder
    insertion (mask_ins) and word filling (word_ins)."""

    @property
    def allow_length_beam(self):
        # Output length is decided dynamically by the insertion policy,
        # so length-beam decoding does not apply to this model.
        return False

    @staticmethod
    def add_args(parser):
        """Add model-specific CLI arguments on top of the shared NAT ones."""
        FairseqNATModel.add_args(parser)
        parser.add_argument(
            "--early-exit",
            default="6,6,6",
            type=str,
            help="number of decoder layers before word_del, mask_ins, word_ins",
        )
        parser.add_argument(
            "--no-share-discriminator",
            action="store_true",
            help="separate parameters for discriminator",
        )
        parser.add_argument(
            "--no-share-maskpredictor",
            action="store_true",
            help="separate parameters for mask-predictor",
        )
        parser.add_argument(
            "--share-discriminator-maskpredictor",
            action="store_true",
            help="share the parameters for both mask-predictor and discriminator",
        )
        parser.add_argument(
            "--sampling-for-deletion",
            action="store_true",
            help="instead of argmax, use sampling to predict the tokens",
        )

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        """Build the Levenshtein decoder, optionally BERT-initialized."""
        decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens)
        if getattr(args, "apply_bert_init", False):
            decoder.apply(init_bert_params)
        return decoder

    def forward(
        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
    ):
        """Training-only forward pass.

        Returns one dict entry per edit head (mask_ins, word_ins, word_del),
        each with logits ("out"), targets ("tgt") and a loss mask ("mask").
        """
        assert tgt_tokens is not None, "forward function only supports training."

        # encoding
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)

        # generate training labels for insertion
        masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
            prev_output_tokens, tgt_tokens, self.pad, self.unk
        )
        # The insertion head classifies 0..255 insertions per slot.
        mask_ins_targets = mask_ins_targets.clamp(min=0, max=255)  # for safe prediction
        mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)

        mask_ins_out, _ = self.decoder.forward_mask_ins(
            normalize=False,
            prev_output_tokens=prev_output_tokens,
            encoder_out=encoder_out,
        )
        word_ins_out, _ = self.decoder.forward_word_ins(
            normalize=False,
            prev_output_tokens=masked_tgt_tokens,
            encoder_out=encoder_out,
        )

        # make online prediction: either sample or argmax the filled words,
        # to build a realistic input for the deletion head.
        if self.decoder.sampling_for_deletion:
            word_predictions = torch.multinomial(
                F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1
            ).view(word_ins_out.size(0), -1)
        else:
            word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]

        # Keep the ground-truth tokens at positions that were not masked.
        word_predictions.masked_scatter_(
            ~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]
        )

        # generate training labels for deletion
        word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)
        word_del_out, _ = self.decoder.forward_word_del(
            normalize=False,
            prev_output_tokens=word_predictions,
            encoder_out=encoder_out,
        )
        word_del_masks = word_predictions.ne(self.pad)

        return {
            "mask_ins": {
                "out": mask_ins_out,
                "tgt": mask_ins_targets,
                "mask": mask_ins_masks,
                "ls": 0.01,
            },
            "word_ins": {
                "out": word_ins_out,
                "tgt": tgt_tokens,
                "mask": masked_tgt_masks,
                "ls": self.args.label_smoothing,
                "nll_loss": True,
            },
            "word_del": {
                "out": word_del_out,
                "tgt": word_del_targets,
                "mask": word_del_masks,
            },
        }

    def forward_decoder(
        self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
    ):
        """One refinement iteration at inference time: delete words, insert
        placeholders (capped by max_ratio * source length), then fill them.
        Returns decoder_out with updated tokens/scores/attn/history."""
        output_tokens = decoder_out.output_tokens
        output_scores = decoder_out.output_scores
        attn = decoder_out.attn
        history = decoder_out.history

        bsz = output_tokens.size(0)
        if max_ratio is None:
            max_lens = torch.zeros_like(output_tokens).fill_(255)
        else:
            if not encoder_out["encoder_padding_mask"]:
                # No padding mask: every source sentence has the full length.
                max_src_len = encoder_out["encoder_out"].size(0)
                src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len)
            else:
                src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1)
            max_lens = (src_lens * max_ratio).clamp(min=10).long()

        # delete words
        # do not delete tokens if it is <s> </s>
        can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip the deletion pass when no sentence is eligible
            word_del_score, word_del_attn = self.decoder.forward_word_del(
                normalize=True,
                prev_output_tokens=_skip(output_tokens, can_del_word),
                encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word),
            )
            word_del_pred = word_del_score.max(-1)[1].bool()

            _tokens, _scores, _attn = _apply_del_words(
                output_tokens[can_del_word],
                output_scores[can_del_word],
                word_del_attn,
                word_del_pred,
                self.pad,
                self.bos,
                self.eos,
            )
            output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
            output_scores = _fill(output_scores, can_del_word, _scores, 0)
            attn = _fill(attn, can_del_word, _attn, 0.0)

            if history is not None:
                history.append(output_tokens.clone())

        # insert placeholders
        can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
        if can_ins_mask.sum() != 0:
            mask_ins_score, _ = self.decoder.forward_mask_ins(
                normalize=True,
                prev_output_tokens=_skip(output_tokens, can_ins_mask),
                encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask),
            )
            if eos_penalty > 0.0:
                # Penalize predicting "insert 0 tokens" to discourage
                # finishing too early.
                mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty
            mask_ins_pred = mask_ins_score.max(-1)[1]
            # Never insert past the per-sentence length budget.
            mask_ins_pred = torch.min(
                mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
            )

            _tokens, _scores = _apply_ins_masks(
                output_tokens[can_ins_mask],
                output_scores[can_ins_mask],
                mask_ins_pred,
                self.pad,
                self.unk,
                self.eos,
            )
            output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
            output_scores = _fill(output_scores, can_ins_mask, _scores, 0)

            if history is not None:
                history.append(output_tokens.clone())

        # insert words: fill every remaining <unk> placeholder
        can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
        if can_ins_word.sum() != 0:
            word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
                normalize=True,
                prev_output_tokens=_skip(output_tokens, can_ins_word),
                encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word),
            )
            word_ins_score, word_ins_pred = word_ins_score.max(-1)

            _tokens, _scores = _apply_ins_words(
                output_tokens[can_ins_word],
                output_scores[can_ins_word],
                word_ins_pred,
                word_ins_score,
                self.unk,
            )
            output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
            output_scores = _fill(output_scores, can_ins_word, _scores, 0)
            attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)

            if history is not None:
                history.append(output_tokens.clone())

        # delete some unnecessary paddings
        cut_off = output_tokens.ne(self.pad).sum(1).max()
        output_tokens = output_tokens[:, :cut_off]
        output_scores = output_scores[:, :cut_off]
        attn = None if attn is None else attn[:, :cut_off, :]

        return decoder_out._replace(
            output_tokens=output_tokens,
            output_scores=output_scores,
            attn=attn,
            history=history,
        )

    def initialize_output_tokens(self, encoder_out, src_tokens):
        """Start decoding from the minimal hypothesis "<s> </s>"."""
        initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)
        initial_output_tokens[:, 0] = self.bos
        initial_output_tokens[:, 1] = self.eos

        initial_output_scores = initial_output_tokens.new_zeros(
            *initial_output_tokens.size()
        ).type_as(encoder_out["encoder_out"][0])

        return DecoderOut(
            output_tokens=initial_output_tokens,
            output_scores=initial_output_scores,
            attn=None,
            step=0,
            max_step=0,
            history=None,
        )
class LevenshteinTransformerDecoder(FairseqNATDecoder):
    """Transformer decoder with three output heads (word deletion,
    placeholder insertion, word insertion), each of which may exit after a
    different number of layers and may use its own layer stack."""

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super().__init__(
            args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
        )
        self.dictionary = dictionary
        self.bos = dictionary.bos()
        self.unk = dictionary.unk()
        self.eos = dictionary.eos()
        self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
        # mask_ins classifies 0..255 insertions from two concatenated
        # adjacent decoder states, hence the doubled input dimension.
        self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)
        # word_del is a binary keep/delete classifier per token.
        self.embed_word_del = Embedding(2, self.output_embed_dim, None)

        # del_word, ins_mask, ins_word
        self.early_exit = [int(i) for i in args.early_exit.split(",")]
        assert len(self.early_exit) == 3

        # copy layers for mask-predict/deletion when sharing is disabled;
        # None means "reuse self.layers".
        self.layers_msk = None
        if getattr(args, "no_share_maskpredictor", False):
            self.layers_msk = nn.ModuleList(
                [
                    TransformerDecoderLayer(args, no_encoder_attn)
                    for _ in range(self.early_exit[1])
                ]
            )
        self.layers_del = None
        if getattr(args, "no_share_discriminator", False):
            self.layers_del = nn.ModuleList(
                [
                    TransformerDecoderLayer(args, no_encoder_attn)
                    for _ in range(self.early_exit[0])
                ]
            )

        if getattr(args, "share_discriminator_maskpredictor", False):
            assert getattr(
                args, "no_share_discriminator", False
            ), "must set saperate discriminator"
            # Mask predictor and discriminator share one dedicated stack.
            self.layers_msk = self.layers_del

    def extract_features(
        self,
        prev_output_tokens,
        encoder_out=None,
        early_exit=None,
        layers=None,
        **unused
    ):
        """
        Similar to *forward* but only return features.

        Inputs:
            prev_output_tokens: Tensor(B, T)
            encoder_out: a dictionary of hidden states and masks

        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs

        the LevenshteinTransformer decoder has full-attention to all generated tokens
        """
        # embed positions
        positions = (
            self.embed_positions(prev_output_tokens)
            if self.embed_positions is not None
            else None
        )

        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)

        if positions is not None:
            x += positions
        x = self.dropout_module(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        inner_states = [x]

        # decoder layers: run only the first `early_exit` layers of the
        # chosen stack (defaults to the shared stack, full depth).
        decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
        layers = self.layers if layers is None else layers
        early_exit = len(layers) if early_exit is None else early_exit
        for _, layer in enumerate(layers[:early_exit]):
            x, attn, _ = layer(
                x,
                encoder_out["encoder_out"][0]
                if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
                else None,
                encoder_out["encoder_padding_mask"][0]
                if (
                    encoder_out is not None
                    and len(encoder_out["encoder_padding_mask"]) > 0
                )
                else None,
                self_attn_mask=None,  # full attention; no causal masking
                self_attn_padding_mask=decoder_padding_mask,
            )
            inner_states.append(x)

        if self.layer_norm:
            x = self.layer_norm(x)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if self.project_out_dim is not None:
            x = self.project_out_dim(x)

        return x, {"attn": attn, "inner_states": inner_states}

    @ensemble_decoder
    def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
        """Predict how many placeholders to insert between adjacent tokens."""
        features, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            early_exit=self.early_exit[1],
            layers=self.layers_msk,
            **unused
        )
        # Score each gap from the pair of states flanking it.
        features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
        decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)
        if normalize:
            return F.log_softmax(decoder_out, -1), extra["attn"]
        return decoder_out, extra["attn"]

    @ensemble_decoder
    def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
        """Predict the vocabulary token for each placeholder position."""
        features, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            early_exit=self.early_exit[2],
            layers=self.layers,
            **unused
        )
        decoder_out = self.output_layer(features)
        if normalize:
            return F.log_softmax(decoder_out, -1), extra["attn"]
        return decoder_out, extra["attn"]

    @ensemble_decoder
    def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused):
        """Predict keep/delete for every token of the current hypothesis."""
        features, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            early_exit=self.early_exit[0],
            layers=self.layers_del,
            **unused
        )
        decoder_out = F.linear(features, self.embed_word_del.weight)
        if normalize:
            return F.log_softmax(decoder_out, -1), extra["attn"]
        return decoder_out, extra["attn"]
@register_model_architecture("levenshtein_transformer", "levenshtein_transformer")
def levenshtein_base_architecture(args):
    """Populate every Levenshtein-Transformer hyper-parameter on ``args``
    that the caller has not set, using the base-model defaults."""

    def _default(name, value):
        # getattr falls back to ``value`` only when the attribute is absent,
        # so explicit caller settings always win.
        setattr(args, name, getattr(args, name, value))

    _default("encoder_embed_path", None)
    _default("encoder_embed_dim", 512)
    _default("encoder_ffn_embed_dim", 2048)
    _default("encoder_layers", 6)
    _default("encoder_attention_heads", 8)
    _default("encoder_normalize_before", False)
    _default("encoder_learned_pos", False)
    _default("decoder_embed_path", None)
    # Decoder sizes mirror the (already defaulted) encoder sizes.
    _default("decoder_embed_dim", args.encoder_embed_dim)
    _default("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _default("decoder_layers", 6)
    _default("decoder_attention_heads", 8)
    _default("decoder_normalize_before", False)
    _default("decoder_learned_pos", False)
    _default("attention_dropout", 0.0)
    _default("activation_dropout", 0.0)
    _default("activation_fn", "relu")
    _default("dropout", 0.1)
    _default("adaptive_softmax_cutoff", None)
    _default("adaptive_softmax_dropout", 0)
    _default("share_decoder_input_output_embed", False)
    _default("share_all_embeddings", False)
    _default("no_token_positional_embeddings", False)
    _default("adaptive_input", False)
    _default("apply_bert_init", False)
    _default("decoder_output_dim", args.decoder_embed_dim)
    _default("sampling_for_deletion", False)
    _default("decoder_input_dim", args.decoder_embed_dim)
    # Levenshtein-specific knobs.
    _default("early_exit", "6,6,6")
    _default("no_share_discriminator", False)
    _default("no_share_maskpredictor", False)
    _default("share_discriminator_maskpredictor", False)
    _default("no_share_last_layer", False)
@register_model_architecture(
    "levenshtein_transformer", "levenshtein_transformer_wmt_en_de"
)
def levenshtein_transformer_wmt_en_de(args):
    # WMT En-De preset: identical to the base architecture defaults.
    levenshtein_base_architecture(args)


# similar parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big"
)
def levenshtein_transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", | |
x)
_BIN_OPS_SEARCH_ORDER.append(
(_FLIPPED_OPS[op], b_type, a_type, flipped)
)
# The intercepted binary operators, grouped by category.
# Comparison operators (all produce booleans).
_COMPARISON_OPS: Set[BinFn] = {
    ops.eq,
    ops.ne,
    ops.ge,
    ops.gt,
    ops.le,
    ops.lt,
}
# Arithmetic operators; truediv/floordiv/mod/pow get extra zero checks
# in apply_smt below.
_ARITHMETIC_OPS: Set[BinFn] = {
    ops.add,
    ops.sub,
    ops.mul,
    ops.truediv,
    ops.floordiv,
    ops.mod,
    ops.pow,
}
# Bitwise and shift operators.
_BITWISE_OPS: Set[BinFn] = {
    ops.and_,
    ops.or_,
    ops.xor,
    ops.rshift,
    ops.lshift,
}
def apply_smt(op: BinFn, x: z3.ExprRef, y: z3.ExprRef) -> z3.ExprRef:
    """Apply the binary operator *op* to two z3 expressions.

    Mostly, z3 overloads operators and things just work, but a few cases
    need special handling first: division/modulo by zero must raise like
    Python, and z3's integer division/modulo conventions differ from
    Python's floor-division semantics for negative operands.
    """
    space = context_statespace()
    if op in _ARITHMETIC_OPS:
        if op in (ops.truediv, ops.floordiv, ops.mod):
            # Fork the state space on a zero divisor so the
            # ZeroDivisionError path gets explored as Python would raise it.
            if space.smt_fork(y == 0):
                raise ZeroDivisionError("division by zero")

        if op == ops.floordiv:
            # Case-split on operand signs and rewrite in terms of
            # nonnegative operands, so the result matches Python's
            # floor (round-toward-negative-infinity) division.
            if space.smt_fork(y >= 0):
                if space.smt_fork(x >= 0):
                    return x / y
                else:
                    return -((y - x - 1) / y)
            else:
                if space.smt_fork(x >= 0):
                    return -((x - y - 1) / -y)
                else:
                    return -x / -y
        if op == ops.mod:
            # Python's % result takes the sign of the divisor.
            if space.smt_fork(y >= 0):
                return x % y
            elif space.smt_fork(x % y == 0):
                return 0  # NOTE(review): native int, not an ExprRef -- confirm callers cope
            else:
                return (x % y) + y
        elif op == ops.pow:
            if space.smt_fork(z3.And(x == 0, y < 0)):
                raise ZeroDivisionError("zero cannot be raised to a negative power")
    # Default: rely on z3's own operator overloading.
    return op(x, y)
# Convenience unions used when registering promotions and handlers below.
_ARITHMETIC_AND_COMPARISON_OPS = _ARITHMETIC_OPS.union(_COMPARISON_OPS)
_ALL_OPS = _ARITHMETIC_AND_COMPARISON_OPS.union(_BITWISE_OPS)
def setup_binops():
    """Register promotion and dispatch rules for symbolic numeric binops.

    Each anonymous helper `_` is registered either with setup_promotion()
    (coerces an operand pair to more convenient types before dispatch) or
    setup_binop() (implements the operator for a specific type pair), over
    the given operator set.  Registration order matters (see first comment).
    """
    # Lower entries take precedence when searching.
    # We check NaN and infinity immediately; not all
    # symbolic floats support these cases.
    def _(a: Real, b: float):
        if math.isfinite(b):
            return (a, FiniteFloat(b))  # type: ignore
        return (a, NonFiniteFloat(b))

    setup_promotion(_, _ARITHMETIC_AND_COMPARISON_OPS)

    # Almost all operators involving booleans should upconvert to integers.
    def _(a: SymbolicBool, b: Number):
        return (SymbolicInt(z3.If(a.var, 1, 0)), b)

    setup_promotion(_, _ALL_OPS)

    # Implicitly upconvert symbolic ints to floats.
    def _(a: SymbolicInt, b: Union[float, FiniteFloat, SymbolicFloat, complex]):
        return (SymbolicFloat(z3.ToReal(a.var)), b)

    setup_promotion(_, _ARITHMETIC_AND_COMPARISON_OPS)

    # Implicitly upconvert native ints to floats.
    def _(a: int, b: Union[float, FiniteFloat, SymbolicFloat, complex]):
        return (float(a), b)

    setup_promotion(_, _ARITHMETIC_AND_COMPARISON_OPS)

    # Implicitly upconvert native bools to ints.
    def _(a: bool, b: Union[SymbolicInt, SymbolicFloat]):
        return (int(a), b)

    setup_promotion(_, _ARITHMETIC_AND_COMPARISON_OPS)

    # complex: fall back to native complex arithmetic (realizes the
    # symbolic operand via complex()).
    def _(op: BinFn, a: SymbolicNumberAble, b: complex):
        return op(complex(a), b)  # type: ignore

    setup_binop(_, _ALL_OPS)

    # float
    def _(op: BinFn, a: SymbolicFloat, b: SymbolicFloat):
        return SymbolicFloat(apply_smt(op, a.var, b.var))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: SymbolicFloat, b: SymbolicFloat):
        return SymbolicBool(apply_smt(op, a.var, b.var))

    setup_binop(_, _COMPARISON_OPS)

    def _(op: BinFn, a: SymbolicFloat, b: FiniteFloat):
        return SymbolicFloat(apply_smt(op, a.var, z3.RealVal(b.val)))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: FiniteFloat, b: SymbolicFloat):
        return SymbolicFloat(apply_smt(op, z3.RealVal(a.val), b.var))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: Union[FiniteFloat, SymbolicFloat], b: NonFiniteFloat):
        if isinstance(a, FiniteFloat):
            comparable_a: Union[float, SymbolicFloat] = a.val
        else:
            comparable_a = a
        # These three cases help cover operations like `a * -inf` which is either
        # positive or negative infinity depending on the sign of `a`.
        if comparable_a > 0:  # type: ignore
            return op(1, b.val)  # type: ignore
        elif comparable_a < 0:
            return op(-1, b.val)  # type: ignore
        else:
            return op(0, b.val)  # type: ignore

    setup_binop(_, _ARITHMETIC_AND_COMPARISON_OPS)

    def _(op: BinFn, a: NonFiniteFloat, b: NonFiniteFloat):
        return op(a.val, b.val)  # type: ignore

    setup_binop(_, _ARITHMETIC_AND_COMPARISON_OPS)

    def _(op: BinFn, a: SymbolicFloat, b: FiniteFloat):
        return SymbolicBool(apply_smt(op, a.var, z3.RealVal(b.val)))

    setup_binop(_, _COMPARISON_OPS)

    # int
    def _(op: BinFn, a: SymbolicInt, b: SymbolicInt):
        return SymbolicInt(apply_smt(op, a.var, b.var))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: SymbolicInt, b: SymbolicInt):
        return SymbolicBool(apply_smt(op, a.var, b.var))

    setup_binop(_, _COMPARISON_OPS)

    def _(op: BinFn, a: SymbolicInt, b: int):
        return SymbolicInt(apply_smt(op, a.var, z3IntVal(b)))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: int, b: SymbolicInt):
        return SymbolicInt(apply_smt(op, z3IntVal(a), b.var))

    setup_binop(_, _ARITHMETIC_OPS)

    def _(op: BinFn, a: SymbolicInt, b: int):
        return SymbolicBool(apply_smt(op, a.var, z3IntVal(b)))

    setup_binop(_, _COMPARISON_OPS)

    def _(op: BinFn, a: Integral, b: Integral):
        # Some bitwise operators require realization presently.
        # TODO: when one side is already realized, we could do something smarter.
        return op(a.__index__(), b.__index__())  # type: ignore

    setup_binop(_, {ops.or_, ops.xor})

    def _(op: BinFn, a: Integral, b: Integral):
        if b < 0:
            raise ValueError("negative shift count")
        b = realize(b)  # Symbolic exponents defeat the solver
        # Shifts are expressed as multiplication/floor-division by 2**b.
        if op == ops.lshift:
            return a * (2 ** b)
        else:
            return a // (2 ** b)

    setup_binop(_, {ops.lshift, ops.rshift})

    _AND_MASKS_TO_MOD = {
        # It's common to use & to mask low bits. We can avoid realization by converting
        # these situations into mod operations.
        0x01: 2,
        0x03: 4,
        0x07: 8,
        0x0F: 16,
        0x1F: 32,
        0x3F: 64,
        0x7F: 128,
        0xFF: 256,
    }

    def _(op: BinFn, a: Integral, b: Integral):
        with NoTracing():
            if isinstance(b, SymbolicInt):
                # Have `a` be symbolic, if possible
                (a, b) = (b, a)
            # Check whether we can interpret the mask as a mod operation:
            b = realize(b)
            if b == 0:
                return 0
            mask_mod = _AND_MASKS_TO_MOD.get(b)
            if mask_mod and isinstance(a, SymbolicInt):
                # Fork with a bias towards the (common) nonnegative case.
                if context_statespace().smt_fork(a.var >= 0, probability_true=0.75):
                    return SymbolicInt(a.var % mask_mod)
                else:
                    return SymbolicInt(b - ((-a.var - 1) % mask_mod))
            # Fall back to full realization
            return op(realize(a), b)  # type: ignore

    setup_binop(_, {ops.and_})

    # TODO: is this necessary still?
    def _(
        op: BinFn, a: Integral, b: Integral
    ):  # Floor division over ints requires realization, at present
        return op(a.__index__(), b.__index__())  # type: ignore

    setup_binop(_, {ops.truediv})

    def _(a: SymbolicInt, b: Number):  # Division over ints must produce float
        return (a.__float__(), b)

    setup_promotion(_, {ops.truediv})

    # bool
    def _(op: BinFn, a: SymbolicBool, b: SymbolicBool):
        return SymbolicBool(apply_smt(op, a.var, b.var))

    setup_binop(_, {ops.eq, ops.ne})
#
# END new numbers
#
class SymbolicNumberAble(SymbolicValue, Real):
    """Mixin giving symbolic values the full numeric operator protocol.

    Every binary operator delegates to numeric_binop(), which dispatches on
    the (type, type) pair using the promotion/binop tables registered by
    setup_binops().  Reflected variants simply swap the operand order.
    """

    def __pos__(self):
        return self

    def __neg__(self):
        return self._unary_op(ops.neg)

    def __abs__(self):
        # |v| expressed as a symbolic conditional rather than realizing v.
        return self._unary_op(lambda v: z3.If(v < 0, -v, v))

    def __lt__(self, other):
        return numeric_binop(ops.lt, self, other)

    def __gt__(self, other):
        return numeric_binop(ops.gt, self, other)

    def __le__(self, other):
        return numeric_binop(ops.le, self, other)

    def __ge__(self, other):
        return numeric_binop(ops.ge, self, other)

    def __eq__(self, other):
        return numeric_binop(ops.eq, self, other)

    def __add__(self, other):
        return numeric_binop(ops.add, self, other)

    def __radd__(self, other):
        return numeric_binop(ops.add, other, self)

    def __sub__(self, other):
        return numeric_binop(ops.sub, self, other)

    def __rsub__(self, other):
        return numeric_binop(ops.sub, other, self)

    def __mul__(self, other):
        return numeric_binop(ops.mul, self, other)

    def __rmul__(self, other):
        return numeric_binop(ops.mul, other, self)

    def __pow__(self, other, mod=None):
        if mod is not None:
            # Three-argument pow() has no symbolic implementation; realize
            # all operands and defer to the builtin.
            # BUGFIX: the exponent was previously passed as the builtin
            # `pow` function object itself instead of `other`.
            return pow(realize(self), realize(other), realize(mod))
        return numeric_binop(ops.pow, self, other)

    def __rpow__(self, other, mod=None):
        if mod is not None:
            return pow(other, realize(self), mod)
        return numeric_binop(ops.pow, other, self)

    def __lshift__(self, other):
        return numeric_binop(ops.lshift, self, other)

    def __rlshift__(self, other):
        return numeric_binop(ops.lshift, other, self)

    def __rshift__(self, other):
        return numeric_binop(ops.rshift, self, other)

    def __rrshift__(self, other):
        return numeric_binop(ops.rshift, other, self)

    def __and__(self, other):
        return numeric_binop(ops.and_, self, other)

    def __rand__(self, other):
        return numeric_binop(ops.and_, other, self)

    def __or__(self, other):
        return numeric_binop(ops.or_, self, other)

    def __ror__(self, other):
        return numeric_binop(ops.or_, other, self)

    def __xor__(self, other):
        return numeric_binop(ops.xor, self, other)

    def __rxor__(self, other):
        return numeric_binop(ops.xor, other, self)

    def __rtruediv__(self, other):
        return numeric_binop(ops.truediv, other, self)

    def __floordiv__(self, other):
        return numeric_binop(ops.floordiv, self, other)

    def __rfloordiv__(self, other):
        return numeric_binop(ops.floordiv, other, self)

    def __mod__(self, other):
        return numeric_binop(ops.mod, self, other)

    def __rmod__(self, other):
        return numeric_binop(ops.mod, other, self)

    def __divmod__(self, other):
        # Derived from // and %, so both stay symbolic where possible.
        return (self // other, self % other)

    def __rdivmod__(self, other):
        return (other // self, other % self)

    def __format__(self, fmt: str):
        # Formatting needs concrete values; realize both sides.
        return realize(self).__format__(realize(fmt))
class SymbolicIntable(SymbolicNumberAble, Integral):
    """Mixin for symbolic values with integer semantics.

    Adds the integer-only dunders on top of SymbolicNumberAble and keeps
    results symbolic wherever an arithmetic identity allows it.
    """

    # bitwise complement via the identity ~x == -(x + 1)
    def __invert__(self):
        return -(self + 1)

    # Integers are their own floor, ceiling and truncation.
    def __floor__(self):
        return self

    def __ceil__(self):
        return self

    def __trunc__(self):
        return self

    def __mul__(self, other):
        if not isinstance(other, str):
            return numeric_binop(ops.mul, self, other)
        # int * str repeats the string; a non-positive count yields "".
        if self <= 0:
            return ""
        return other * realize(self)

    __rmul__ = __mul__

    def bit_count(self):
        """Count set bits, probing one bit position per loop iteration."""
        if self < 0:
            return (-self).bit_count()
        total = 0
        power = 1
        while self >= power:
            if self % (power * 2) >= power:
                total += 1
            power *= 2
        return total
class SymbolicBool(SymbolicIntable, AtomicSymbolicValue):
    """Symbolic bool backed by a z3 Bool expression.

    Inherits the integer protocol from SymbolicIntable; conversions map the
    boolean onto 0/1 (or 0/-1 for negation) symbolically where possible.
    """

    def __init__(self, smtvar: Union[str, z3.ExprRef], typ: Type = bool):
        # `typ` exists for signature compatibility; only bool is valid.
        assert typ == bool
        SymbolicValue.__init__(self, smtvar, typ)

    @classmethod
    def _ch_smt_sort(cls) -> z3.SortRef:
        return _SMT_BOOL_SORT

    @classmethod
    def _pytype(cls) -> Type:
        return bool

    @classmethod
    def _smt_promote_literal(cls, literal) -> Optional[z3.ExprRef]:
        # Annotation corrected from SortRef: z3.BoolVal() produces an
        # expression, not a sort.
        if isinstance(literal, bool):
            return z3.BoolVal(literal)
        return None

    def __ch_realize__(self) -> object:
        # Ask the statespace to commit to a concrete True/False.
        return self.statespace.choose_possible(self.var)

    def __neg__(self):
        # -True == -1, -False == 0, kept symbolic.
        return SymbolicInt(z3.If(self.var, -1, 0))

    def __repr__(self):
        return self.__bool__().__repr__()

    def __hash__(self):
        return self.__bool__().__hash__()

    def __index__(self):
        # NOTE(review): returns a SymbolicInt rather than a native int;
        # presumably downstream interception accepts this — confirm.
        return SymbolicInt(z3.If(self.var, 1, 0))

    def __bool__(self):
        with NoTracing():
            return self.statespace.choose_possible(self.var)

    def __int__(self):
        return SymbolicInt(z3.If(self.var, 1, 0))

    def __float__(self):
        return SymbolicFloat(smt_bool_to_float(self.var))

    def __complex__(self):
        return complex(self.__float__())

    def __round__(self, ndigits=None):
        # This could be smarter, but nobody rounds a bool right?:
        return round(realize(self), realize(ndigits))
class SymbolicInt(SymbolicIntable, AtomicSymbolicValue):
def __init__(self, smtvar: Union[str, z3.ExprRef], typ: | |
# -*- coding: utf-8 -*-
# cython: profile=False
"""
Created on Fri Nov 4 13:05:59 2011
@author: <NAME>
TODO:
Look into using e.g. sp.linalg.fblas.zgemm._cpointer from cython? Or
link it to blas at compile time using distutils...
"""
import numpy as np
import scipy as sp
import scipy.linalg as la
#import scipy.sparse as spa
class eyemat(object):
    """Lazy D x D identity matrix.

    Stands in for np.eye(D) without allocating the dense array; dense data
    is only created by toarray()/__array__().  Elementwise products with
    scalars or equal-shaped matrices stay diagonal (simple_diag_matrix).
    """

    __array_priority__ = 10.1  # makes right-ops work, ala sparse

    def __init__(self, D, dtype=np.float64):
        self.shape = (D, D)
        self.dtype = dtype
        self.data = None

    def __array__(self):
        return self.toarray()

    def toarray(self):
        # NOTE: the removed scipy NumPy-aliases (sp.eye etc.) are replaced
        # with their identical numpy equivalents throughout this class.
        return np.eye(self.shape[0], dtype=self.dtype)

    def __mul__(self, other):
        # Elementwise product: scalar * I and M * I (same shape) both
        # produce a diagonal result.
        if np.isscalar(other):
            return simple_diag_matrix(np.ones(self.shape[0], self.dtype) * other)
        try:
            if other.shape == self.shape:
                return simple_diag_matrix(other.diagonal())
        except Exception:
            return NotImplemented
        return self.toarray() * other

    def __rmul__(self, other):
        # BUGFIX: previously `self.__mul__(self, other)` passed two
        # arguments to the bound method, raising TypeError for every
        # reflected multiplication.
        return self.__mul__(other)

    def __add__(self, other):
        try:
            return self.toarray() + other
        except Exception:
            return NotImplemented

    def __radd__(self, other):
        try:
            return other + self.toarray()
        except Exception:
            return NotImplemented

    def __sub__(self, other):
        try:
            return self.toarray() - other
        except Exception:
            return NotImplemented

    def __rsub__(self, other):
        try:
            return other - self.toarray()
        except Exception:
            return NotImplemented

    def __coerce__(self, other):
        # Python 2 relic; kept for backward compatibility.
        try:
            other = np.asanyarray(other)
            if other.shape == self.shape or np.isscalar(other):
                return (self.toarray(), other)
            else:
                return NotImplemented
        except Exception:
            return NotImplemented

    def dot(self, other):
        """I . other == other (shape permitting)."""
        if self.shape[1] == other.shape[0]:
            return other
        else:
            # ValueError instead of bare BaseException; still caught by
            # any pre-existing `except BaseException` handlers.
            raise ValueError("dimension mismatch in eyemat.dot")

    def dot_left(self, other):
        """other . I == other (shape permitting)."""
        if self.shape[0] == other.shape[1]:
            return other
        else:
            raise ValueError("dimension mismatch in eyemat.dot_left")

    def conj(self):
        return self

    def transpose(self):
        return self

    def trace(self, offset=0):
        # Identity: ones on the main diagonal, zeros elsewhere.
        if offset == 0:
            return self.shape[0]
        else:
            return 0

    def diagonal(self):
        return np.ones((self.shape[0]), dtype=self.dtype)

    def sqrt(self):
        return self

    def inv(self):
        return self

    def ravel(self):
        return self.toarray().ravel()

    def copy(self, order='C'):
        return eyemat(self.shape[0], dtype=self.dtype)

    def __getattr__(self, attr):
        # ndarray-style conveniences: .A (dense) and .T (transpose).
        if attr == 'A':
            return self.toarray()
        elif attr == 'T':
            return self.transpose()
        else:
            raise AttributeError(attr + " not found")


class simple_diag_matrix:
    """Diagonal matrix stored as its 1-d diagonal.

    Supports the subset of the ndarray interface used by this module;
    dense arrays are only created where unavoidable.
    """

    __array_priority__ = 10.1  # makes right-ops work, ala sparse

    diag = None
    shape = None
    dtype = None

    def __init__(self, diag, dtype=None):
        self.dtype = dtype
        diag = np.asanyarray(diag, dtype=dtype)
        assert diag.ndim == 1
        self.diag = diag
        self.shape = (diag.shape[0], diag.shape[0])

    def __array__(self):
        return self.toarray()

    def dot(self, b):
        if isinstance(b, simple_diag_matrix):
            return simple_diag_matrix(self.diag * b.diag)
        return mmul_diag(self.diag, b)

    def dot_left(self, a):
        if isinstance(a, simple_diag_matrix):
            return simple_diag_matrix(self.diag * a.diag)
        return mmul_diag(self.diag, a, act_right=False)

    def conj(self):
        return simple_diag_matrix(self.diag.conj())

    def transpose(self):
        # A diagonal matrix is its own transpose.
        return self

    def inv(self):
        return simple_diag_matrix(1. / self.diag)

    def sqrt(self):
        # np.emath.sqrt (like the removed scipy.sqrt alias) returns complex
        # values for negative diagonal entries.
        return simple_diag_matrix(np.emath.sqrt(self.diag))

    def ravel(self):
        return np.diag(self.diag).ravel()

    def diagonal(self):
        return self.diag

    def trace(self, offset=0):
        if offset == 0:
            return self.diag.sum()
        else:
            return 0

    def toarray(self):
        return np.diag(self.diag)

    def copy(self, order='C'):
        return simple_diag_matrix(self.diag.copy())

    def __mul__(self, other):
        # Elementwise product: only the diagonal survives when multiplying
        # with an equal-shaped matrix.
        if np.isscalar(other):
            return simple_diag_matrix(self.diag * other)
        try:
            other = np.asanyarray(other)
            if other.shape == self.shape:
                return simple_diag_matrix(self.diag * other.diagonal())
            return self.toarray() * other
        except Exception:
            return NotImplemented

    def __rmul__(self, other):
        # BUGFIX: previously `self.__mul__(self, other)` passed two
        # arguments to the bound method, raising TypeError.
        return self.__mul__(other)

    def __add__(self, other):
        try:
            return self.toarray() + other
        except Exception:
            return NotImplemented

    def __radd__(self, other):
        try:
            return other + self.toarray()
        except Exception:
            return NotImplemented

    def __sub__(self, other):
        try:
            return self.toarray() - other
        except Exception:
            return NotImplemented

    def __rsub__(self, other):
        try:
            return other - self.toarray()
        except Exception:
            return NotImplemented

    def __coerce__(self, other):
        # Python 2 relic; kept for backward compatibility.
        try:
            other = np.asanyarray(other)
            if other.shape == self.shape:
                return (self.toarray(), other)
            else:
                return None
        except Exception:
            return None

    def __getattr__(self, attr):
        if attr == 'A':
            return self.toarray()
        elif attr == 'T':
            return self.transpose()
        else:
            raise AttributeError(attr + " not found")
def mmul(*args):
    """Multiply a chain of matrices (2-d ndarrays).

    All matrices must have dimensions compatible with matrix multiplication.

    This function calls the dot() method of the objects passed in, so it
    handles any object providing a dot() that accepts 2D ndarrays.  When an
    operand offers an optimized left-acting dot_left() (as the "sparse"
    helper classes above do), that is used instead.

    Parameters
    ----------
    *args : ndarray
        The chain of matrices to multiply together.

    Returns
    -------
    out : ndarray
        The result.
    """
    res = args[0]
    for x in args[1:]:
        # Feature-test for dot_left instead of the previous bare `except:`
        # fallback, which silently swallowed genuine errors (including
        # shape mismatches raised inside dot_left itself).
        dot_left = getattr(x, "dot_left", None)
        if dot_left is not None:
            res = dot_left(res)
        else:
            res = res.dot(x)
    return res
def adot(a, b):
    """Scalar product for the ancilla, expecting matrix-form arguments.

    Equivalent to trace(dot(H(a), b)), computed without forming the
    product.  (np.inner replaces the removed scipy.inner alias.)
    """
    return np.inner(a.ravel().conj(), b.ravel())
def adot_noconj(a, b):
    """Scalar product for the ancilla without conjugation.

    Equivalent to trace(dot(a, b)), computed without forming the product.
    (np.inner replaces the removed scipy.inner alias.)
    """
    return np.inner(a.T.ravel(), b.ravel())
def H(m, out=None):
    """Matrix conjugate transpose (adjoint).

    This is just a shortcut for performing this operation on normal ndarrays.

    Parameters
    ----------
    m : ndarray
        The input matrix.
    out : ndarray, optional
        A matrix to hold the final result (dimensions must be correct).
        May be None.  May also be the same object as m.

    Returns
    -------
    out : ndarray
        The result.
    """
    if out is None:
        return m.T.conj()
    else:
        # np.conjugate replaces the removed scipy.conjugate alias.
        out = np.conjugate(m.T, out)
        return out
def randomize_cmplx(x, a=-0.5, b=0.5, aj=-0.5, bj=0.5):
    """Randomize a complex matrix in place.

    Real parts are drawn uniformly from [a, b), imaginary parts from
    [aj, bj).  Returns x for convenience.  (np.random.random_sample
    replaces the removed scipy.random.ranf alias.)
    """
    x[:] = (((b - a) * np.random.random_sample(x.shape) + a)
            + 1.j * ((bj - aj) * np.random.random_sample(x.shape) + aj))
    return x
def sqrtmh(A, ret_evd=False, evd=None):
    """Return the matrix square root of a hermitian or symmetric matrix.

    Uses scipy.linalg.eigh() to diagonalize the input efficiently.

    Parameters
    ----------
    A : ndarray
        A hermitian or symmetric two-dimensional square array (a matrix).
    evd : (ev, EV), optional
        A tuple containing the 1D array of eigenvalues ev and the matrix
        of eigenvectors EV, if already available.
    ret_evd : bool
        Also return the eigenvalue decomposition of the result.

    Returns
    -------
    sqrt_A : ndarray
        An array of the same shape and type as A containing the matrix
        square root of A.
    (ev, EV) : (ndarray, ndarray)
        Only if ret_evd: a 1D array of eigenvalues and the matrix of
        eigenvectors.

    Notes
    -----
    The result is also Hermitian.
    """
    if evd is not None:
        (ev, EV) = evd
    else:
        ev, EV = la.eigh(A)  # uses LAPACK ***EVR
    # A is not required to be positive (semi) definite, so negative
    # eigenvalues must yield complex roots: np.emath.sqrt behaves like the
    # removed scipy.sqrt alias (numpy.lib.scimath.sqrt).
    ev = np.emath.sqrt(ev)
    # Multiply the diagonal matrix of eigenvalue square roots into H(EV).
    B = mmul_diag(ev, H(EV))
    if ret_evd:
        return mmul(EV, B), (ev, EV)
    else:
        return mmul(EV, B)
def mmul_diag(Adiag, B, act_right=True):
    """Multiply a diagonal matrix (given as its 1-d diagonal) with B.

    If act_right, computes diag(Adiag) . B (rows of B scaled); otherwise
    B . diag(Adiag) (columns of B scaled).  Broadcasting is used instead
    of forming the dense diagonal matrix.
    """
    assert Adiag.ndim == 1
    assert B.ndim == 2
    if act_right:
        assert B.shape[0] == Adiag.shape[0]
        return (Adiag * B.T).T
    assert B.shape[1] == Adiag.shape[0]
    return Adiag * B
def invmh(A, ret_evd=False, evd=None):
    """Invert a Hermitian (or real symmetric) matrix via eigendecomposition.

    Mirrors sqrtmh(): an existing (ev, EV) pair may be supplied through
    `evd`, and `ret_evd` additionally returns the decomposition of the
    result (with the inverted eigenvalues).
    """
    if evd is None:
        ev, EV = la.eigh(A)
    else:
        (ev, EV) = evd
    # Invert the spectrum, then reassemble: EV . diag(1/ev) . H(EV).
    inv_ev = 1. / ev
    result = mmul(EV, mmul_diag(inv_ev, H(EV)))
    if ret_evd:
        return result, (inv_ev, EV)
    return result
def sqrtmpo(A, out=None):
    """Matrix square root of a Hermitian positive definite matrix.

    Intended algorithm: a Cholesky decomposition, followed by a QR
    decomposition, and then Newton iteration to obtain a polar form UH,
    with H Hermitian p.d. and the desired square root, as described in
    algorithm 6.21 in Higham, "Functions of Matrices, Theory and
    Computation", SIAM 2008.

    NOT YET IMPLEMENTED.

    Parameters
    ----------
    A : ndarray
        A hermitian or symmetric two-dimensional square array (a matrix).

    Raises
    ------
    NotImplementedError
        Always.  Raising is preferred over the previous `assert False`,
        which would be silently skipped under `python -O`.
    """
    # Sketch of the planned implementation:
    #   R = la.cholesky(A)
    #   R = la.qr(R, overwrite_a=True, mode='r')
    #   ... Newton iteration on the polar decomposition of R ...
    raise NotImplementedError("sqrtmpo is not yet implemented")
def invtr(A, overwrite=False, lower=False):
"""Compute the inverse of a | |
'Private Const a As Integer = 10, b As String = "hello", c As String * 10 = 43',
])
# << Parsing tests >> (10 of 61)
# Odds and ends
tests.extend([
    "Private WithEvents A As Button",
])
# << Parsing tests >> (11 of 61)
# Subroutine/method call forms: bare, explicit `Call`, positional, named
# and omitted arguments.
# Bare calls
tests.extend([
    "subr",
    "object.method",
    "object.method.method2.method",
])
# Explicit bare calls
tests.extend([
    "Call subr",
    "Call object.method",
    "Call object.method.method2.method",
])
# Bare calls with arguments
tests.extend([
    "subr 10, 20, 30",
    "object.method a, b, c+d, e",
    'object.method.method2.method 10, "hello", "goodbye" & name',
])
# Explicit calls with arguments
tests.extend([
    "Call subr(10, 20, 30)",
    "Call object.method(a, b, c+d, e)",
    'Call object.method.method2.method(10, "hello", "goodbye" & name)',
    "Call subr()",
])
# Bare calls with arguments and functions
tests.extend([
    "subr 10, 20, 30",
    "object(23).method a, b, c+d, e",
    'object.method(5, 10, 20).method2.method 10, "hello", "goodbye" & name',
])
# Bare calls with named arguments and functions
tests.extend([
    "subr 10, 20, z:=30",
    "object(23).method one:=a, two:=b, three:=c+d, four:=e",
    'object.method(5, 10, 20).method2.method 10, "hello", two:="goodbye" & name',
])
# Bare calls with omitted arguments
tests.extend([
    "subr 10, , 30",
    "subr ,,,,0",
    "subr 10, , , , 5",
])
# << Parsing tests >> (12 of 61)
# Line labels (named and numeric), GoTo targets, and labelled block
# structures.
# labels
tests.extend([
    "label:",
    "label20:",
    "20:",
    "label: a=1",
    "20: a=1",
    "101: doit",
    "101:\ndoit",
    "102: doit now",
    "103: doit now, for, ever",
])
# Goto's
tests.extend([
    "GoTo Label",
    "GoTo 20",
    "GoTo Label:",
    "GoTo 20:",
])
# Structures with labels
tests.extend([
    """
101: If a < 10 Then
102: b=1
103: End If
""",
    """
101: While a < 0
102: b=1
103: Wend
""",
    """
101: Select Case a
102: Case 10
103: b= 1
104: Case Else
105: b=2
103: End Select
""",
    """
101: For i = 0 To 100
102: b=1
103: Next i
""",
    """
101: Sub a()
102: b=1
103: End Sub
""",
])
# Numeric labels don't even need a ':' ... aarg!
tests.extend([
    """
101 If a < 10 Then
102 b=1
103 End If
""",
    """
101 While a < 0
102 b=1
103 Wend
""",
    """
101 Select Case a
102 Case 10
103 b= 1
104 Case Else
105 b=2
103 End Select
""",
    """
101 For i = 0 To 100
102 b=1
103 Next i
""",
    """
101 Sub a()
102 b=1
103 End Sub
""",
])
# << Parsing tests >> (13 of 61)
# Colon-separated multi-statement lines, and file I/O statements
# (Open/Close/Print#/Get#/Input#/Seek).
# simple multi-line statements
tests.extend([
    "a = 10: b = 20",
    "a = 10: b = 20: c=1: d=1: e=2",
    "a=10:",
    "a=10: b=20:",
])
# Blocks on a line
tests.extend([
    "For i = 0 To 10: b=b+i: Next i",
    "If a > b Then a = 10: b = 20"
])
# Bug #809979 - Line ending with a colon fails
tests.extend([
    "a = 10:\nb = 20",
    "a = 10: b = 20:\nc=1: d=1: e=2",
    "a=10:\nb=20:\nc=1",
])
# << Parsing tests >> (14 of 61)
# open statements
tests.extend([
    "Open fn For Output As 12",
    "Open fn For Output As #12",
    "Open fn For Input As 12",
    "Open fn For Input As #12",
    "Open fn.gk.gl() For Input As #NxtChn()",
    "Open fn For Append Lock Write As 23",
    "Open fn For Random As 23 Len = 1234",
    "Close 1",
    "Close #1",
    "Close channel",
    "Close #channel",
    "Close",
    "Close\na=1",
    "Closet = 10",
])
# Bug #810968 Close #1, #2 ' fails to parse
tests.extend([
    "Close #1, #2, #3, #4",
    "Close 1, 2, 3, 4",
    "Close #1, 2, #3, 4",
    "Close #one, #two, #three, #four",
    "Close one, two, three, four",
    "Close #1,#2,#3,#4",
    "Close #1 , #2 , #3 , #4 ",
])
# << Parsing tests >> (15 of 61)
# print# statements
tests.extend([
    "Print 10",
    "Print #1, 10",
    "Print 10, 20, 30;",
    "Print #1, 10, 20, 30;",
    "Print #1, 10; 20; 30;",
    "Print #1, 10; 20; 30; 40, 50, 60, 70; 80; 90",
    "Print 10, 20, 30,",
    "Print 10, 20, 30",
    "Print",
    "Print ;;;",
    "Print ,,,",
    "Print 1,,,2,,,3,,,;",
    "Print #3,",
    "Print #3,;;;",
    "Print #3,,,",
    "Print #3,1,,,2,,,3,,,;",
])
# get# statements
tests.extend([
    "Get #1, a, b",
    "Get #1, , b",
])
# input # statements
tests.extend([
    "Input #1, a, b",
    "Input #1, b",
    "a = Input(20, #3)",
    "a = Input(20, #id)",
])
# line input # statements
tests.extend([
    "Line Input #1, b",
])
# Seek
tests.extend([
    "Seek #filenum, value",
    "10: Seek #filenum, value",
    "10: Seek #filenum, value ' comment",
    "Seek #filenum, value ' comment",
])
# << Parsing tests >> (16 of 61)
# DLL Declare statements, including line continuations (`_`).
tests.extend([
    'Private Declare Function FileTimeToSystemTime Lib "kernel32" (ftFileTime As FILETIME, stSystemTime As SYSTEMTIME) As Long',
    'Private Declare Sub Sleep Lib "kernel32" (ByVal dwMilliseconds As Long)',
    'Private Declare Function GetFileAttributes Lib "kernel32" Alias "GetFileAttributesA" (ByVal lpFileName As String) As Long',
    'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String) As Long',
    'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String, A) As Long',
    'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String , A) As Long',
    'Private Declare Function GetFileAttributes Lib "kernel32" _ \n(ByVal lpFileName As String ) As Long',
])
# << Parsing tests >> (17 of 61)
# Error-handling statements: On Error GoTo / Resume Next / GoTo 0,
# On <expr> GoTo <list>, Resume, and On Local Error variants.
# General on error goto
tests.extend([
    "On Error GoTo 100",
    "On Error GoTo ErrTrap",
    "On Error GoTo 100 ' comment",
    "On Error GoTo ErrTrap ' comment",
    "100: On Error GoTo 100",
    "label: On Error GoTo ErrTrap",
    "100: On Error GoTo 100 ' comment",
    "label: On Error GoTo ErrTrap ' comment",
])
# General on error resume next
tests.extend([
    "On Error Resume Next",
    "On Error Resume Next ' comment",
    "100: On Error Resume Next",
    "label: On Error Resume Next",
    "100: On Error Resume Next ' comment",
    "label: On Error Resume Next ' comment",
])
# General on error goto -
tests.extend([
    "On Error GoTo 0",
    "On Error GoTo 0 ' comment",
    "100: On Error GoTo 0",
    "100: On Error GoTo 0 ' comment",
])
# On something goto list
tests.extend([
    "On var GoTo 20",
    "On var GoTo 10,20,30,40",
])
# Resume
tests.extend([
    "label: Resume Next",
    "Resume Next",
    "label: Resume Next ' Comment",
    "label: Resume 7",
    "Resume 7",
    "label: Resume 7 ' Comment",
    "label: Resume",
    "Resume\na=1",
    "label: Resume' Comment",
])
# General on local error resume next
tests.extend([
    "On Local Error Resume Next",
    "On Local Error Resume Next ' comment",
    "100: On Local Error Resume Next",
    "label: On Local Error Resume Next",
    "100: On Local Error Resume Next ' comment",
    "label: On Local Error Resume Next ' comment",
])
# Bug #809979 - On Error with : after the label fails
tests.extend([
    "On Error GoTo 0:\na=1",
    "On Error GoTo 0: ' comment",
    "100: On Error GoTo 0:\na=1",
    "100: On Error GoTo 0: ' comment",
    "On Error GoTo lbl:\na=1",
    "On Error GoTo lbl: ' comment",
    "100: On Error GoTo lbl:\na=1",
    "100: On Error GoTo lbl: ' comment",
])
# << Parsing tests >> (18 of 61)
# Graphics statements (Line/Pset/Move).
# Lines
tests.extend([
    "Line (10,20)-(30,40), 10, 20",
    "obj.Pset (10, 20), RGB(1,2,2)",
])
# Move
tests.extend([
    "Move (Screen.Width - Width) / 2, (Screen.Height - Height) / 2",
])
# << Parsing tests >> (19 of 61)
# General name test (rename a file)
tests.extend([
    "Name file As otherfile",
    "Name file & extension As otherfile",
    "Name file & extension As otherfile & otherextension",
    'Name path & "\origname.txt" As path & "\knewname.txt"',
])
# << Parsing tests >> (20 of 61)
# Attributes at the head of a file
tests.extend([
    'Attribute VB_Name = "frmMain"',
    'Attribute VB_GlobalNameSpace = False',
    'Attribute VB_Creatable = False',
    'Attribute VB_PredeclaredId = True',
    'Attribute VB_Exposed = False',
    'Attribute Me.VB_Exposed = False',
    'Attribute Me.VB_Exposed = False, 1, 2, 3, 4',
    'Attribute Me.VB_Exposed = False, "1", "2, 3,", 4',
])
# << Parsing tests >> (21 of 61)
# Enum declarations with leading-underscore members (the original comment
# here said "Attributes", a copy-paste slip).
tests.extend([
    """
Enum thing
_one = 1
_two = 2
_three = 3
_four = 4
End Enum
""",
    """
Enum thing
_one
_two
_three
_four
End Enum
""",
])
# << Parsing tests >> (22 of 61)
# Types
tests.extend([
    """
Private Type ShellFileInfoType
hIcon As Long
iIcon As Long
dwAttributes As Long
szDisplayName As String * 260
szTypeName As String * 80
End Type
"""
])
# << Parsing tests >> (23 of 61)
# The Option statement
tests.extend([
    "Option Base 0",
    "Option Base 1",
    "Option Explicit",
    "Option String Compare",
    "Option String Compare Text",
])
# << Parsing tests >> (24 of 61)
# The End statement
tests.extend([
    "10: End",
    "End",
    "End ' wow this is it",
    "10: End ' this is the end",
])
# If with an 'End' in there
tests.append("""
If a = 10 Then
End
End If
""")
# Sub with an 'End' in there
tests.append("""
Sub doit()
End
End Sub
""")
# Fn with an 'End' in there
tests.append("""
Function doit()
End
End Function
""")
# With with an 'End' in there
tests.append("""
With obj
End
End With
""")
# << Parsing tests >> (25 of 61)
# The Event statement
tests.extend([
    "Event doit()",
    "Public Event doit()",
    "Private Event doit()",
    "Public Event doit(a, b, c, e)",
    "Public Event doit(a As Integer, b As Long, c(), e As Command.Button)",
])
# << Parsing tests >> (26 of 61)
# The Debug.Print statement
tests.extend([
    "Debug.Print",
    "Debug.Print a",
    "Debug.Print a,b",
    "Debug.Print a;b",
    "Debug.Print a+10;b+20",
    "Debug.Print a+20, b-20",
    "Debug.Print a;b;",
])
# << Parsing tests >> (27 of 61)
# Recordset notation
tests.extend([
"RS!diskID = DriveID",
"RS!diskID = DriveID+10",
'RS!diskID | |
"int veclen2__Fii(int ix, int iy)")
# IDA batch re-typing: for each function address, clear any auto-created
# items with del_items() and re-apply the demangled C prototype via
# SetType().  Addresses/signatures are generated; do not edit by hand.
del_items(0x800454BC)
SetType(0x800454BC, "void set_light_bands__Fv()")
del_items(0x80045530)
SetType(0x80045530, "void SetLightFX__FiisssUcUcUc(int x, int y, short s_r, short s_g, int s_b, int d_r, int d_g, int d_b)")
del_items(0x8004559C)
SetType(0x8004559C, "void DoLighting__Fiiii(int nXPos, int nYPos, int nRadius, int Lnum)")
del_items(0x8004624C)
SetType(0x8004624C, "void DoUnLight__Fv()")
del_items(0x80046490)
SetType(0x80046490, "void DoUnVision__Fiii(int nXPos, int nYPos, int nRadius)")
del_items(0x80046554)
SetType(0x80046554, "void DoVision__FiiiUcUc(int nXPos, int nYPos, int nRadius, unsigned char doautomap, int visible)")
del_items(0x80046A64)
SetType(0x80046A64, "void FreeLightTable__Fv()")
del_items(0x80046A6C)
SetType(0x80046A6C, "void InitLightTable__Fv()")
del_items(0x80046A74)
SetType(0x80046A74, "void MakeLightTable__Fv()")
del_items(0x80046A7C)
SetType(0x80046A7C, "void InitLightMax__Fv()")
del_items(0x80046AA0)
SetType(0x80046AA0, "void InitLighting__Fv()")
del_items(0x80046AE4)
SetType(0x80046AE4, "int AddLight__Fiii(int x, int y, int r)")
del_items(0x80046B78)
SetType(0x80046B78, "void AddUnLight__Fi(int i)")
del_items(0x80046BA8)
SetType(0x80046BA8, "void ChangeLightRadius__Fii(int i, int r)")
del_items(0x80046BD4)
SetType(0x80046BD4, "void ChangeLightXY__Fiii(int i, int x, int y)")
del_items(0x80046C0C)
SetType(0x80046C0C, "void light_fix__Fi(int i)")
del_items(0x80046C14)
SetType(0x80046C14, "void ChangeLightOff__Fiii(int i, int x, int y)")
del_items(0x80046C4C)
SetType(0x80046C4C, "void ChangeLight__Fiiii(int i, int x, int y, int r)")
del_items(0x80046C90)
SetType(0x80046C90, "void ChangeLightColour__Fii(int i, int c)")
del_items(0x80046CC0)
SetType(0x80046CC0, "void ProcessLightList__Fv()")
del_items(0x80046DEC)
SetType(0x80046DEC, "void SavePreLighting__Fv()")
del_items(0x80046DF4)
SetType(0x80046DF4, "void InitVision__Fv()")
del_items(0x80046E44)
SetType(0x80046E44, "int AddVision__FiiiUc(int x, int y, int r, unsigned char mine)")
del_items(0x80046F48)
SetType(0x80046F48, "void ChangeVisionRadius__Fii(int id, int r)")
del_items(0x80046FFC)
SetType(0x80046FFC, "void ChangeVisionXY__Fiii(int id, int x, int y)")
del_items(0x800470B4)
SetType(0x800470B4, "void ProcessVisionList__Fv()")
del_items(0x80047314)
SetType(0x80047314, "void FreeQuestText__Fv()")
del_items(0x8004731C)
SetType(0x8004731C, "void InitQuestText__Fv()")
del_items(0x80047328)
SetType(0x80047328, "void CalcTextSpeed__FPCc(char *Name)")
del_items(0x80047468)
SetType(0x80047468, "void InitQTextMsg__Fi(int m)")
del_items(0x8004757C)
SetType(0x8004757C, "void DrawQTextBack__Fv()")
del_items(0x800475EC)
SetType(0x800475EC, "void DrawQTextTSK__FP4TASK(struct TASK *T)")
del_items(0x800476BC)
SetType(0x800476BC, "void DrawQText__Fv()")
del_items(0x80047A04)
SetType(0x80047A04, "void _GLOBAL__D_QBack()")
del_items(0x80047A2C)
SetType(0x80047A2C, "void _GLOBAL__I_QBack()")
del_items(0x80047A54)
SetType(0x80047A54, "void SetRGB__6DialogUcUcUc_addr_80047A54(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x80047A74)
SetType(0x80047A74, "void SetBorder__6Dialogi_addr_80047A74(struct Dialog *this, int Type)")
del_items(0x80047A7C)
SetType(0x80047A7C, "void ___6Dialog_addr_80047A7C(struct Dialog *this, int __in_chrg)")
del_items(0x80047AA4)
SetType(0x80047AA4, "struct Dialog *__6Dialog_addr_80047AA4(struct Dialog *this)")
del_items(0x80047B00)
SetType(0x80047B00, "int GetCharWidth__5CFontc_addr_80047B00(struct CFont *this, char ch)")
del_items(0x80047B58)
SetType(0x80047B58, "struct FRAME_HDR *GetFr__7TextDati_addr_80047B58(struct TextDat *this, int FrNum)")
del_items(0x80047B74)
SetType(0x80047B74, "void nullmissile__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80047B7C)
SetType(0x80047B7C, "void FuncNULL__FP13MissileStructiii(struct MissileStruct *Ms, int ScrX, int ScrY, int OtPos)")
del_items(0x80047B84)
SetType(0x80047B84, "void delta_init__Fv()")
del_items(0x80047BE4)
SetType(0x80047BE4, "void delta_kill_monster__FiUcUcUc(int mi, unsigned char x, unsigned char y, unsigned char bLevel)")
del_items(0x80047C80)
SetType(0x80047C80, "void delta_monster_hp__FilUc(int mi, long hp, unsigned char bLevel)")
del_items(0x80047D04)
SetType(0x80047D04, "void delta_sync_golem__FPC9TCmdGolemiUc(struct TCmdGolem *pG, int pnum, unsigned char bLevel)")
del_items(0x80047D94)
SetType(0x80047D94, "void delta_leave_sync__FUc(unsigned char bLevel)")
del_items(0x800480C0)
SetType(0x800480C0, "void delta_sync_object__FiUcUc(int oi, unsigned char bCmd, unsigned char bLevel)")
del_items(0x80048120)
SetType(0x80048120, "unsigned char delta_get_item__FPC9TCmdGItemUc(struct TCmdGItem *pI, unsigned char bLevel)")
del_items(0x800482E4)
SetType(0x800482E4, "void delta_put_item__FPC9TCmdPItemiiUc(struct TCmdPItem *pI, int x, int y, unsigned char bLevel)")
del_items(0x8004846C)
SetType(0x8004846C, "unsigned char delta_portal_inited__Fi(int i)")
del_items(0x80048490)
SetType(0x80048490, "unsigned char delta_quest_inited__Fi(int i)")
del_items(0x800484B4)
SetType(0x800484B4, "void DeltaAddItem__Fi(int ii)")
del_items(0x800486C8)
SetType(0x800486C8, "int DeltaExportData__FPc(char *Dst)")
del_items(0x800486F8)
SetType(0x800486F8, "int DeltaImportData__FPc(char *Src)")
del_items(0x8004872C)
SetType(0x8004872C, "void DeltaSaveLevel__Fv()")
del_items(0x80048828)
SetType(0x80048828, "void NetSendCmd__FUcUc(unsigned char bHiPri, unsigned char bCmd)")
del_items(0x80048850)
SetType(0x80048850, "void NetSendCmdGolem__FUcUcUcUclUc(unsigned char mx, unsigned char my, unsigned char dir, unsigned char menemy, long hp, int cl)")
del_items(0x8004889C)
SetType(0x8004889C, "void NetSendCmdLoc__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x800488CC)
SetType(0x800488CC, "void NetSendCmdLocParam1__FUcUcUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1)")
del_items(0x80048904)
SetType(0x80048904, "void NetSendCmdLocParam2__FUcUcUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2)")
del_items(0x80048944)
SetType(0x80048944, "void NetSendCmdLocParam3__FUcUcUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y, int wParam1, int wParam2, int wParam3)")
del_items(0x8004898C)
SetType(0x8004898C, "void NetSendCmdParam1__FUcUcUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1)")
del_items(0x800489B8)
SetType(0x800489B8, "void NetSendCmdParam2__FUcUcUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2)")
del_items(0x800489E8)
SetType(0x800489E8, "void NetSendCmdParam3__FUcUcUsUsUs(unsigned char bHiPri, unsigned char bCmd, unsigned short wParam1, unsigned short wParam2, int wParam3)")
del_items(0x80048A20)
SetType(0x80048A20, "void NetSendCmdQuest__FUcUc(unsigned char bHiPri, unsigned char q)")
del_items(0x80048A94)
SetType(0x80048A94, "void NetSendCmdGItem__FUcUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char mast, unsigned char pnum, int ii)")
del_items(0x80048BC8)
SetType(0x80048BC8, "void NetSendCmdGItem2__FUcUcUcUcPC9TCmdGItem(unsigned char usonly, unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x80048C44)
SetType(0x80048C44, "unsigned char NetSendCmdReq2__FUcUcUcPC9TCmdGItem(unsigned char bCmd, unsigned char mast, unsigned char pnum, struct TCmdGItem *p)")
del_items(0x80048C9C)
SetType(0x80048C9C, "void NetSendCmdExtra__FPC9TCmdGItem(struct TCmdGItem *p)")
del_items(0x80048D04)
SetType(0x80048D04, "void NetSendCmdPItem__FUcUcUcUc(unsigned char bHiPri, unsigned char bCmd, unsigned char x, unsigned char y)")
del_items(0x80048E0C)
SetType(0x80048E0C, "void NetSendCmdChItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x80048EB0)
SetType(0x80048EB0, "void NetSendCmdDelItem__FUcUc(unsigned char bHiPri, unsigned char bLoc)")
del_items(0x80048EE0)
SetType(0x80048EE0, "void NetSendCmdDItem__FUci(unsigned char bHiPri, int ii)")
del_items(0x80048FF4)
SetType(0x80048FF4, "unsigned char i_own_level__Fi(int nReqLevel)")
del_items(0x80048FFC)
SetType(0x80048FFC, "void NetSendCmdDamage__FUcUcUl(unsigned char bHiPri, unsigned char bPlr, unsigned long dwDam)")
del_items(0x80049030)
SetType(0x80049030, "void delta_open_portal__FiUcUcUcUcUc(int pnum, unsigned char x, unsigned char y, unsigned char bLevel, int bLType, int bSetLvl)")
del_items(0x8004908C)
SetType(0x8004908C, "void delta_close_portal__Fi(int pnum)")
del_items(0x800490CC)
SetType(0x800490CC, "void check_update_plr__Fi(int pnum)")
del_items(0x800490D4)
SetType(0x800490D4, "void On_WALKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049178)
SetType(0x80049178, "void On_ADDSTR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x800491A8)
SetType(0x800491A8, "void On_ADDMAG__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x800491D8)
SetType(0x800491D8, "void On_ADDDEX__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049208)
SetType(0x80049208, "void On_ADDVIT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049238)
SetType(0x80049238, "void On_SBSPELL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x800492AC)
SetType(0x800492AC, "void On_GOTOGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049350)
SetType(0x80049350, "void On_REQUESTGITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049490)
SetType(0x80049490, "void On_GETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049660)
SetType(0x80049660, "void On_GOTOAGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049704)
SetType(0x80049704, "void On_REQUESTAGITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049838)
SetType(0x80049838, "void On_AGETITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049A00)
SetType(0x80049A00, "void On_ITEMEXTRA__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049A9C)
SetType(0x80049A9C, "void On_PUTITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049C4C)
SetType(0x80049C4C, "void On_SYNCPUTITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049D88)
SetType(0x80049D88, "void On_RESPAWNITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049EA0)
SetType(0x80049EA0, "void On_SATTACKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x80049F4C)
SetType(0x80049F4C, "void On_SPELLXYD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A054)
SetType(0x8004A054, "void On_SPELLXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A14C)
SetType(0x8004A14C, "void On_TSPELLXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A248)
SetType(0x8004A248, "void On_OPOBJXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A370)
SetType(0x8004A370, "void On_DISARMXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A498)
SetType(0x8004A498, "void On_OPOBJT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A500)
SetType(0x8004A500, "void On_ATTACKID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A654)
SetType(0x8004A654, "void On_SPELLID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A73C)
SetType(0x8004A73C, "void On_SPELLPID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A81C)
SetType(0x8004A81C, "void On_TSPELLID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A900)
SetType(0x8004A900, "void On_TSPELLPID__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004A9E4)
SetType(0x8004A9E4, "void On_KNOCKBACK__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AA70)
SetType(0x8004AA70, "void On_RESURRECT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AAA8)
SetType(0x8004AAA8, "void On_HEALOTHER__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AB18)
SetType(0x8004AB18, "void On_TALKXY__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004ABBC)
SetType(0x8004ABBC, "void On_NEWLVL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004ABF4)
SetType(0x8004ABF4, "void On_WARP__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004ACD8)
SetType(0x8004ACD8, "void On_MONSTDEATH__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AD44)
SetType(0x8004AD44, "void On_KILLGOLEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004ADB0)
SetType(0x8004ADB0, "void On_AWAKEGOLEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004AF20)
SetType(0x8004AF20, "void On_MONSTDAMAGE__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B024)
SetType(0x8004B024, "void On_PLRDEAD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B06C)
SetType(0x8004B06C, "void On_PLRDAMAGE__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B228)
SetType(0x8004B228, "void On_OPENDOOR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B2C0)
SetType(0x8004B2C0, "void On_CLOSEDOOR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B358)
SetType(0x8004B358, "void On_OPERATEOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B3F0)
SetType(0x8004B3F0, "void On_PLROPOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B484)
SetType(0x8004B484, "void On_BREAKOBJ__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B518)
SetType(0x8004B518, "void On_CHANGEPLRITEMS__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B520)
SetType(0x8004B520, "void On_DELPLRITEMS__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B528)
SetType(0x8004B528, "void On_PLRLEVEL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B530)
SetType(0x8004B530, "void On_DROPITEM__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B588)
SetType(0x8004B588, "void On_PLAYER_JOINLEVEL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B81C)
SetType(0x8004B81C, "void On_ACTIVATEPORTAL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B9AC)
SetType(0x8004B9AC, "void On_DEACTIVATEPORTAL__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004B9FC)
SetType(0x8004B9FC, "void On_RETOWN__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BA44)
SetType(0x8004BA44, "void On_SETSTR__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BA84)
SetType(0x8004BA84, "void On_SETDEX__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BAC4)
SetType(0x8004BAC4, "void On_SETMAG__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BB04)
SetType(0x8004BB04, "void On_SETVIT__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BB44)
SetType(0x8004BB44, "void On_SYNCQUEST__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BB8C)
SetType(0x8004BB8C, "void On_ENDSHIELD__FPC4TCmdi(struct TCmd *pCmd, int pnum)")
del_items(0x8004BCA8)
SetType(0x8004BCA8, "unsigned long ParseCmd__FiPC4TCmd(int pnum, struct TCmd *pCmd)")
del_items(0x8004C0C8)
SetType(0x8004C0C8, "struct DLevel *GetDLevel__Fib(int LevNum, bool SetLevel)")
del_items(0x8004C158)
SetType(0x8004C158, "void ReleaseDLevel__FP6DLevel(struct DLevel *Dl)")
del_items(0x8004C190)
SetType(0x8004C190, "void NetSendLoPri__FPCUcUc(unsigned char *pbMsg, unsigned char bLen)")
del_items(0x8004C1BC)
SetType(0x8004C1BC, "int InitLevelType__Fi(int l)")
del_items(0x8004C208)
SetType(0x8004C208, "void SetupLocalCoords__Fv()")
del_items(0x8004C398)
SetType(0x8004C398, "void InitNewSeed__Fl(long newseed)")
del_items(0x8004C40C)
SetType(0x8004C40C, "unsigned char NetInit__FUcPUc(unsigned char bSinglePlayer, unsigned char *pfExitProgram)")
del_items(0x8004C634)
SetType(0x8004C634, "void PostAddL1Door__Fiiii(int i, int x, int y, int ot)")
del_items(0x8004C76C)
SetType(0x8004C76C, "void PostAddL2Door__Fiiii(int i, int x, int y, int ot)")
del_items(0x8004C8B8)
SetType(0x8004C8B8, "void PostAddArmorStand__Fi(int i)")
del_items(0x8004C940)
SetType(0x8004C940, "unsigned char PostTorchLocOK__Fii(int xp, int yp)")
del_items(0x8004C980)
SetType(0x8004C980, "void PostAddObjLight__Fii(int i, int r)")
del_items(0x8004CA24)
SetType(0x8004CA24, "void PostObjObjAddSwitch__Fiiii(int ot, int ox, int oy, int oi)")
del_items(0x8004CAD8)
SetType(0x8004CAD8, "void InitObjectGFX__Fv()")
del_items(0x8004CCF4)
SetType(0x8004CCF4, "void FreeObjectGFX__Fv()")
del_items(0x8004CD00)
SetType(0x8004CD00, "void DeleteObject__Fii(int oi, int i)")
del_items(0x8004CDB8)
SetType(0x8004CDB8, "void SetupObject__Fiiii(int i, int x, int y, int ot)")
del_items(0x8004D03C)
SetType(0x8004D03C, "void SetObjMapRange__Fiiiiii(int i, int x1, int y1, int x2, int y2, int v)")
del_items(0x8004D09C)
SetType(0x8004D09C, "void SetBookMsg__Fii(int i, int msg)")
del_items(0x8004D0C4)
SetType(0x8004D0C4, "void AddObject__Fiii(int ot, int ox, int oy)")
del_items(0x8004D1D0)
SetType(0x8004D1D0, "void PostAddObject__Fiii(int ot, int ox, int oy)")
del_items(0x8004D2DC)
SetType(0x8004D2DC, "void Obj_Light__Fii(int i, int lr)")
del_items(0x8004D4EC)
SetType(0x8004D4EC, "void Obj_Circle__Fi(int i)")
del_items(0x8004D810)
SetType(0x8004D810, "void Obj_StopAnim__Fi(int i)")
del_items(0x8004D874)
SetType(0x8004D874, "void DrawExpl__Fiiiiiccc(int sx, int sy, int f, int ot, int scale, int rtint, int gtint, int btint)")
del_items(0x8004DB50)
SetType(0x8004DB50, "void DrawObjExpl__FP12ObjectStructiii(struct ObjectStruct *obj, int ScrX, int ScrY, int ot)")
del_items(0x8004DBC0)
SetType(0x8004DBC0, "void Obj_Door__Fi(int i)")
del_items(0x8004DD54)
SetType(0x8004DD54, "void Obj_Sarc__Fi(int i)")
del_items(0x8004DDA0)
SetType(0x8004DDA0, "void ActivateTrapLine__Fii(int ttype, int tid)")
del_items(0x8004DEB0)
SetType(0x8004DEB0, "void Obj_FlameTrap__Fi(int i)")
del_items(0x8004E180)
SetType(0x8004E180, "void Obj_Trap__Fi(int i)")
del_items(0x8004E4D0)
SetType(0x8004E4D0, "void Obj_BCrossDamage__Fi(int i)")
del_items(0x8004E760)
SetType(0x8004E760, "void ProcessObjects__Fv()")
del_items(0x8004EA00)
SetType(0x8004EA00, "void ObjSetMicro__Fiii(int dx, int dy, int pn)")
del_items(0x8004EA38)
SetType(0x8004EA38, "void ObjSetMini__Fiii(int x, int y, int v)")
del_items(0x8004EB0C)
SetType(0x8004EB0C, "void ObjL1Special__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8004EB14)
SetType(0x8004EB14, "void ObjL2Special__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8004EB1C)
SetType(0x8004EB1C, "void DoorSet__Fiii(int oi, int dx, int dy)")
del_items(0x8004ED9C)
SetType(0x8004ED9C, "void RedoPlayerVision__Fv()")
del_items(0x8004EE40)
SetType(0x8004EE40, "void OperateL1RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004F1E4)
SetType(0x8004F1E4, "void OperateL1LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004F5BC)
SetType(0x8004F5BC, "void OperateL2RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004F954)
SetType(0x8004F954, "void OperateL2LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004FCEC)
SetType(0x8004FCEC, "void OperateL3RDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x8004FFF4)
SetType(0x8004FFF4, "void OperateL3LDoor__FiiUc(int pnum, int oi, unsigned char sendflag)")
del_items(0x800502FC)
SetType(0x800502FC, "void MonstCheckDoors__Fi(int m)")
del_items(0x800507F8)
SetType(0x800507F8, "void PostAddL1Objs__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050930)
SetType(0x80050930, "void PostAddL2Objs__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050A44)
SetType(0x80050A44, "void ObjChangeMap__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050BFC)
SetType(0x80050BFC, "void DRLG_MRectTrans__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050CA8)
SetType(0x80050CA8, "void ObjChangeMapResync__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80050E18)
SetType(0x80050E18, "void OperateL1Door__FiiUc(int pnum, int i, unsigned char sendflag)")
del_items(0x80050F74)
SetType(0x80050F74, "void OperateLever__Fii(int pnum, int i)")
del_items(0x80051160)
SetType(0x80051160, "void OperateBook__Fii(int pnum, int i)")
del_items(0x80051654)
SetType(0x80051654, "void OperateBookLever__Fii(int pnum, int i)")
del_items(0x800519FC)
SetType(0x800519FC, "void OperateSChambBk__Fii(int pnum, int i)")
del_items(0x80051BD4)
SetType(0x80051BD4, "void OperateChest__FiiUc(int pnum, | |
brain surface where the data has been measured, e.g., 'white' or 'pial'. This will become part of the file name that is loaded. For white, nothing will be added. Defaults to 'white'.
hemi : {'both', 'lh', 'rh'}
The hemisphere that should be loaded.
subjects_dir: string
A string representing the full path to a directory. This should be the directory containing all subjects of your study.
Returns
-------
morphometry_data: numpy array
A numpy array with as many entries as there are vertices in the subject. If you load two hemispheres instead of one, the length doubles. You can get the start indices for data of the hemispheres in the returned `meta_data`, see `meta_data['lh.num_vertices']` and `meta_data['rh.num_vertices']`. You can be sure that the data for the left hemisphere will always come first (if both were loaded). Indices start at 0, of course. So if the left hemisphere has `n` vertices, the data for them are at indices `0..n-1`, and the data for the right hemisphere start at index `n`. Note that the two hemispheres do in general NOT have the same number of vertices.
meta_data: dictionary
A dictionary containing detailed information on all files that were loaded and used settings. The following keys are available (depending on the value of the `hemi` argument, you can replace ?h with 'lh' or 'rh' or both 'lh' and 'rh'):
- `?h.num_data_points` : the number of data points loaded.
- `?h.morphometry_file` : the value of the `?h_morphometry_data_file` argument (data file that was loaded)
- `?h.morphometry_file_format` : the value for `format` that was used
- `subject_id` : the subject id
- `subjects_dir` : the subjects dir that was used
- `surf` : the surf that was used, e.g., 'white'
- `measure` : the measure that was loaded as morphometry data, e.g., 'area'
- `space` : always the string 'subject'. This means that the data loaded represent morphometry data taken from the subject's surface (as opposed to data mapped to a common or average subject).
- `hemi` : the `hemi` value that was used
Examples
--------
>>> import brainload as bl
>>> morphometry_data, meta_data = bl.subject_data_native('subject1', '/mnt/study1_data', 'thickness', 'both')
"""
vert_coords, faces, morphometry_data, meta_data = subject(subject_id, surf=surf, measure=measure, hemi=hemi, subjects_dir=subjects_dir, load_surface_files=False)
return morphometry_data, meta_data
def subject_data_standard(subject_id, subjects_dir, measure, hemi, fwhm, average_subject='fsaverage', surf='white'):
    """Load morphometry data for a subject that was mapped to a common (average) subject.

    Convenience wrapper around `subject_avg` that loads only the per-vertex
    morphometry values (e.g., from a file like lh.area.fwhm10.fsaverage.mgh)
    and skips loading the surface meshes.

    Parameters
    ----------
    subject_id : string
        Subject identifier; the name of the subject's directory under `subjects_dir`.
    subjects_dir : string
        Full path to the directory containing all subjects of the study.
    measure : string
        The measure to load, e.g., 'area' or 'curv'.
    hemi : {'both', 'lh', 'rh'}
        Which hemisphere(s) to load.
    fwhm : string or None
        Smoothing setting encoded in the file name. None omits the `.fwhmX`
        part entirely; '0' selects the unsmoothed version.
    average_subject : string, optional
        Name of the average subject the data was mapped to. Defaults to 'fsaverage'.
    surf : string, optional
        Brain surface the data was measured on, e.g., 'white' or 'pial'.
        Defaults to 'white'.

    Returns
    -------
    morphometry_data : numpy array
        One value per vertex. With hemi='both' the left-hemisphere values come
        first; per-hemisphere start indices can be derived from `meta_data`.
    meta_data : dictionary
        Details on the files that were loaded and the settings used;
        `meta_data['space']` is 'common' for this loader.
    """
    avg_result = subject_avg(subject_id, measure=measure, surf=surf, hemi=hemi,
                             fwhm=fwhm, subjects_dir=subjects_dir,
                             average_subject=average_subject,
                             load_surface_files=False)
    # subject_avg returns (vert_coords, faces, data, meta); meshes were not loaded.
    _, _, morphometry_data, meta_data = avg_result
    return morphometry_data, meta_data
def subject(subject_id, surf='white', measure='area', hemi='both', subjects_dir=None, meta_data=None, load_surface_files=True, load_morphometry_data=True):
"""
Load FreeSurfer brain morphometry and/or mesh data for a single subject.
High-level interface to load FreeSurfer brain data for a single space. This parses the data for the surfaces of this subject. If you want to load data that has been mapped to an average subject like 'fsaverage', use `subject_avg` instead.
Parameters
----------
subject_id: string
The subject identifier of the subject. As always, it is assumed that this is the name of the directory containing the subject's data, relative to `subjects_dir`. Example: 'subject33'.
measure : string, optional
The measure to load, e.g., 'area' or 'curv'. Defaults to 'area'.
surf : string, optional
The brain surface where the data has been measured, e.g., 'white' or 'pial'. This will become part of the file name that is loaded. Defaults to 'white'.
hemi : {'both', 'lh', 'rh'}, optional
The hemisphere that should be loaded. Defaults to 'both'.
subjects_dir: string, optional
A string representing the full path to a directory. This should be the directory containing all subjects of your study. Defaults to the environment variable SUBJECTS_DIR if omitted. If that is not set, used the current working directory instead. This is the directory from which the application was executed.
meta_data: dictionary, optional
A dictionary that should be merged into the return value `meta_data`. Defaults to the empty dictionary if omitted.
load_surface_files: boolean, optional
Whether to load mesh data. If set to `False`, the first return values `vert_coords` and `faces` will be `None`. Defaults to `True`.
load_morphometry_data: boolean, optional
Whether to load morphometry data. If set to `False`, the first return value `morphometry_data` will be `None`. Defaults to `True`.
Returns
-------
vert_coords: numpy array
A 2-dimensional array containing the vertices of the mesh(es) of the subject. Each vertex entry contains 3 coordinates. Each coordinate describes a 3D position in a FreeSurfer surface file (e.g., 'lh.white'), as returned by the `nibabel` function `nibabel.freesurfer.io.read_geometry`.
faces: numpy array
A 2-dimensional array containing the 3-faces of the mesh(es) of the subject. Each face entry contains 3 indices. Each index references the respective vertex in the `vert_coords` array.
morphometry_data: numpy array
A numpy array with as many entries as there are vertices in the subject. If you load two hemispheres instead of one, the length doubles. You can get the start indices for data of the hemispheres in the returned `meta_data`, see `meta_data['lh.num_vertices']` and `meta_data['rh.num_vertices']`. You can be sure that the data for the left hemisphere will always come first (if both were loaded). Indices start at 0, of course. So if the left hemisphere has `n` vertices, the data for them are at indices `0..n-1`, and the data for the right | |
fill=color_neuron, outline=color_neuron)
if lay_scheme[gNum][2]:
#center
self.canvas_brain.create_rectangle(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
fill=color_inside, outline=color_inside)
else:
#center
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
if colNum == 0:
#input
self.canvas_brain.create_line(x_input_vec_preset + x_inp_vec_width,
gr_preset + (group_h - 2 * gr_bound) / 2,
x_group_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#output
self.canvas_brain.create_line(x_group_preset + lay_thicc - 2 * gr_bound,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
x_output_vec_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#self.canvas_brain.create_line(x_output_vec_preset,
# nr_preset,
# x_output_vec_preset + x_out_vec_width,
# nr_preset,
# fill='#000000')
nr_preset = nr_preset + neuron_draw_size + 2 * nr_bound_h
if nrNum == n_in_col:
colNum+=1
nrNum = 0
if colNum % 2 == 0:
nr_preset = gr_preset
else:
nr_preset = gr_preset
draw_neuron+=1
gr_preset = group_h+gr_preset
gNum+=1
layNum+=1
prev_lay_group_out_cord = group_out_cord
group_out_cord = []
self.scheme_window.mainloop()
def get_proportion(self, vector=()):
    """Normalize *vector* into proportions that sum to 1.

    Parameters
    ----------
    vector : iterable of numbers
        The values to convert to proportions.

    Returns
    -------
    list of float
        A new list where each element is divided by the sum of all elements.
        An empty vector yields an empty list. A non-empty vector summing to
        0 raises ZeroDivisionError (same as the original behavior).

    Fixes: the original shadowed the builtin `sum` with a manual
    accumulation loop and used a mutable default argument.
    """
    total = sum(vector)
    # Empty input never reaches the division, so total == 0 is safe then.
    return [el / total for el in vector]
def get_color(self, r=0, g=0, b=0):
    """Build a color pair from RGB components.

    Parameters
    ----------
    r, g, b : int
        Color components; callers must pass values in 0..255 for a valid
        '#rrggbb' string.

    Returns
    -------
    tuple
        ((r, g, b) as floats, '#rrggbb' lowercase hex string).

    Fixes: the original converted each int to hex via
    `to_bytes(1, 'little').hex().__str__()` — `.hex()` already returns a
    str, and a format spec does the same job directly.
    """
    return ((float(r), float(g), float(b)),
            '#{:02x}{:02x}{:02x}'.format(r, g, b))
def frozed_mind(self):
    """Snapshot this network into a serializable dict.

    Collects each layer's build scheme plus all synapse weights, stores
    the result in `self.frozen_mind`, and returns it. The counterpart
    `unfrozed_mind` rebuilds a network from such a snapshot.
    """
    scheme = [lay.get_build_scheme() for lay in self.lays]
    self.frozen_mind = {'scheme': scheme,
                        'weights': self.get_all_synapse_weight()}
    return self.frozen_mind
def unfrozed_mind(self, ice_piece):
    """Restore a network previously captured by `frozed_mind`.

    Re-runs __init__ with the saved layer scheme, then loads the saved
    synapse weights.
    """
    scheme = ice_piece['scheme']
    weights = ice_piece['weights']
    self.__init__(laysConfigs=scheme)
    self.load_consciousness(consciousness=weights)
def save_to_file(self, filename='frozen_mind.txt'):
    """Freeze the network and write it to *filename* as JSON.

    Calls `frozed_mind()` first to refresh `self.frozen_mind`, then
    serializes that dict to the file.

    Fixes: the original used open()/close() without try/finally, leaking
    the file handle if `json.dumps` or the write raised; a context
    manager closes it on every path.
    """
    self.frozed_mind()
    with open(filename, 'w') as f:
        f.write(json.dumps(self.frozen_mind))
def load_from_file(self, filename='frozen_mind.txt'):
    """Read a frozen-mind JSON file and restore the network from it.

    The file content is parsed with `json.loads` and handed to
    `unfrozed_mind` for reconstruction.

    Fixes: the original closed the file only after `unfrozed_mind`
    returned, leaking the handle if parsing or restoring raised; the
    context manager closes it on every path.
    """
    with open(filename, 'r') as f:
        raw = f.read()
    self.unfrozed_mind(ice_piece=json.loads(raw))
def __gt__(self, other):
    # Rich comparisons order networks by NN_learning_temp, the network's
    # current learning temperature. NOTE(review): no __eq__ is visible in
    # this chunk, so ==/!= presumably fall back to identity — confirm.
    return self.NN_learning_temp > other.NN_learning_temp
def __lt__(self, other):
    # Mirror of __gt__ (strictly cooler network sorts first).
    return self.NN_learning_temp < other.NN_learning_temp
def __ge__(self, other):
    # Non-strict variant of __gt__.
    return self.NN_learning_temp >= other.NN_learning_temp
def __le__(self, other):
    # Non-strict variant of __lt__.
    return self.NN_learning_temp <= other.NN_learning_temp
class lay_abstact():
'''layConfig = [ (0, True, 0, (0,), 25, 0), ]
лист туплов: колво нейронов в группе,
# плюс(True) или минус(False) на выходе нейронов группы,
# номер функции нейронов группы,
# дискретный выход у нейронов или нет (True - да)
# тулпа с номерами inputs-групп, с которыми связанна нейронная группа
# длинна входного input-вектора группы
# коэф нормализации вывода группы'''
def __init__(self, layConfig = [ (0, True, 0, True, (0,), 25, 0), ]):
self.neuronGroups_count = len(layConfig)
self.neuronGroups_list = []
self.neuronGroups_inputs_link = []
self.groupsNormalization_coeff = []
self.neuron_count = 0
if layConfig[0][0] == 0:
pass
else:
for neuron_group in layConfig:
neuton_with_pos_out = neuron_group[1]
lay_temp = []
i = 0
while i < neuron_group[0]:
neuron = neuron_abstact(generateWeightsCount=neuron_group[5],
positiveOutput=neuton_with_pos_out,
funcNum=neuron_group[2],
digitalOut=neuron_group[3] )
lay_temp.append(neuron)
self.neuron_count+=1
i+=1
self.neuronGroups_list.append(lay_temp)
self.neuronGroups_inputs_link.append(neuron_group[4])
self.groupsNormalization_coeff.append(neuron_group[6])
def get_excited(self, inputsGoups = [[],]):
output_groups = []
i=0
while i<self.neuronGroups_count:
group_input = []
neurou_group_output = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputsGoups[inputgroupNum])
for neuron in self.neuronGroups_list[i]:
neurou_group_output.append(neuron.spike( group_input ))
if self.groupsNormalization_coeff[i] != 0:
neurou_group_output = self.normalize_vector(neurou_group_output, self.groupsNormalization_coeff[i])
output_groups.append(neurou_group_output)
i+=1
return output_groups
def normalize_vector(self, inputVector = [], norm_coeff = 1):
min_val = min(inputVector)
max_val = max(inputVector)
delitel = max_val - min_val
normalOutputVector = []
if delitel == 0:
for x_val in inputVector:
norm_x_val = 0
normalOutputVector.append(norm_x_val)
else:
for x_val in inputVector:
norm_x_val = ( (x_val - min_val) * norm_coeff )/delitel
normalOutputVector.append(norm_x_val)
return normalOutputVector
def fcingCooldown(self, inputGroups = [[],], output = [[],], desire_out = [[],],
changingNeuronsCount = 1, changingNeuronsWeightCount = 1, Lwa = 1):
errOuts = self.get_err_out(output, desire_out)
errOuts.sort()
changing_errOuts = []
changing_neuron_weights_info = []
inputs = []
len_groupinput = []
i = 0
while i < self.neuronGroups_count:
group_input = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputGroups[inputgroupNum])
inputs.append(group_input)
i+=1
i = 0
while i < changingNeuronsCount and i < len(errOuts):
changing_errOuts.append(errOuts[i])
i+=1
for err_out in changing_errOuts:
err_neuron = self.neuron_at(err_out[2])
err_neuron_vector = err_neuron.learning_spike(inputs[err_out[2][0]])
err_neuron_vector.sort()
i = 0
Nwa = Lwa
while i < changingNeuronsWeightCount:
if err_out[1] == 1:#надо увеличить выход нейрона
if err_neuron_vector[i][1] < err_out[1]: #вес нейрона находится на отрицательном ребре
Nwa = - Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: #вес нейрона находится на положительном ребре
Nwa = Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: #надо уменьшить выход
if err_neuron_vector[i][1] < err_out[1]: #вес нейрона находится на отрицательном ребре
Nwa = Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
else: #вес нейрона находится на положительном ребре
Nwa = - Lwa
self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
changing_neuron_weights_info.append( (err_out, err_neuron_vector[i], Nwa) )
i+=1
input_group_len = []
for input in inputGroups:
input_group_len.append(len(input))
for ch_neur in changing_neuron_weights_info:
#изменить инпут пропорионально аджастам весов нейронов
Gnum = 0
Nnum = ch_neur[1][2]
for link in self.neuronGroups_inputs_link[Gnum]:
if Nnum >= input_group_len[link]:
Nnum -= input_group_len[link]
else:
Gnum = link
break
inputGroups[Gnum][Nnum] += inputGroups[Gnum][Nnum] * ch_neur[2]
return inputGroups
def get_err_out(self, list, example_list):
'''На входе лист листов с интами'''
err_elements = []
try:
len1 = len(list)
i = 0
while i < len1:
len2 = len(list[i])
j = 0
while j < len2:
if list[i][j] != example_list[i][j]:
diff = example_list[i][j] - list[i][j]
delta = abs(diff)
sign = 1 if diff > 0 else -1
er_element = (delta, sign, (i,j))
err_elements.append(er_element)
j+=1
i+=1
finally:
return err_elements
return err_elements
def neuron_at(self, coord):
    """Return the neuron located at ``coord`` = (group_index, neuron_index)."""
    group_index, neuron_index = coord
    return self.neuronGroups_list[group_index][neuron_index]
def get_neuron_group_ref(self, NgNum=0):
    """Return a live reference to neuron group number ``NgNum`` (default: first group)."""
    return self.neuronGroups_list[NgNum]
def get_draw_scheme(self):
    """Describe the network for drawing purposes.

    Returns one tuple per neuron group:
    ``(neuron_count, output_sign, digital_out, input_links, weights_count)``.

    Assumes every neuron in a group shares its settings, so the first
    neuron is used as the group prototype (the original did the same).

    Fix: iterate with :func:`enumerate` instead of calling
    ``self.neuronGroups_list.index(group)`` inside the loop, which was
    O(n^2) and returned the wrong index when two groups compared equal.
    """
    draw_scheme = []
    for group_idx, group in enumerate(self.neuronGroups_list):
        proto = group[0]  # group prototype: groups are assumed homogeneous
        draw_scheme.append((len(group),
                            proto.output_sign,
                            proto.digital_out,
                            self.neuronGroups_inputs_link[group_idx],
                            proto.weightsCount))
    return draw_scheme
def get_build_scheme(self):
    """Describe the network precisely enough to rebuild it.

    Returns one tuple per neuron group:
    ``(neuron_count, positive_output, func_num, digital_out, input_links,
    weights_count, normalization_coef)``.

    Assumes every neuron in a group shares its settings, so the first
    neuron is used as the group prototype (the original did the same).

    Fixes: ``enumerate`` replaces the O(n^2) / duplicate-unsafe
    ``list.index`` lookups, and ``output_sign == 1`` replaces the
    ``True if ... else False`` anti-idiom.
    """
    build_scheme = []
    for group_idx, group in enumerate(self.neuronGroups_list):
        proto = group[0]  # group prototype: groups are assumed homogeneous
        build_scheme.append((len(group),
                            proto.output_sign == 1,
                            proto.funcNum,
                            proto.digital_out,
                            self.neuronGroups_inputs_link[group_idx],
                            proto.weightsCount,
                            self.groupsNormalization_coeff[group_idx]))
    return build_scheme
class neuron_abstact():
'''funcNum: 0 - сумматор (если цифровой выход, то пороговый сумматор
1 - рациональная сигмоида
threshold порог срабатывания для цифрового выхода'''
def __init__(self, weights=None, generateWeightsCount=0, positiveOutput=True, funcNum=0,
             digitalOut=True):
    """Create a neuron.

    funcNum: 0 - summator (threshold summator when the output is digital),
             1 - rational sigmoid.
    threshold is the firing threshold for the digital output.

    :param weights: explicit weight list; ``None`` means "generate"
        (fix: the original used a shared mutable default ``[]``)
    :param generateWeightsCount: when > 0, generate this many random weights
    :param positiveOutput: sign of the analog output (+1 / -1)
    :param funcNum: activation function selector (see above)
    :param digitalOut: emit 0/1 instead of the raw weighted value
    """
    if weights is None:
        weights = []
    if generateWeightsCount > 0:
        self.weights = []
        self.set_random_weights(weights, generateWeightsCount)
    else:
        self.weights = weights
    self.weightsCount = len(self.weights)
    self.output_sign = 1 if positiveOutput else -1
    self.digital_out = digitalOut
    self.funcNum = funcNum
    # NOTE(review): threshold is derived from generateWeightsCount even when
    # explicit weights are supplied (it is then 0) -- preserved as-is.
    self.threshold = round(generateWeightsCount / 2)
    self.recurrent_mem = []
def set_random_weights(self, weights=None, weightsCount=25):
    """Install ``weights``, or generate ``weightsCount`` random ones when empty.

    Generated values are ``9.9 / randint(1, 100) + 0.1``, i.e. in
    (0.199, 10.0].  (Fixes: shared mutable default ``[]`` replaced by
    ``None``; the no-op ``(9.9 + x - x)`` simplified to ``9.9``.)

    :param weights: explicit weights to copy; ``None``/empty triggers generation
    :param weightsCount: number of weights to generate
    """
    if not weights:
        self.weights = [9.9 / randint(1, 100) + 0.1 for _ in range(weightsCount)]
    else:
        self.weights = weights.copy()
    self.weightsCount = len(self.weights)
def set_funcNum(self, funcNum):
    """Select the activation function (0 = summator, 1 = rational sigmoid, ...)."""
    self.funcNum = funcNum
def set_weights(self, weights=None):
    """Install an explicit weight list.

    For the rational sigmoid (funcNum == 1) every zero weight is replaced
    with a random value in [0.1, 10), because that activation divides by
    ``|weight|`` and a zero weight with a zero input yields 0/0.

    Bug fix: the original assigned the replacement to the *local* name
    ``weights`` instead of ``self.weights[i]``, so zeros were kept.
    (Also: shared mutable default ``[]`` replaced by ``None``.)

    :param weights: new weight list (stored by reference, as before)
    """
    self.weights = weights if weights is not None else []
    self.weightsCount = len(self.weights)
    if self.funcNum == 1:
        for idx, weight in enumerate(self.weights):
            if weight == 0:
                self.weights[idx] = self.get_random_weight(from_=0.1, to=10)
def get_weights(self):
    """Return the neuron's weight list (a live reference, not a copy)."""
    return self.weights
def get_random_weight(self, from_=0, to=10):
    """Return a random weight: ``(to - from_) / randint(1, 100) + from_``."""
    span = to - from_
    return span / randint(1, 100) + from_
def spike(self, input = []):
if self.funcNum == 0: #linear sum
output = 0
i = 0
while i < self.weightsCount:
output += ( input[i] * self.weights[i] )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
elif self.funcNum == 1: #rational sig
output = 0
i = 0
while i < self.weightsCount:
abs_inp = abs(input[i])
output += abs_inp / ( abs_inp + abs(self.weights[i]) )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
elif self.funcNum == 2: #RelU
output = 0
i = 0
while i < self.weightsCount:
output += ( input[i] + self.weights[i] ) * self.weights[i] #закоментить, если черви сойдут с ума
| |
"""General functions and classes to support PsychoPy experiments."""
from __future__ import division
import os
import sys
import time
import json
import socket
import warnings
import argparse
import subprocess
from glob import glob
from string import letters
from math import floor
from subprocess import call
from pprint import pformat
import numpy as np
import pandas as pd
from scipy import stats
from numpy.random import RandomState
from psychopy import core, event, visual, sound
from psychopy.monitors import Monitor
from psychopy import logging
class Params(object):
    """Stores all of the parameters needed during the experiment.

    Some parameters are set upon initialization from the file 'params.py',
    others can be set from the command line.
    """
    def __init__(self, mode, p_file='params'):
        """Initializer for the params object.

        Parameters
        ----------
        mode: string, name of the dict we want from the param file
        p_file: string, the name of a parameter file
        """
        self.mode = mode
        # Load the parameter file as a module and copy the dict named after
        # `mode` onto this object, one attribute per entry.
        im = __import__(p_file)
        self.param_module = im
        param_dict = getattr(im, mode)
        for key, val in param_dict.iteritems():  # NOTE: Python 2 dict API
            setattr(self, key, val)
        # Record the session start time; date/time feed the log file stem.
        timestamp = time.localtime()
        self.timestamp = time.asctime(timestamp)
        self.date = time.strftime("%Y-%m-%d", timestamp)
        self.time = time.strftime("%H-%M-%S", timestamp)
        # assumes a module-level git_hash() helper defined elsewhere in this
        # file -- records which code revision the experiment was run from
        self.git_hash = git_hash()

    def __repr__(self):
        # Pretty-print every stored parameter for debugging.
        return pformat(self.__dict__)

    def __getitem__(self, key):
        # Dict-style read access: p["subject"] == p.subject
        return getattr(self, key)

    def get(self, key, default=None):
        # Dict-style .get() with a default, for optional parameters.
        return getattr(self, key, default)

    def set_by_cmdline(self, arglist):
        """Get runtime parameters off the commandline."""
        # Create the parser, set default args
        parser = argparse.ArgumentParser()
        parser.add_argument("-subject", default="test")
        parser.add_argument("-cbid")  # presumably a counterbalancing id -- confirm with callers
        parser.add_argument("-run", type=int, default=1)
        parser.add_argument("-fmri", action="store_true")
        parser.add_argument("-debug", action="store_true")
        parser.add_argument("-nolog", action="store_true")
        # Add additional arguments by experiment
        try:
            func_name = self.exp_name + "_cmdline"
            arg_func = getattr(self.param_module, func_name)
            arg_func(parser)
        except AttributeError:
            # Either no exp_name attribute or no <exp_name>_cmdline hook in
            # the param module -- both simply mean "no extra arguments".
            pass
        # Parse the arguments
        args = parser.parse_args(arglist)
        # Add command line args to the class dict
        self.__dict__.update(args.__dict__)
        # Debug sessions run windowed so the desktop stays reachable.
        if self.debug:
            self.full_screen = False
        # Scanner sessions may override display and response settings.
        if self.fmri and hasattr(self, "fmri_monitor_name"):
            self.monitor_name = self.fmri_monitor_name
        if self.fmri and hasattr(self, "fmri_screen_number"):
            self.screen_number = self.fmri_screen_number
        if self.fmri and hasattr(self, "fmri_resp_keys"):
            self.resp_keys = self.fmri_resp_keys
        # Build the log file stem with information we now have
        # TODO Perhaps do this in a property in case this isn't called
        # assumes self.log_template was provided by the param file -- confirm
        kws = dict(subject=self.subject,
                   mode=self.mode,
                   date=self.date,
                   time=self.time,
                   run=self.run)
        self.log_stem = self.log_template.format(**kws)

    def to_text_header(self, fid):
        """Save the parameters to a text file."""
        # Written as '#' comment lines so data files stay machine-readable.
        for key, val in self.__dict__.items():
            if not key.startswith("_"):
                fid.write("# {} : {} \n".format(key, val))

    def to_json(self, fname):
        """Save the parameters to a .json"""
        data = dict([(k, v) for k, v in self.__dict__.items()
                     if not k.startswith("_")])
        # The imported module object itself is not JSON-serializable.
        del data["param_module"]
        if not fname.endswith(".json"):
            fname += ".json"
        # assumes a module-level archive_old_version() helper -- moves any
        # existing file aside instead of overwriting it
        archive_old_version(fname)
        with open(fname, "w") as fid:
            json.dump(data, fid, sort_keys=True, indent=4)
class DataLog(object):
    """Holds info about file that gets updated throughout experiment."""
    def __init__(self, p, columns):
        """Set things up.

        Parameters
        ----------
        p: Params object for the session (p.nolog / p.log_stem are read)
        columns: ordered column names for the CSV rows
        """
        self.p = p
        self.columns = columns
        if not p.nolog:
            self.init_log(p)

    def init_log(self, p, name_stem=None):
        """Create the CSV file, archive any old copy, and write the header."""
        # Figure out the name and clear out old files
        if name_stem is None:
            name_stem = p.log_stem
        self.fname = name_stem + ".csv"
        # assumes a module-level archive_old_version() helper -- confirm
        archive_old_version(self.fname)
        # Save the parameters to json with a similar base filename
        p.to_json(name_stem)
        # Write the column header
        column_string = ",".join(map(str, self.columns)) + "\n"
        with open(self.fname, "w") as fid:
            fid.write(column_string)

    def add_data(self, data_dict):
        """Add a line of data based on a dictionary and stored columns."""
        # Columns missing from data_dict are written as the string "None".
        data_list = [str(data_dict.get(col, None)) for col in self.columns]
        data_str = ",".join(data_list) + "\n"
        if not self.p.nolog:
            with open(self.fname, "a") as fid:
                fid.write(data_str)
class WindowInfo(object):
    """Container for monitor information.

    Builds the keyword dict needed to open a psychopy Window from the
    session Params and the local monitors.py definitions.
    """
    def __init__(self, params):
        """Extracts monitor information from params file and monitors.py."""
        try:
            mod = __import__("monitors")
        except ImportError:
            sys.exit("Could not import monitors.py in this directory.")
        try:
            minfo = getattr(mod, params.monitor_name.replace("-", "_"))
        except AttributeError:
            # Bug fix: getattr raises AttributeError (the original caught
            # IndexError, so a missing monitor died with a raw traceback).
            sys.exit("Monitor not found in monitors.py")
        fullscreen = params.get("full_screen", True)
        size = minfo["size"] if fullscreen else (800, 600)
        monitor = Monitor(name=minfo["name"],
                          width=minfo["width"],
                          distance=minfo["distance"])
        monitor.setSizePix(minfo["size"])
        try:
            monitor.setGamma(minfo["gamma"])
        except (KeyError, AttributeError):
            # Bug fix: a monitor dict without a "gamma" entry raises
            # KeyError, which the original (AttributeError only) missed.
            warnings.warn("Could not set monitor gamma table.")
        # Convert from cd/m^2 to psychopy rgb color
        # Note that this ignores the min luminance of the monitor
        window_color = params.mean_luminance / minfo["max_luminance"] * 2 - 1
        # Allow for horizontal mirror view of the whole window
        if params.fmri and params.get("fmri_mirror_horizontal", False):
            viewscale = [-1, 1]
        else:
            viewscale = [1, 1]
        info = dict(units=params.get("monitor_units", "deg"),
                    screen=params.get("screen", 0),
                    fullscr=fullscreen,
                    allowGUI=not fullscreen,
                    color=window_color,
                    size=size,
                    monitor=monitor,
                    viewScale=viewscale)
        # Additive blending needs a framebuffer object in psychopy.
        if hasattr(params, "blend_mode"):
            info["blendMode"] = params.blend_mode
            if params.blend_mode == "add":
                info["useFBO"] = True
        if "refresh_hz" in minfo:
            self.refresh_hz = minfo["refresh_hz"]
        self.name = params.monitor_name
        self.__dict__.update(info)
        self.window_kwargs = info
class WaitText(object):
    """A class for showing text on the screen until a key is pressed. """
    def __init__(self, win, lines=("Press a key to continue",),
                 advance_keys=None, quit_keys=None, height=.5, **kwargs):
        """Set the text stimulus information.

        Parameters
        ----------
        win: psychopy window
        lines: sequence of strings, one per displayed line
        advance_keys: keys that dismiss the text (default: ["space"])
        quit_keys: keys that quit the experiment (default: ["escape", "q"])
        height: letter height, also used as the line spacing unit

        Bug fix: the default used to be ("Press a key to continue") --
        parentheses without a trailing comma make a *string*, not a tuple,
        so the default message was rendered one character per line.
        """
        self.win = win
        if advance_keys is None:
            advance_keys = ["space"]
        self.advance_keys = advance_keys
        if quit_keys is None:
            quit_keys = ["escape", "q"]
        self.quit_keys = quit_keys
        self.listen_keys = quit_keys + advance_keys
        kwargs["height"] = height
        # Stack the lines vertically, centered as a group on the screen.
        n = len(lines)
        heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height
        texts = []
        for line, y in zip(lines, heights):
            text = visual.TextStim(win, line, pos=(0, y), **kwargs)
            texts.append(text)
        self.texts = texts

    def draw(self, duration=np.inf, sleep_time=.2):
        """Display text until a key is pressed or until duration elapses."""
        clock = core.Clock()
        for text in self.texts:
            text.draw()
        self.win.flip()
        t = 0
        while t < duration:
            t = clock.getTime()
            for key in event.getKeys(keyList=self.listen_keys):
                if key in self.quit_keys:
                    core.quit()
                elif key in self.advance_keys:
                    return
            # Poll gently instead of busy-waiting.
            time.sleep(sleep_time)
class Fixation(object):
    """Simple fixation point with color as a property."""

    def __init__(self, win, p, color="white"):
        """Create the fixation dot; p may override the color via fix_iti_color."""
        self.win = win
        dot_color = p.get("fix_iti_color", color)
        self._color = dot_color
        self.dot = visual.Circle(win, interpolate=True,
                                 fillColor=dot_color,
                                 lineColor=dot_color,
                                 size=p.fix_size)

    @property
    def color(self):
        """Current fill/line color of the dot."""
        return self._color

    @color.setter  # pylint: disable-msg=E0102r
    def color(self, color):
        # None means "match the window background", i.e. hide the dot.
        if color is None:
            color = self.win.color
        self._color = color
        self.dot.setFillColor(color)
        self.dot.setLineColor(color)

    def draw(self):
        self.dot.draw()
class ProgressBar(object):
    """Progress bar to show how far one is in an experiment.

    Drawn as an outlined frame with a filled bar that grows with
    `prop_completed` (0-1). Reads prog_bar_{width,height,position,color,
    linewidth} from the params object, with defaults.
    """
    def __init__(self, win, p):
        self.p = p
        self.width = width = p.get("prog_bar_width", 5)
        self.height = height = p.get("prog_bar_height", .25)
        self.position = position = p.get("prog_bar_position", -3)
        color = p.get("prog_bar_color", "white")
        linewidth = p.get("prog_bar_linewidth", 2)
        # Unit square, scaled/shifted below into screen coordinates.
        # (Fix: np.float is deprecated and removed in NumPy >= 1.24.)
        self.full_verts = np.array([(0, 0), (0, 1),
                                    (1, 1), (1, 0)], float)
        frame_verts = self.full_verts.copy()
        frame_verts[:, 0] *= width
        frame_verts[:, 1] *= height
        frame_verts[:, 0] -= width / 2  # center horizontally
        frame_verts[:, 1] += position
        self.frame = visual.ShapeStim(win,
                                      fillColor=None,
                                      lineColor=color,
                                      lineWidth=linewidth,
                                      vertices=frame_verts)
        self.bar = visual.ShapeStim(win,
                                    fillColor=color,
                                    lineColor=color,
                                    lineWidth=linewidth)
        self._prop_completed = 0

    @property
    def prop_completed(self):
        """Proportion of the experiment completed, in [0, 1]."""
        return self._prop_completed

    @prop_completed.setter
    def prop_completed(self, prop):
        self.update_bar(prop)

    def update_bar(self, prop):
        """Resize the filled bar to reflect `prop` (0-1) completion."""
        self._prop_completed = prop
        bar_verts = self.full_verts.copy()
        bar_verts[:, 0] *= self.width * prop
        bar_verts[:, 1] *= self.height
        bar_verts[:, 0] -= self.width / 2
        bar_verts[:, 1] += self.position
        # Fix: the original assigned .vertices and then called setVertices
        # with the same array -- one redundant update removed.
        self.bar.setVertices(bar_verts)

    def draw(self):
        self.bar.draw()
        self.frame.draw()
class PresentationLoop(object):
    """Context manager for the main loop of an experiment."""
    def __init__(self, win, p=None, log=None, fix=None,
                 exit_func=None, feedback_func=None,
                 fileobj=None, tracker=None):
        """Store the session objects used on entry and exit.

        Parameters
        ----------
        win: psychopy window
        p: Params object (p.fmri, p.equilibrium_trs, p.tr are read on entry)
        log: DataLog handed to the feedback/exit callbacks
        fix: fixation stimulus drawn while waiting out equilibrium volumes
        exit_func: callable(p, log) run last on exit
        feedback_func: callable(win, p, log) run on exit before exit_func
        fileobj: open file handle to close on exit
        tracker: eye tracker object with a shutdown() method
        """
        self.p = p
        self.win = win
        self.fix = fix
        self.log = log
        self.exit_func = exit_func
        self.feedback_func = feedback_func
        self.fileobj = fileobj
        self.tracker = tracker

    def __enter__(self):
        # Scanner runs: block until the scanner trigger arrives, then show
        # fixation while the equilibrium volumes are discarded.
        # assumes module-level wait_for_trigger()/wait_check_quit() helpers
        # defined elsewhere in this file -- confirm
        if self.p.fmri:
            wait_for_trigger(self.win, self.p)
            self.fix.draw()
            self.win.flip()
            wait_check_quit(self.p.equilibrium_trs * self.p.tr)

    def __exit__(self, type, value, traceback):
        # Clear the screen, release resources, then run user callbacks.
        self.win.flip()
        if self.fileobj is not None:
            self.fileobj.close()
        if self.tracker is not None:
            self.tracker.shutdown()
        if self.feedback_func is not None:
            self.feedback_func(self.win, self.p, self.log)
        if self.exit_func is not None:
            self.exit_func(self.p, self.log)
        self.win.close()
class AuditoryFeedback(object):
    """Plays a feedback sound keyed by trial event name.

    Events: correct, wrong, noresp, fixbreak, nofix. Each maps to a .wav
    in the package's sounds/ directory; pass None for an event name to
    disable sound for that event.
    """
    def __init__(self, play_sounds=True, correct="ding", wrong="signon",
                 noresp="click", fixbreak="click", nofix="secalert"):
        # TODO cleaner to do a more abstract feedback object
        # Bug fix: honor the play_sounds argument (it was hard-coded True).
        self.play_sounds = play_sounds
        sound_dir = os.path.join(os.path.dirname(__file__), "sounds")
        sound_name_dict = dict(correct=correct,
                               wrong=wrong,
                               noresp=noresp,
                               fixbreak=fixbreak,
                               nofix=nofix)
        sound_dict = {}
        # (loop variable renamed from `event`, which shadowed psychopy.event)
        for event_name, sound_type in sound_name_dict.items():
            # Bug fix: the original tested `sound is None`, i.e. the psychopy
            # sound *module* (never None); a None sound *name* is what is
            # meant to disable feedback for that event.
            if sound_type is None:
                sound_dict[event_name] = None
            else:
                fname = os.path.join(sound_dir, sound_type + ".wav")
                sound_obj = sound.Sound(fname)
                sound_dict[event_name] = sound_obj
        self.sound_dict = sound_dict

    def __call__(self, event):
        """Play the sound mapped to `event`; no-op when sounds are disabled."""
        if self.play_sounds:
            sound_obj = self.sound_dict[event]
            if sound_obj is not None:
                sound_obj.play()
def make_common_visual_objects(win, p):
    """Return a dictionary with visual objects that are generally useful.

    Always contains "fix" and "progress"; "instruct", "break" and "finish"
    WaitText screens are added when the corresponding *_text parameter
    exists on p.
    """
    # Fixation point and progress bar are always present.
    stims = dict(fix=Fixation(win, p), progress=ProgressBar(win, p))
    quit_keys = p.get("quit_keys", ["q", "escape"])
    wait_keys = p.get("wait_keys", ["space"])
    finish_keys = p.get("finish_keys", ["return"])
    # Optional text screens: (dict key, params attribute, advance keys).
    text_specs = [("instruct", "instruct_text", wait_keys),
                  ("break", "break_text", wait_keys),
                  ("finish", "finish_text", finish_keys)]
    for stim_name, attr, advance_keys in text_specs:
        if hasattr(p, attr):
            stims[stim_name] = WaitText(win, getattr(p, attr),
                                        advance_keys=advance_keys,
                                        quit_keys=quit_keys)
    return stims
def archive_old_version(fname):
"""Move a data file to | |
# PyeMap: A python package for automatic identification of electron and hole transfer pathways in proteins.
# Copyright(C) 2017-2020 <NAME>, <NAME>, <NAME> (Boston University, USA)
"""Finds shortest paths in graph given a source and optionally a target node.
Defines implementations of yen's and dijkstra's algorithms for calculating the shortest path(s) from
the source to target/surface exposed residues. Also defines ShortestPath and Branch objects for
organizing the pathways based on their distances and first surface exposed residue reached during
the pathway.
"""
import itertools
import string
import networkx as nx
from functools import total_ordering
@total_ordering
class ShortestPath(object):
    """Data structure used to store shortest paths.

    Provides ordering, user-facing string output, and storage for the NGL
    viewer data used in visualization. Sorting compares only the `length`
    attribute.

    Attributes
    ----------
    path: list of str
        Residue names that make up the shortest path
    path_id: str
        Identifier assigned by the owning Branch ("none" until set)
    length: float
        Total distance from source to target
    selection_strs: list of str
        NGL selection strings for visualization
    color_list: list of str
        Colors of residues in visualization
    labeled_atoms: list of str
        Atom names which are labeled in NGL visualization
    label_texts: list of str
        Labels of residues in NGL visualization
    """

    def __init__(self, path, length):
        """Store the residue list and the total source-to-target distance."""
        self.path = path
        self.length = length
        self.path_id = "none"
        self.selection_strs = []
        self.color_list = []
        self.labeled_atoms = []
        self.label_texts = []

    def __eq__(self, other):
        return self.length == other.length

    def __lt__(self, other):
        return self.length < other.length

    def __str__(self):
        # "<id>: <residues> <length to 2 decimals>"
        return "{}: {} {:.2f}".format(self.path_id, self.path,
                                      round(self.length, 2))

    def get_path_as_list(self):
        """Return [path_id, residue..., formatted_length] as a flat list."""
        tail = "{:.2f}".format(round(self.length, 2))
        return list(itertools.chain([self.path_id], self.path, [tail]))

    def set_id(self, path_id):
        """Setter for path_id"""
        self.path_id = path_id

    def set_visualization(self, selection_strs, color_list, labeled_atoms, label_texts):
        """Saves information needed for NGL visualization."""
        self.selection_strs = selection_strs
        self.color_list = color_list
        self.labeled_atoms = labeled_atoms
        self.label_texts = label_texts
class Branch(object):
    """Groups shortest paths that share a first surface-exposed residue.

    A path belongs to the branch whose `target` is the first surface
    exposed residue reached along the path. For example, with source A and
    surface exposed nodes B and C, [A,F,B] and [A,F,B,C] both belong to
    the "B" branch, while [A,E,C] forms its own "C" branch.

    Attributes
    ----------
    branch_id: str
        Unique identifier for a branch
    target: str
        Target node which a branch corresponds to
    paths: list of :class:`~pyemap.ShortestPath`
        Member paths, kept sorted by length
    """

    def __init__(self, branch_id, target):
        self.branch_id = branch_id
        self.target = target
        self.paths = []

    def add_path(self, path):
        """Add `path` to the branch, re-sort, and refresh every path id.

        Ids combine the branch id with a letter rank: the shortest path in
        branch 12 is '12a', the next '12b', and so on. Once every letter
        suffix is in use, additional paths are silently ignored.
        """
        suffixes = string.ascii_letters
        if len(self.paths) < len(suffixes):
            self.paths.append(path)
            self.paths = sorted(self.paths)
            for rank, member in enumerate(self.paths[:len(suffixes)]):
                member.set_id("{}{}".format(self.branch_id, suffixes[rank]))

    def __str__(self):
        """'Branch: <target>' followed by one line per member path."""
        pieces = ["Branch: " + str(self.target)]
        pieces.extend(str(member) for member in self.paths)
        return "\n".join(pieces) + "\n"

    def get_branch_as_list(self):
        """List form: a header row, then one row per member path."""
        rows = [["Branch: " + str(self.target)]]
        rows.extend(member.get_path_as_list() for member in self.paths)
        return rows
def _is_parent_pathway(shortest_path, targets):
"""Returns true if ShortestPath is a parent pathway, false if not.
A ShortestPath object is the parent of a branch if its terminal residue is the
only surface exposed residue in the path. For example, if targets=[A,B,C] and
the pathway is [H,I,C], then this pathway is a parent pathway. In contrast, if
the pathway is [H,B,A], then this pathway is not a parent pathway.
Parameters
----------
shortest_path: ShortestPath
ShortestPath object
targets: list of str
List of surface exposed residues
Returns
-------
bool
True if path is ShortestPath object is a parent pathway
"""
count = 0
for res in shortest_path.path:
if res in targets:
count += 1
return count == 1
def _find_branch(pt, targets, branches):
"""Determines which branch a pathway belongs to and returns that branch.
A ShortestPath belongs to a branch if the first surface exposed residue it reaches during the
pathway is the target of that particular branch.
Parameters
----------
pt: ShortestPath
ShortestPath object
targets: list of str
List of surface exposed residues
branches: list of pyemapBranch objects
List of branches already found
Returns
-------
cur_branch: Branch
Branch object that pt belongs to
"""
res = pt.path[0]
count = 0
while res not in targets:
count += 1
res = pt.path[count]
count = 0
cur_branch = branches[0]
while not res == cur_branch.target:
count += 1
cur_branch = branches[count]
return cur_branch
def dijkstras_shortest_paths(G, start, targets):
    """Returns shortest path from source to each surface exposed residue.

    Performs Dijkstra's algorithm from the source to each surface exposed
    residue, finding the shortest path. The ShortestPath objects are
    organized into branches based on the first surface exposed residue
    reached during the course of the pathway. As a side effect, decorates
    the edges/nodes of G with graphviz styling attributes (parent pathways
    opaque, sub-pathways semi-transparent).

    Parameters
    ----------
    G: :class:`networkx.Graph`
        Undirected, weighted residue graph
    start: str
        Source node
    targets: list of str
        List of surface exposed residues

    Returns
    -------
    branches: list of :class:`~pyemap.Branch`
        A list of Branch objects representing the groups of pathways found

    Raises
    ------
    RuntimeError:
        No shortest paths to surface found
    """
    shortestPaths = []
    for goal in targets:
        try:
            path = nx.dijkstra_path(G, start, goal)
        except Exception:
            # Unreachable or missing target: contributes no path.
            path = []
        if path:
            # Accumulate edge weights along the path. (Renamed from `sum`,
            # which shadowed the builtin.)
            total_length = 0
            for i in range(0, len(path) - 1):
                total_length += (G[path[i]][path[i + 1]]['weight'])
            shortestPaths.append(ShortestPath(path, total_length))
    shortestPaths = sorted(shortestPaths)
    branches = []
    # find the parent pathways: their terminal residue is the only surface
    # exposed residue on the path, so each one starts a new Branch
    for pt in shortestPaths:
        if _is_parent_pathway(pt, targets):
            path = pt.path
            for i in range(0, len(path) - 1):
                # solid, fully opaque styling for parent pathway edges
                G[path[i]][path[i + 1]]['color'] = '#778899FF'
                G[path[i]][path[i + 1]]['penwidth'] = 6.0
                G[path[i]][path[i + 1]]['style'] = 'solid'
                G.nodes[path[i]]['penwidth'] = 6.0
                G.nodes[path[i + 1]]['penwidth'] = 6.0
                # make the nodes look opaque if they are connected to the source
                # (a 9-char fillcolor already carries an alpha suffix)
                if len(G.nodes[path[i]]['fillcolor']) != 9:
                    G.nodes[path[i]]['fillcolor'] += 'FF'
                    G.nodes[path[i]]['color'] = '#708090FF'
                if len(G.nodes[path[i + 1]]['fillcolor']) != 9:
                    G.nodes[path[i + 1]]['fillcolor'] += 'FF'
                    G.nodes[path[i + 1]]['color'] = '#708090FF'
            br = Branch(len(branches) + 1, pt.path[-1])
            branches.append(br)
            br.add_path(pt)
    # find the sub pathways and attach them to their parents' branches
    for pt in shortestPaths:
        if not _is_parent_pathway(pt, targets):
            _find_branch(pt, targets, branches).add_path(pt)
            path = pt.path
            for i in range(0, len(path) - 1):
                # semi-transparent styling; never overwrite a parent edge
                if G[path[i]][path[i + 1]]['color'] != '#778899FF':
                    G[path[i]][path[i + 1]]['color'] = '#7788995F'
                G[path[i]][path[i + 1]]['penwidth'] = 6.0
                G[path[i]][path[i + 1]]['style'] = 'solid'
                G.nodes[path[i]]['penwidth'] = 6.0
                G.nodes[path[i + 1]]['penwidth'] = 6.0
                # make the nodes look opaque if they are connected to the source
                if len(G.nodes[path[i]]['fillcolor']) != 9:
                    G.nodes[path[i]]['fillcolor'] += '5F'
                    G.nodes[path[i]]['color'] = '#7080905F'
                if len(G.nodes[path[i + 1]]['fillcolor']) != 9:
                    G.nodes[path[i + 1]]['fillcolor'] += '5F'
                    G.nodes[path[i + 1]]['color'] = '#7080905F'
    if len(shortestPaths) == 0:
        raise RuntimeError("No paths to the surface found.")
    return branches
def yens_shortest_paths(G, start, target, max_paths=10):
"""Returns top 5 shortest paths from source to target.
Uses Yen's algorithm to calculate the shortest paths from source to target, writes
out the ShortestPath objects to file, and returns the 5 pathway IDs. In the graph, nodes and
edges that are part of any | |
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def vlan_trunk_allowed(self, vlan_id):
    """Allow a VLAN on the trunk port.

    Runs the vtysh command ``vlan trunk allowed {vlan_id}``.

    :param vlan_id: <1-4094> VLAN identifier
    """
    result = self.enode(
        'vlan trunk allowed {vlan_id}'.format(vlan_id=vlan_id),
        shell='vtysh'
    )
    if result:
        raise determine_exception(result)(result)
def no_vlan_trunk_allowed(self, vlan_id):
    """Disallow a VLAN on the trunk port.

    Runs the vtysh command ``no vlan trunk allowed {vlan_id}``.

    :param vlan_id: <1-4094> VLAN identifier
    """
    result = self.enode(
        'no vlan trunk allowed {vlan_id}'.format(vlan_id=vlan_id),
        shell='vtysh'
    )
    if result:
        raise determine_exception(result)(result)
def vlan_trunk_native_tag(self):
    """Enable tagging on the trunk port's native VLAN.

    Runs the vtysh command ``vlan trunk native tag``.
    """
    result = self.enode('vlan trunk native tag', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def no_vlan_trunk_native_tag(self):
    """Remove tagging from the trunk port's native VLAN.

    Runs the vtysh command ``no vlan trunk native tag``.
    """
    result = self.enode('no vlan trunk native tag', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def vlan_trunk_native(self, vlan_id):
    """Set the native VLAN on the trunk port.

    Runs the vtysh command ``vlan trunk native {vlan_id}``.

    :param vlan_id: <1-4094> VLAN identifier
    """
    result = self.enode(
        'vlan trunk native {vlan_id}'.format(vlan_id=vlan_id),
        shell='vtysh'
    )
    if result:
        raise determine_exception(result)(result)
def no_vlan_trunk_native(self, vlan_id):
    """Remove the native VLAN from the trunk port.

    Runs the vtysh command ``no vlan trunk native {vlan_id}``.

    :param vlan_id: <1-4094> VLAN identifier
    """
    result = self.enode(
        'no vlan trunk native {vlan_id}'.format(vlan_id=vlan_id),
        shell='vtysh'
    )
    if result:
        raise determine_exception(result)(result)
def lacp_mode_passive(self):
    """Set the interface as LACP passive.

    Runs the vtysh command ``lacp mode passive``.
    """
    result = self.enode('lacp mode passive', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def no_lacp_mode_passive(self):
    """Turn LACP passive mode off for the interface.

    Runs the vtysh command ``no lacp mode passive``.
    """
    result = self.enode('no lacp mode passive', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def lacp_mode_active(self):
    """Set the interface as LACP active.

    Runs the vtysh command ``lacp mode active``.
    """
    result = self.enode('lacp mode active', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def no_lacp_mode_active(self):
    """Turn LACP active mode off for the interface.

    Runs the vtysh command ``no lacp mode active``.
    """
    result = self.enode('no lacp mode active', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def lacp_fallback(self):
    """Enable LACP fallback mode.

    Runs the vtysh command ``lacp fallback``.
    """
    result = self.enode('lacp fallback', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def hash_l2_src_dst(self):
    """Base the LAG hash on l2-src-dst.

    Runs the vtysh command ``hash l2-src-dst``.
    """
    result = self.enode('hash l2-src-dst', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def hash_l3_src_dst(self):
    """Base the LAG hash on l3-src-dst.

    Runs the vtysh command ``hash l3-src-dst``.
    """
    result = self.enode('hash l3-src-dst', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def hash_l4_src_dst(self):
    """Base the LAG hash on l4-src-dst.

    Runs the vtysh command ``hash l4-src-dst``.
    """
    result = self.enode('hash l4-src-dst', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def lacp_rate_fast(self):
    """Request LACP heartbeats at the fast rate (one per second).

    Runs the vtysh command ``lacp rate fast``.
    """
    result = self.enode('lacp rate fast', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
def no_lacp_rate_fast(self):
    """Return LACP heartbeats to the slow rate (once every 30 seconds).

    Runs the vtysh command ``no lacp rate fast``.
    """
    result = self.enode('no lacp rate fast', shell='vtysh')
    if result:
        raise determine_exception(result)(result)
class ConfigInterfaceMgmt(ContextManager):
"""
Configure management interface.
pre_commands:
::
['config terminal', 'interface mgmt']
post_commands:
::
['end']
"""
def __init__(self, enode):
self.enode = enode
def __enter__(self):
commands = """\
config terminal
interface mgmt
"""
self.enode.libs.common.assert_batch(
commands,
replace=self.__dict__,
shell='vtysh'
)
return self
def __exit__(self, type, value, traceback):
commands = """\
end
"""
self.enode.libs.common.assert_batch(
commands,
replace=self.__dict__,
shell='vtysh'
)
def ip_static(
self, ip):
"""
Set IP address
This function runs the following vtysh command:
::
# ip static {ip}
:param ip: Interface IP (ipv4 or ipv6) address.
"""
cmd = [
'ip static {ip}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_ip_static(
self, ip):
"""
Unset IP address
This function runs the following vtysh command:
::
# no ip static {ip}
:param ip: Interface IP (ipv4 or ipv6) address.
"""
cmd = [
'no ip static {ip}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def default_gateway(
self, gateway):
"""
Configure the Default gateway address (IPv4 and IPv6)
This function runs the following vtysh command:
::
# default-gateway {gateway}
:param gateway: IP (ipv4 or ipv6) address.
"""
cmd = [
'default-gateway {gateway}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_default_gateway(
self, gateway):
"""
Remove the Default gateway address (IPv4 and IPv6)
This function runs the following vtysh command:
::
# no default-gateway {gateway}
:param gateway: IP (ipv4 or ipv6) address.
"""
cmd = [
'no default-gateway {gateway}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def nameserver(
self, primary_nameserver, secondary_nameserver=''):
"""
Configure the nameserver
This function runs the following vtysh command:
::
# nameserver {primary_nameserver}
:param primary_nameserver: Primary nameserver (ipv4 or ipv6) address.
:param secondary_nameserver: Secondary nameserver (ipv4 or ipv6)
address.
"""
cmd = [
'nameserver {primary_nameserver}'
]
if secondary_nameserver:
cmd.append(
'{}{{secondary_nameserver}}{}'.format(
'', ''
)
)
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def no_nameserver(
self, primary_nameserver, secondary_nameserver=''):
"""
Configure the nameserver
This function runs the following vtysh command:
::
# no nameserver {primary_nameserver}
:param primary_nameserver: Primary nameserver (ipv4 or ipv6) address.
:param secondary_nameserver: Secondary nameserver (ipv4 or ipv6)
address.
"""
cmd = [
'no nameserver {primary_nameserver}'
]
if secondary_nameserver:
cmd.append(
'{}{{secondary_nameserver}}{}'.format(
'', ''
)
)
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
)
if result:
raise determine_exception(result)(result)
def ip_dhcp(
        self):
    """
    Set the mode as dhcp.

    This function runs the following vtysh command:

    ::

        # ip dhcp
    """
    # No placeholders to substitute; send the literal command.
    result = self.enode('ip dhcp', shell='vtysh')
    if result:
        # Any shell output signals an error; translate it to an exception.
        raise determine_exception(result)(result)
class ConfigRouterBgp(ContextManager):
    """
    BGP configuration.

    pre_commands:

    ::

        ['config terminal', 'router bgp {asn}']

    post_commands:

    ::

        ['end']
    """
    def __init__(self, enode, asn):
        # enode: engine node used to run vtysh commands.
        # asn: autonomous system number substituted into 'router bgp {asn}'.
        self.enode = enode
        self.asn = asn
def __enter__(self):
    # Enter BGP router configuration mode by running the pre-commands
    # as a vtysh batch; {asn} is filled from instance attributes.
    commands = """\
config terminal
router bgp {asn}
"""

    self.enode.libs.common.assert_batch(
        commands,
        replace=self.__dict__,
        shell='vtysh'
    )

    return self
def __exit__(self, type, value, traceback):
    # Leave configuration mode unconditionally (even on exception) by
    # running the post-commands batch.
    commands = """\
end
"""

    self.enode.libs.common.assert_batch(
        commands,
        replace=self.__dict__,
        shell='vtysh'
    )
def bgp_router_id(
        self, id):
    """
    Specifies the BGP router-ID for a BGP Router

    This function runs the following vtysh command:

    ::

        # bgp router-id {id}

    :param id: <A.B.C.D> IPv4 address
    """
    # Build the single vtysh command and substitute the router-id in place.
    command = 'bgp router-id {id}'.format(id=id)
    result = self.enode(command, shell='vtysh')
    if result:
        # Any shell output signals an error; translate it to an exception.
        raise determine_exception(result)(result)
def no_bgp_router_id(
self, id):
"""
Removes the BGP router-ID for a BGP Router
This function runs the following vtysh command:
::
# no bgp router-id {id}
:param id: <A.B.C.D> IPv4 address
"""
cmd = [
'no bgp router-id {id}'
]
result = self.enode(
(' '.join(cmd)).format(**locals()),
shell='vtysh'
| |
<filename>tests/test_flatql_parser.py
import pytest
from flatql.parser.ast import FlatQL, QualifiedName, FunctionCall, Comparison, Between, In, IsNull, \
Literal, Like, View, Column, Identifier, Table, BinaryExpression, ParenExpression, CaseExpression, \
LogicalExpression, ArrayLiteral
from flatql.parser.flatql_parser import parse_flatql, parse_qualified_name, parse_join_condition, \
parse_binary_expression
def test_simple_select_syntax():
    """Basic SELECT forms that must parse, and malformed ones that must not."""
    accepted = (
        "SELECT table.column FROM table_name",
        "SELECT table.column AS column_alias FROM table_name",
        "SELECT table.column AS \"column_alias\" FROM table_name",
        "SELECT table.column FROM \"select\"",
        "SELECT table.column, table.column FROM table_name",
        "SELECT table.column, table.column \n FROM table_name",
        "SELECT \"table\".\"integer\", table.column \n FROM table_name",
        "SELECT table.\"select\", table.column \n FROM table_name",
        "SELECT table.column, count(table.column) FROM table_name",
        "SELECT table.column, count(table.column) AS column_count FROM table_name",
        "SELECT table.column, count(table.column) AS \"column_count\" FROM table_name",
        "SELECT table.column, count_distinct(table.column) FROM table_name",
        "SELECT table.column, avg(table.column) FROM table_name",
        "SELECT table.column, sum(table.column) FROM table_name",
        "SELECT table.column, max(table.column) FROM table_name",
        "SELECT table.column, min(table.column) FROM table_name",
        "SELECT table.column, fun(table.column, table.column) FROM table_name",
        "SELECT table.column, fun(table.column, 10) FROM table_name",
        "SELECT table.column, fun(table.column, 10, 'a') FROM table_name",
        "SELECT table.column, count(table) FROM table_name",
        "SELECT table.column, count(\"table\") FROM table_name",
        "SELECT 1 + 1 FROM table_name",
        "SELECT 1 + 1 AS \"binary_alias\" FROM table_name",
        "SELECT (1 + 1) FROM table_name",
        "SELECT (1 + 1) AS \"binary_alias\" FROM table_name",
        "SELECT table.column + 1 FROM table_name",
        "SELECT table.column + 1 AS \"binary_alias\" FROM table_name",
        "SELECT table.column + table.column1 FROM table_name",
        "SELECT table.column + table.column1 AS \"binary_alias\" FROM table_name",
        "SELECT table.column + table.column1 * 2 / table.column3 AS \"binary_alias\" FROM table_name",
        "SELECT toYear(table.column) + table.column + 1 FROM table_name",
        "SELECT toYear(table.column) + count(table.column1) + 1 FROM table_name",
    )
    for query in accepted:
        assert parse_flatql(query) is not None

    # Reserved words as bare identifiers and digit-leading names must fail.
    rejected = (
        "SELECT table.column FROM select",
        "SELECT select.column FROM select",
        "SELECT table.column FROM ",
        "SELECT table.column FROM 0table",
        "SELECT table.0column FROM table",
        "SELECT select.integer FROM table",
    )
    for query in rejected:
        with pytest.raises(SyntaxError):
            parse_flatql(query)
def test_predicate_syntax():
    """WHERE-clause predicates that must parse, and malformed ones that must not."""
    accepted = (
        "SELECT table.column FROM \"table_name\" WHERE table.column = 'test'",
        "SELECT table.column FROM \"table_name\" WHERE table.column = 'test'",
        "SELECT table.column FROM table_name WHERE \"table\".column != 'test'",
        "SELECT table.column FROM table_name WHERE \"from\".\"integer\" = 1",
        "SELECT table.column FROM table_name WHERE table.column = 1.01",
        "SELECT table.column FROM table_name WHERE table.column = -1.01",
        "SELECT table.column FROM table_name WHERE table.column > -1.01",
        "SELECT table.column FROM table_name WHERE table.column < -1.01",
        "SELECT table.column FROM table_name WHERE table.column >= -1.01",
        "SELECT table.column FROM table_name WHERE table.column <= -1.01",
        "SELECT table.column FROM table_name WHERE table.column IS NULL",
        "SELECT table.column FROM table_name WHERE fun(table.column) IS NULL",
        "SELECT table.column FROM table_name WHERE table.column IS NOT NULL",
        "SELECT table.column FROM table_name WHERE table.column IN ('a', 'a')",
        "SELECT table.column FROM table_name WHERE table.column IN (1, 1)",
        "SELECT table.column FROM table_name WHERE table.column IN (1.0, 1.0)",
        "SELECT table.column FROM table_name WHERE fun(table.column) IN (1.0, 1.0)",
        "SELECT table.column FROM table_name WHERE table.column NOT IN (1.0, 1.0)",
        "SELECT table.column FROM table_name WHERE table.column LIKE 'aaa'",
        "SELECT table.column FROM table_name WHERE fun(table.column) LIKE 'aaa'",
        "SELECT table.column FROM table_name WHERE table.column NOT LIKE 'aaa'",
        "SELECT table.column FROM table_name WHERE table.column BETWEEN 1 AND 2",
        "SELECT table.column FROM table_name WHERE fun(table.column) BETWEEN 1 AND 2",
        "SELECT table.column FROM table_name WHERE table.column NOT BETWEEN 1 AND 2",
        "SELECT table.column FROM table_name WHERE count(table.column) > 10",
        "SELECT table.column FROM table_name WHERE count(\"select\".\"integer\") > 10",
        "SELECT table.column FROM table_name WHERE table.column IN (1.0, 1.0) "
        "AND count(table.column) > 10",
        "SELECT table.column FROM table_name WHERE table.column <= -1.01 AND table.column <= -1.01",
        "SELECT table.column FROM table_name WHERE table.column <= -1.01 AND table.column BETWEEN -1.01 AND -1 AND"
        " table.column <= -1.01 AND table.column LIKE 'aaa' AND table.column IN ('a', 'a')",
    )
    for query in accepted:
        assert parse_flatql(query) is not None

    rejected = (
        "SELECT table.column FROM table WHERE select.name = 'abc'",
        "SELECT table.column FROM table WHERE select.name AS table = 'abc'",
    )
    for query in rejected:
        with pytest.raises(SyntaxError):
            parse_flatql(query)
def test_order_by_syntax():
    """ORDER BY / LIMIT / OFFSET acceptance and rejection."""
    accepted = (
        "SELECT table.column FROM table_name ORDER BY \"table\"",
        "SELECT table.column FROM table_name WHERE table.name = 'abc'"
        " ORDER BY \"table\" DESC, \"table1\" ASC",
        "SELECT table.column FROM table_name ORDER BY \"table1\" ASC OFFSET 10",
        "SELECT table.column FROM table_name ORDER BY \"table1\" ASC LIMIT 100 OFFSET 10",
    )
    for query in accepted:
        assert parse_flatql(query) is not None

    # Fractional OFFSET and qualified ORDER BY columns must be rejected.
    rejected = (
        "SELECT table.column FROM table_name ORDER BY \"table1\" ASC OFFSET 10.0 ",
        "SELECT table.column FROM table_name ORDER BY table.name ASC LIMIT 10 ",
    )
    for query in rejected:
        with pytest.raises(SyntaxError):
            parse_flatql(query)
def test_comment_syntax():
    """Block and line comments parse; an unterminated block comment fails."""
    accepted = (
        "/* comment */ SELECT table.column FROM table_name ORDER BY \"abc\"",
        "-- comment \n SELECT table.column FROM table_name ORDER BY \"abc\"",
    )
    for query in accepted:
        assert parse_flatql(query) is not None

    with pytest.raises(SyntaxError):
        parse_flatql("/* comment * SELECT table.column FROM table_name ORDER BY \"abc\"")
# noinspection PyUnresolvedReferences
def test_projection_parser1():
    # Column projections keep both the raw token (_value, quotes included)
    # and the unquoted logical name (value()).
    statement: FlatQL = parse_flatql(
        "SELECT \"table\".\"column_1\", "
        " table.column_2"
        " FROM \"table\"")
    assert statement is not None
    assert statement.projections is not None
    assert len(statement.projections) == 2

    assert isinstance(statement.view, View)
    assert statement.view.name._value == '"table"'
    assert statement.view.name.value() == 'table'

    assert isinstance(statement.projections[0], Column)
    assert isinstance(statement.projections[1], Column)
    assert statement.projections[0].table.name._value == '"table"'
    assert statement.projections[0].name._value == '"column_1"'
    assert statement.projections[0].table.name.value() == 'table'
    assert statement.projections[0].name.value() == 'column_1'
def test_projection_parser2():
    # Function-call projections: the quoting of the argument's table name is
    # tracked via _is_quoted, while value() is the same either way.
    statement: FlatQL = parse_flatql(
        "SELECT count(table.column), "
        " count(\"table\".column) "
        " FROM \"table\"")
    assert statement is not None
    assert statement.projections is not None
    assert len(statement.projections) == 2

    assert isinstance(statement.projections[0], FunctionCall)
    assert isinstance(statement.projections[1], FunctionCall)

    assert statement.projections[0].name == 'count'
    assert len(statement.projections[0].args) == 1
    assert isinstance(statement.projections[0].args[0], Column)
    assert not statement.projections[0].args[0].table.name._is_quoted
    assert statement.projections[0].args[0].table.name.value() == "table"
    assert not statement.projections[0].args[0].name._is_quoted
    assert statement.projections[0].args[0].name.value() == "column"

    assert statement.projections[1].name == 'count'
    assert len(statement.projections[1].args) == 1
    assert isinstance(statement.projections[1].args[0], Column)
    assert statement.projections[1].args[0].table.name._is_quoted
    assert statement.projections[1].args[0].table.name.value() == "table"
def test_projection_parser3():
    # Mixed function arguments: a column plus numeric and string literals;
    # literal value() returns the text content ('1', 'abc').
    statement: FlatQL = parse_flatql(
        "SELECT count(\"table\".column, 1, 'abc') "
        " FROM \"table\"")
    assert statement is not None
    assert statement.projections is not None
    assert len(statement.projections) == 1

    assert isinstance(statement.projections[0], FunctionCall)
    assert statement.projections[0].name == 'count'
    assert len(statement.projections[0].args) == 3
    assert isinstance(statement.projections[0].args[0], Column)
    assert statement.projections[0].args[0].table.name.value() == "table"
    assert statement.projections[0].args[0].name.value() == "column"
    assert isinstance(statement.projections[0].args[1], Literal)
    assert statement.projections[0].args[1].value() == '1'
    assert isinstance(statement.projections[0].args[2], Literal)
    assert statement.projections[0].args[2].value() == 'abc'
def test_projection_parser4():
    # A bare (non-dotted) function argument parses as a Table reference,
    # including quoted reserved words like "where".
    statement: FlatQL = parse_flatql(
        "SELECT count(table), count(\"where\") "
        " FROM \"table\"")
    assert statement is not None
    assert statement.projections is not None
    assert len(statement.projections) == 2

    assert isinstance(statement.projections[0], FunctionCall)
    assert isinstance(statement.projections[1], FunctionCall)

    assert statement.projections[0].name == 'count'
    assert len(statement.projections[0].args) == 1
    assert isinstance(statement.projections[0].args[0], Table)
    assert statement.projections[0].args[0].name.value() == "table"

    assert statement.projections[1].name == 'count'
    assert len(statement.projections[1].args) == 1
    assert isinstance(statement.projections[1].args[0], Table)
    assert statement.projections[1].args[0].name.value() == "where"
def test_projection_parser5():
    """An aliased '+' projection parses as a single BinaryExpression."""
    parsed: FlatQL = parse_flatql(
        "SELECT table.column + table.column2 AS \"abc\""
        " FROM \"table\"")
    assert parsed is not None
    assert parsed.projections is not None
    assert len(parsed.projections) == 1
    assert isinstance(parsed.projections[0], BinaryExpression)
def test_projection_parser6():
    """A parenthesized aliased expression parses as a ParenExpression."""
    parsed: FlatQL = parse_flatql(
        "SELECT (table.column + table.column2) AS \"abc\""
        " FROM \"table\"")
    assert parsed is not None
    assert parsed.projections is not None
    assert len(parsed.projections) == 1
    assert isinstance(parsed.projections[0], ParenExpression)
def test_projection_parser7():
statement: FlatQL = parse_flatql(
"SELECT (table.column + table.column2) * count(table.column) AS \"abc\""
| |
* 4, self.expr.name.repeat(4)),
(lambda s: s[1:], self.expr.name.slice(1)),
(lambda s: s[1: 6], self.expr.name.slice(1, 6)),
(lambda s: s.title(), self.expr.name.title()),
(lambda s: s.rjust(20, '0'), self.expr.name.zfill(20)),
]
fields = [it[1].rename('id'+str(i)) for i, it in enumerate(methods_to_fields)]
expr = self.expr[fields]
res = self.engine.execute(expr)
result = self._get_result(res)
for i, it in enumerate(methods_to_fields):
method = it[0]
if i != 2:
first = [method(it[0]) for it in data]
else:
# cat
first = [method(it) for it in data]
second = [it[i] for it in result]
self.assertEqual(first, second)
def testDatetime(self):
    # Verifies datetime accessor expressions (year/month/.../date) against
    # values computed directly from the generated rows.
    data = self._gen_data(5)

    def date_value(sel):
        # sel is either an attribute name ('year') or a callable on datetime.
        if isinstance(sel, six.string_types):
            fun = lambda v: getattr(v, sel)
        else:
            fun = sel
        col_id = [idx for idx, col in enumerate(self.schema.names) if col == 'birth'][0]
        return [fun(row[col_id]) for row in data]

    methods_to_fields = [
        (partial(date_value, 'year'), self.expr.birth.year),
        (partial(date_value, 'month'), self.expr.birth.month),
        (partial(date_value, 'day'), self.expr.birth.day),
        (partial(date_value, 'hour'), self.expr.birth.hour),
        (partial(date_value, 'minute'), self.expr.birth.minute),
        (partial(date_value, 'second'), self.expr.birth.second),
        (partial(date_value, lambda d: d.isocalendar()[1]), self.expr.birth.weekofyear),
        (partial(date_value, lambda d: d.weekday()), self.expr.birth.dayofweek),
        (partial(date_value, lambda d: d.weekday()), self.expr.birth.weekday),
        (partial(date_value, lambda d: time.mktime(d.timetuple())), self.expr.birth.unix_timestamp),
        (partial(date_value, lambda d: datetime.combine(d.date(), datetime.min.time())), self.expr.birth.date),
    ]

    fields = [it[1].rename('birth'+str(i)) for i, it in enumerate(methods_to_fields)]

    expr = self.expr[fields]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    for i, it in enumerate(methods_to_fields):
        method = it[0]

        first = method()

        try:
            import pandas as pd

            # Normalize engine output: pandas Timestamps become naive
            # datetimes so they compare equal to the locally computed values.
            def conv(v):
                if isinstance(v, pd.Timestamp):
                    v = v.to_datetime()
                if isinstance(v, datetime):
                    return v.replace(tzinfo=None)
                return v
        except ImportError:
            conv = lambda v: v

        second = [conv(it[i]) for it in result]
        self.assertEqual(first, second)
def testSortDistinct(self):
    """Sort then distinct over a projected (id + 1) column, sliced to 50."""
    rows = [
        ['name1', 4, None, None, None],
        ['name2', 2, None, None, None],
        ['name1', 4, None, None, None],
        ['name1', 3, None, None, None],
    ]
    self._gen_data(data=rows)

    ordered = self.expr.sort(['name', -self.expr.id])
    expr = ordered.distinct(['name', lambda x: x.id + 1])[:50]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(len(result), 3)

    expected = [
        ['name1', 5],
        ['name1', 4],
        ['name2', 3],
    ]
    self.assertEqual(sorted(expected), sorted(result))
def testPivotTable(self):
    # Exercises pivot_table: default mean aggregation, multiple aggfuncs,
    # pivoting into per-name columns, fill_value, cache(), and arithmetic
    # over the pivoted (non-dynamic) schema.
    data = [
        ['name1', 1, 1.0, True, None],
        ['name1', 1, 5.0, True, None],
        ['name1', 2, 2.0, True, None],
        ['name2', 1, 3.0, False, None],
        ['name2', 3, 4.0, False, None]
    ]
    self._gen_data(data=data)

    expr = self.expr

    expr1 = expr.pivot_table(rows='name', values='fid')
    res = self.engine.execute(expr1)
    result = self._get_result(res)

    expected = [
        ['name1', 8.0 / 3],
        ['name2', 3.5],
    ]
    self.assertListAlmostEqual(sorted(result), sorted(expected), only_float=False)

    expr2 = expr.pivot_table(rows='name', values='fid', aggfunc=['mean', 'sum'])
    res = self.engine.execute(expr2)
    result = self._get_result(res)

    expected = [
        ['name1', 8.0 / 3, 8.0],
        ['name2', 3.5, 7.0],
    ]
    self.assertEqual(res.schema.names, ['name', 'fid_mean', 'fid_sum'])
    self.assertListAlmostEqual(sorted(result), sorted(expected), only_float=False)

    expr5 = expr.pivot_table(rows='id', values='fid', columns='name', aggfunc=['mean', 'sum'])
    expr6 = expr5['name1_fid_mean',
                  expr5.groupby(Scalar(1)).sort('name1_fid_mean').name1_fid_mean.astype('float').cumsum()]

    # Sort key that tolerates NULLs by mapping them to 0.
    k = lambda x: list(0 if it is None else it for it in x)

    expected = [
        [2, 2], [3, 5], [None, 5]
    ]
    res = self.engine.execute(expr6)
    result = self._get_result(res)
    self.assertEqual(sorted(result, key=k), sorted(expected, key=k))

    expr3 = expr.pivot_table(rows='id', values='fid', columns='name', fill_value=0).distinct()
    res = self.engine.execute(expr3)
    result = self._get_result(res)

    expected = [
        [2, 0, 2.0],
        [3, 4.0, 0],
        [1, 3.0, 3.0],
    ]

    self.assertEqual(res.schema.names, ['id', 'name2_fid_mean', 'name1_fid_mean'])
    self.assertEqual(result, expected)

    expr7 = expr.pivot_table(rows='id', values='fid', columns='name', aggfunc=['mean', 'sum']).cache()
    self.assertEqual(len(self.engine.execute(expr7)), 3)

    expr8 = self.expr.pivot_table(rows='id', values='fid', columns='name')
    self.assertEqual(len(self.engine.execute(expr8)), 3)
    self.assertNotIsInstance(expr8.schema, DynamicSchema)
    expr9 = (expr8['name1_fid_mean'] - expr8['name2_fid_mean']).rename('substract')
    self.assertEqual(len(self.engine.execute(expr9)), 3)
    expr10 = expr8.distinct()
    self.assertEqual(len(self.engine.execute(expr10)), 3)
def testGroupbyAggregation(self):
    # Covers grouped row_number, value_counts/topk, per-group count,
    # nunique, filtered value_counts, grouping by a constant Scalar, and
    # aggregation over a sliced collection.
    data = [
        ['name1', 4, 5.3, None, None],
        ['name2', 2, 3.5, None, None],
        ['name1', 4, 4.2, None, None],
        ['name1', 3, 2.2, None, None],
        ['name1', 3, 4.1, None, None],
    ]
    self._gen_data(data=data)

    field = self.expr.groupby('name').sort(['id', -self.expr.fid]).row_number()
    expr = self.expr['name', 'id', 'fid', field]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [
        ['name1', 3, 4.1, 1],
        ['name1', 3, 2.2, 2],
        ['name1', 4, 5.3, 3],
        ['name1', 4, 4.2, 4],
        ['name2', 2, 3.5, 1],
    ]
    # Sort the engine output the same way the window sorted (id asc, fid desc).
    result = sorted(result, key=lambda k: (k[0], k[1], -k[2]))
    self.assertEqual(expected, result)

    expr = self.expr.name.value_counts(dropna=True)[:25]

    expected = [
        ['name1', 4],
        ['name2', 1]
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(expected, result)

    expr = self.expr.name.topk(25)

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(expected, result)

    expr = self.expr.groupby('name').count()

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(sorted([it[1:] for it in expected]), sorted(result))

    expected = [
        ['name1', 2],
        ['name2', 1]
    ]

    expr = self.expr.groupby('name').id.nunique()

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual([it[1:] for it in expected], result)

    expr = self.expr[self.expr['id'] > 2].name.value_counts()[:25]

    expected = [
        ['name1', 4]
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(expected, result)

    expr = self.expr.groupby('name', Scalar(1).rename('constant')) \
        .agg(id=self.expr.id.sum())

    expected = [
        ['name1', 1, 14],
        ['name2', 1, 2]
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(expected, result)

    expr = self.expr[:1]
    expr = expr.groupby('name').agg(expr.id.sum())

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [
        ['name1', 4]
    ]

    self.assertEqual(expected, result)
def testProjectionGroupbyFilter(self):
    """Aggregate a mutated column per group, then filter the grouped result."""
    rows = [
        ['name1', 4, 5.3, None, None],
        ['name2', 2, 3.5, None, None],
        ['name1', 4, 4.2, None, None],
        ['name1', 3, 2.2, None, None],
        ['name1', 3, 4.1, None, None],
    ]
    self._gen_data(data=rows)

    df = self.expr.copy()
    df['id'] = df.id + 1
    df2 = df.groupby('name').agg(id=df.id.sum())[lambda x: x.name == 'name2']

    res = self.engine.execute(df2)
    self.assertEqual([['name2', 3]], self._get_result(res))
def testJoinGroupby(self):
    # Joins the main table to a second ODPS table on 'name', then groups
    # by 'id' and checks the per-group fid sums against a locally computed
    # itertools.groupby baseline.
    data = [
        ['name1', 4, 5.3, None, None],
        ['name2', 2, 3.5, None, None],
        ['name1', 4, 4.2, None, None],
        ['name1', 3, 2.2, None, None],
        ['name1', 3, 4.1, None, None],
    ]

    schema2 = Schema.from_lists(['name', 'id2', 'id3'],
                                [types.string, types.bigint, types.bigint])

    table_name = tn('pyodps_test_engine_table2')
    self.odps.delete_table(table_name, if_exists=True)
    table2 = self.odps.create_table(name=table_name, schema=schema2)

    expr2 = CollectionExpr(_source_data=table2, _schema=odps_schema_to_df_schema(schema2))

    self._gen_data(data=data)

    data2 = [
        ['name1', 4, -1],
        ['name2', 1, -2]
    ]

    self.odps.write_table(table2, 0, data2)

    expr = self.expr.join(expr2, on='name')[self.expr]
    expr = expr.groupby('id').agg(expr.fid.sum())

    res = self.engine.execute(expr)
    result = self._get_result(res)

    id_idx = [idx for idx, col in enumerate(self.expr.schema.names) if col == 'id'][0]
    fid_idx = [idx for idx, col in enumerate(self.expr.schema.names) if col == 'fid'][0]
    # groupby requires its input sorted by the same key.
    expected = [[k, sum(v[fid_idx] for v in row)]
                for k, row in itertools.groupby(sorted(data, key=lambda r: r[id_idx]), lambda r: r[id_idx])]
    for it in zip(sorted(expected, key=lambda it: it[0]), sorted(result, key=lambda it: it[0])):
        self.assertAlmostEqual(it[0][0], it[1][0])
        self.assertAlmostEqual(it[0][1], it[1][1])
def testFilterGroupby(self):
    """Filter applied on top of a grouped aggregation (HAVING semantics)."""
    rows = [
        ['name1', 4, 5.3, None, None],
        ['name2', 2, 3.5, None, None],
        ['name1', 4, 4.2, None, None],
        ['name1', 3, 2.2, None, None],
        ['name1', 3, 4.1, None, None],
    ]
    self._gen_data(data=rows)

    expr = self.expr.groupby(['name']).agg(id=self.expr.id.max())[lambda x: x.id > 3]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    self.assertEqual(len(result), 1)
    self.assertEqual([['name1', 4]], result)
def testWindowFunction(self):
    # Exercises grouped window functions: cumsum/cummax/cummean, mutate with
    # cumcount/cummin, rank family, and lag/lead with defaults and sort order.
    data = [
        ['name1', 4, 5.3, None, None],
        ['name2', 2, 3.5, None, None],
        ['name1', 4, 4.2, None, None],
        ['name1', 3, 2.2, None, None],
        ['name1', 3, 6.1, None, None],
    ]
    self._gen_data(data=data)

    expr = self.expr.groupby('name').id.cumsum()

    res = self.engine.execute(expr)
    result = self._get_result(res)

    # All name1 rows see the full group sum (14); name2 sees 2.
    expected = [[14]] * 4 + [[2]]
    self.assertEqual(sorted(expected), sorted(result))

    expr = self.expr.groupby('name').sort('fid').id.cummax()

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [[3], [4], [4], [4], [2]]
    self.assertEqual(sorted(expected), sorted(result))

    expr = self.expr[
        self.expr.groupby('name', 'id').sort('fid').id.cummean(),
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [
        [3], [3], [4], [4], [2]
    ]
    self.assertEqual(sorted(expected), sorted(result))

    expr = self.expr.groupby('name').mutate(id2=lambda x: x.id.cumcount(),
                                            fid2=lambda x: x.fid.cummin(sort='id'))

    res = self.engine.execute(expr['name', 'id2', 'fid2'])
    result = self._get_result(res)

    expected = [
        ['name1', 4, 2.2],
        ['name1', 4, 2.2],
        ['name1', 4, 2.2],
        ['name1', 4, 2.2],
        ['name2', 1, 3.5],
    ]
    self.assertEqual(sorted(expected), sorted(result))

    expr = self.expr[
        self.expr.id,
        self.expr.groupby('name').rank('id'),
        self.expr.groupby('name').dense_rank('fid', ascending=False),
        self.expr.groupby('name').row_number(sort=['id', 'fid'], ascending=[True, False]),
        self.expr.groupby('name').percent_rank('id'),
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [
        [4, 3, 2, 3, float(2) / 3],
        [2, 1, 1, 1, 0.0],
        [4, 3, 3, 4, float(2) / 3],
        [3, 1, 4, 2, float(0) / 3],
        [3, 1, 1, 1, float(0) / 3]
    ]
    [self.assertListAlmostEqual(l, r) for l, r in zip(sorted(expected), sorted(result))]

    expr = self.expr[
        self.expr.id,
        self.expr.groupby('name').id.lag(offset=3, default=0, sort=['id', 'fid']).rename('id2'),
        self.expr.groupby('name').id.lead(offset=1, default=-1,
                                          sort=['id', 'fid'], ascending=[False, False]).rename('id3'),
    ]

    res = self.engine.execute(expr)
    result = self._get_result(res)

    expected = [
        [4, 3, 4],
        [2, 0, -1],
        [4, 0, 3],
        [3, 0, -1],
        [3, 0, 3]
    ]
    self.assertEqual(sorted(expected), sorted(result))
def testWindowRewrite(self):
data = [
['name1', 4, 5.3, None, None],
['name2', 2, 3.5, None, None],
['name1', 4, 4.2, None, None],
['name1', 3, 2.2, None, None],
['name1', 3, 4.1, None, None],
]
self._gen_data(data=data)
expr = self.expr[self.expr.id - self.expr.id.mean() < 10][
[lambda x: x.id - x.id.max()]][[lambda x: x.id - x.id.min()]][lambda x: x.id - x.id.std() > 0]
res = self.engine.execute(expr)
result = self._get_result(res)
id_idx = [idx for idx, col in enumerate(self.expr.schema.names) if col == 'id'][0]
expected = [r[id_idx] for r in data]
maxv = max(expected)
expected = [v - maxv for v in expected]
minv = min(expected)
expected = [v - minv for v in expected]
| |
import csv
import datetime
import logging
import time
from io import StringIO
import numpy as np
import pandas as pd
from django.db.models import Q
from django.utils import timezone
from iotile_cloud.utils.gid import IOTileBlockSlug, IOTileDeviceSlug, IOTileStreamSlug, IOTileVariableSlug
from apps.physicaldevice.models import Device
from apps.property.models import GenericProperty
from apps.sqsworker.exceptions import WorkerActionHardError
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.utils import get_stream_output_mdo
from apps.utils.aws.redshift import get_ts_from_redshift
from apps.utils.data_helpers.manager import DataManager
from apps.utils.gid.convert import get_device_and_block_by_did, gid2int
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.objects.utils import get_device_or_block
from apps.utils.timezone_utils import convert_to_utc, str_to_dt_utc
from ..base import ReportGenerator
_TRIP_SUMMARY_VID = gid2int(SYSTEM_VID['TRIP_SUMMARY'])
logger = logging.getLogger(__name__)
def dt_format(dt):
    """Render a datetime as 'YYYY-MM-DD HH:MM:SS'."""
    return '{:%Y-%m-%d %H:%M:%S}'.format(dt)
class TripSummary(object):
    """ Represents a Trip Summary
    """

    # Maps stream variable LIDs to the attribute that holds the matching
    # stream object (see add_stream).
    _lid_map = {
        '5020': 's_events',
        '5021': 's_pressure',
        '5022': 's_humidity',
        '5023': 's_temp',
        SYSTEM_VID['TRIP_SUMMARY']: 's_summary',  # Trip Report Summary
    }

    # Class-level defaults; instances re-initialize these in __init__.
    s_temp = None
    s_humidity = None
    s_pressure = None
    s_events = None
    s_start = None
    s_summary = None
    device_or_block_slug = ''
    # project_slug = ''
    ts_start = None
    ts_end = None
    data = None
    no_start_trip = False
    data_was_masked = False
    device_or_block = None
def __init__(self, device_or_block):
    # Accepts either a live Device or an archived DataBlock; the slug
    # format decides which slug class parses it.
    self.device_or_block = device_or_block
    try:
        self.device_or_block_slug = IOTileDeviceSlug(device_or_block.slug)
    except ValueError:
        self.device_or_block_slug = IOTileBlockSlug(device_or_block.slug)
    # self.project_slug = IOTileProjectSlug(p_slug)
    self.s_temp = None
    self.s_humidity = None
    self.s_pressure = None
    self.s_events = None
    self.s_start = None
    self.s_end = None
    self.s_summary = None
    self.ts_start = None
    self.ts_end = None
    self.data = None
    self.no_start_trip = False
    self.data_was_masked = False
@classmethod
def compute_time_active(cls, df, condition_met_count):
    """
    This hard-to-name function is used to take a dataframe, and a condition_met_count
    representing the number of rows that meet some condition (e.g. df['value'] < 17).sum())
    The function uses the first and last index to determine the time delta between
    and with it, the average time delta between values.
    It then computes the amount of time where the given condition was met.
    This is used to compute, for example, the amount of time that a POD was under 17C
    or above 30C

    :param df: DataFrame with a 'value' column and timestamp index
    :param condition_met_count: Number of rows that meet condition
    :return: string representation of the datetime.timedelta
    """
    if not int(condition_met_count):
        # No rows met the condition: no time in condition.
        return '0:00:00'
    first_value = df.iloc[0].name
    last_value = df.iloc[-1].name
    delta = last_value - first_value
    count = df['value'].count()
    if int(count) < 2:
        # Fix: with fewer than two samples there is no time base
        # (the original code divided by zero here).
        return '0:00:00'
    # Average sample spacing (delta / (count - 1)) times the number of
    # samples in condition approximates the time spent in condition.
    time_in_condition = delta / int(count - 1) * int(condition_met_count)
    return str(time_in_condition.to_pytimedelta())
def _get_stream_slug_for(self, variable):
    """Return this device/block's stream slug for *variable* as a string."""
    return str(self.device_or_block.get_stream_slug_for(variable))
def _q_by_stream(self, stream_slug):
    """Create QuerySet filter with datetime ranges if available"""
    filters = Q(stream_slug=stream_slug)
    if self.ts_start:
        filters &= Q(timestamp__gte=get_ts_from_redshift(self.ts_start))
    if self.ts_end:
        filters &= Q(timestamp__lte=get_ts_from_redshift(self.ts_end))
    logger.info('--> {}'.format(filters))
    return filters
def add_stream(self, lid, stream):
    """Attach *stream* to the attribute mapped to *lid*; unknown LIDs are ignored."""
    attr_name = self._lid_map.get(lid)
    if attr_name is not None:
        setattr(self, attr_name, stream)
def _get_time_dataframe(self, stream):
    # Load the stream's data (restricted to the trip window via
    # _q_by_stream) into a timestamp-indexed frame, then apply the
    # stream's output MDO (unit conversion) to the 'value' column if any.
    qs = DataManager.df_filter_qs_using_q('data', self._q_by_stream(stream.slug))
    df = qs.to_dataframe(['value', ], index='timestamp')
    mdo = get_stream_output_mdo(stream)
    if mdo:
        try:
            df['value'] = df['value'].apply(lambda x: mdo.compute(x))
        except Exception as e:
            # Conversion failures abort the worker action explicitly.
            raise WorkerActionHardError(e)
    return df
def _compute_basic_env_stats(self, name, df, units):
    """Return labeled min/median/max of *df*, or {} when there is no data."""
    stats = df.agg(['min', 'median', 'max'])
    if stats.empty:
        return {}
    return {
        'Max {} ({})'.format(name, units): stats.loc['max'].values[0],
        'Min {} ({})'.format(name, units): stats.loc['min'].values[0],
        'Median {} ({})'.format(name, units): stats.loc['median'].values[0],
    }
def _compute_delta_v(self, x):
    """Return the delta-V term with the largest magnitude, keeping its sign.

    *x* is a row; self._delta_v_terms names the columns to consider.
    """
    terms = [x[term] for term in self._delta_v_terms]
    # Fix: max(terms)/min(terms) instead of max(*terms)/min(*terms) —
    # the starred form raises TypeError when only one term is configured.
    max_dv = max(terms)
    min_dv = min(terms)
    if max_dv > abs(min_dv):
        return max_dv
    return min_dv
def _compute_event_data(self, event_qs, sg_config_consts):
    # Build a DataFrame of per-event extra_data keyed by event timestamp,
    # then derive shock summary statistics (peak G, delta-V) from it.
    dt_index = pd.to_datetime([x.timestamp for x in event_qs])
    extra_data = [x.extra_data for x in event_qs]
    assert 'max_g_col' in sg_config_consts
    assert 'max_dv_col' in sg_config_consts
    max_g_col = sg_config_consts['max_g_col']
    max_dv_col = sg_config_consts['max_dv_col']
    df = pd.DataFrame(extra_data, index=dt_index)

    # For Saver backwards compatibility, look for alternative labels
    if max_g_col not in list(df):
        max_g_col = 'max_peak'
        # NOTE(review): nesting reconstructed from a whitespace-stripped
        # source — confirm the 'max_g' check is meant to run only when the
        # configured column is absent.
        if 'max_g' not in list(df):
            data = {
                'Max Peak (G)': 'Error: peak or max_g not found'
            }
            return data

    data = {
        'First event at (UTC)': dt_format(df.iloc[0].name),
        'Last event at (UTC)': dt_format(df.iloc[-1].name),
        'Event Count': int(df[max_g_col].count())
    }
    if self.no_start_trip:
        # For backwards compatibility, if there was no start trip, use first/last event for duration
        data['Duration (Days)'] = (df.iloc[-1].name - df.iloc[0].name) / datetime.timedelta(days=1)
    if max_g_col in list(df):
        max_g_idx = df[max_g_col].idxmax()
        if 'delta_v_terms' in sg_config_consts:
            # Scale each configured delta-V column by the configured multiplier.
            for col in sg_config_consts['delta_v_terms']:
                if col in list(df):
                    df[col] = df[col].apply(lambda x: x * sg_config_consts['delta_v_multiplier'])
            if max_dv_col not in list(df):
                # Derive the max delta-V column row-by-row when not present.
                self._delta_v_terms = sg_config_consts['delta_v_terms']
                df[max_dv_col] = df.apply(self._compute_delta_v, axis=1)
        max_dv_idx = df[max_dv_col].idxmax()
        data.update({
            'TimeStamp(MaxPeak) (UTC)': dt_format(max_g_idx),
            'Max Peak (G)': df[max_g_col].loc[max_g_idx].max(),
            'DeltaV at Max Peak (in/s)': df[max_dv_col].loc[max_g_idx].max(),
            'TimeStamp(MaxDeltaV) (UTC)': dt_format(max_dv_idx),
            'MaxDeltaV (in/s)': df[max_dv_col].loc[max_dv_idx].max(),
            'Peak at MaxDeltaV (G)': df[max_g_col].loc[max_dv_idx].max(),
        })
    return data
def _get_mask_event(self):
    """
    :return: Dict object if a mask has been set:
             {'start': '<datetime_str>', 'end': '<datetime_str>'}.
             None if not set
    """
    mask_stream_slug = self._get_stream_slug_for(SYSTEM_VID['DEVICE_DATA_MASK'])
    if not mask_stream_slug:
        return None
    # Most recent mask event wins.
    event = DataManager.filter_qs('event', stream_slug=mask_stream_slug).last()
    if not event:
        return None
    assert ('start' in event.extra_data)
    assert ('end' in event.extra_data)
    return event.extra_data
def calculate_trip_date_ranges(self):
    """
    Figure out the trip Start and End times:
    1. Check for TripStart and Trip End
    2. Check if TripMask is set. If so, use that (if within trip start/end
    :return: Nothing (sets self.ts_start / self.ts_end / flags in place)
    """
    start_trip_stream_slug = self._get_stream_slug_for(SYSTEM_VID['TRIP_START'])
    end_trip_stream_slug = self._get_stream_slug_for(SYSTEM_VID['TRIP_END'])
    # Ordered scan: a later record overwrites an earlier one, so the last
    # start/end markers (by streamer_local_id, then timestamp) win.
    qs = DataManager.filter_qs(
        'data',
        stream_slug__in=[start_trip_stream_slug, end_trip_stream_slug]
    ).order_by('streamer_local_id', 'timestamp')
    self.ts_start = self.ts_end = None
    for d in qs:
        if d.stream_slug == start_trip_stream_slug:
            self.ts_start = convert_to_utc(d.timestamp)
        if d.stream_slug == end_trip_stream_slug:
            self.ts_end = convert_to_utc(d.timestamp)
    # Check if the device has a data mask. If so, use instead
    mask_data = self._get_mask_event()
    if mask_data:
        if mask_data['start']:
            self.ts_start = str_to_dt_utc(mask_data['start'])
            self.data_was_masked = True
        if mask_data['end']:
            self.ts_end = str_to_dt_utc(mask_data['end'])
            self.data_was_masked = True
    if not self.ts_start:
        logger.info('No TripStart data found. Looking for oldest data')
        # For backwards compatibility, if no TRIP_START, look for the oldest Event or Data
        first_event = DataManager.filter_qs_using_q(
            'event',
            self._q_by_stream(self.s_events.slug)
        ).exclude(extra_data__has_key='error').first()
        first_temp = DataManager.filter_qs_using_q(
            'data',
            self._q_by_stream(self.s_temp.slug)
        ).first()
        # Pick whichever record is older; fall back to whichever exists.
        if first_event and first_temp:
            first = first_temp if convert_to_utc(first_temp.timestamp) < convert_to_utc(first_event.timestamp) else first_event
        else:
            first = first_temp or first_event
        if first:
            self.ts_start = convert_to_utc(first.timestamp)
            self.no_start_trip = True
        else:
            # No data of any kind: the trip range cannot be established.
            logger.warning('No TRIP_START or events found')
            return
    if self.ts_end and self.ts_end < self.ts_start:
        # This is the end of a previous trip. Ignore
        self.ts_end = None
    logger.info('Trip Date Range: {} to {}'.format(
        self.ts_start,
        self.ts_end if self.ts_end else 'NOW'
    ))
def _send_debug_info(self):
    """Log diagnostic details about why a trip produced no events."""
    parts = ['No Events Found: {}'.format(self.device_or_block_slug)]
    q = self._q_by_stream(self.s_events.slug)
    parts.append(' --> q= {}'.format(str(q)))
    # Count with the full per-stream filter (errors excluded)...
    event_qs = DataManager.filter_qs_using_q(
        'event',
        self._q_by_stream(self.s_events.slug)
    ).exclude(extra_data__has_key='error')
    parts.append('--> events.filter(q): {}'.format(event_qs.count()))
    # ...and again with a bare slug filter, to spot filter discrepancies.
    event_qs = DataManager.filter_qs_using_q(
        'event',
        Q(stream_slug=self.s_events.slug)
    )
    parts.append('--> events.filter(slug): {}'.format(event_qs.count()))
    if self.ts_start:
        parts.append('--> start (UTC): {}'.format(convert_to_utc(self.ts_start)))
    else:
        parts.append('--> start (UTC): Not Available')
    if self.ts_end:
        parts.append('--> end (UTC): {}'.format(convert_to_utc(self.ts_end)))
    else:
        parts.append('--> end (UTC): Not Available')
    logger.info('\n'.join(parts))
def calculate_trip_summary_data(self, sg_config):
    """Assemble the full trip summary dict and store it on ``self.data``.

    Combines event statistics (via ``_compute_event_data``) with basic
    environmental statistics for the temperature, humidity and pressure
    streams when those streams exist and carry data.

    Args:
        sg_config: Sensor-graph configuration dict; event statistics are
            only computed when it contains a 'consts' section.
    """
    data = {
        'Device': str(self.device_or_block_slug),
    }
    if self.s_events:
        q = self._q_by_stream(self.s_events.slug)
        logger.info('_q_by_stream({}) = {}'.format(self.s_events.slug, q))
        # Events flagged with an 'error' key are excluded from statistics.
        event_qs = DataManager.filter_qs_using_q(
            'event',
            self._q_by_stream(self.s_events.slug)
        ).exclude(extra_data__has_key='error')
        logger.info('--> Trip {} events: {}'.format(self.device_or_block_slug, event_qs.count()))
    else:
        # No event stream at all: report the error but keep going so the
        # environmental sections can still be filled in.
        event_qs = DataManager.none_qs('event')
        data['error'] = 'Error: s_events is None'
        logger.warning(data['error'])
    if self.ts_start and not self.no_start_trip:
        data['START (UTC)'] = dt_format(self.ts_start)
        if self.ts_end:
            data['END (UTC)'] = dt_format(self.ts_end)
            data['Duration (Days)'] = (self.ts_end - self.ts_start) / datetime.timedelta(days=1)
    else:
        data['START (UTC)'] = 'Not Available'
        data['END (UTC)'] = 'Not Available'
    assert 'START (UTC)' in data
    if self.data_was_masked:
        data['Notes'] = 'Trip Start and/or End was overwritten by a set device data mask'
    if event_qs.count():
        if 'consts' in sg_config:
            sg_config_consts = sg_config['consts']
            data.update(self._compute_event_data(event_qs, sg_config_consts))
    else:
        logger.warning('No events found')
        data['Max Peak (G)'] = 'Error: No events found'
        data['Event Count'] = 0
        self._send_debug_info()
    if self.s_temp:
        df = self._get_time_dataframe(self.s_temp)
        if not df.empty:
            # Compute time delta so we can show how much time the device was
            # above or below the required range
            data.update(self._compute_basic_env_stats('Temp', df, 'C'))
            data['Below 17C'] = TripSummary.compute_time_active(df=df, condition_met_count=(df['value'] < 17).sum())
            data['Above 30C'] = TripSummary.compute_time_active(df=df, condition_met_count=(df['value'] > 30).sum())
    else:
        logger.warning('No Temp stream found')
    if self.s_humidity:
        df = self._get_time_dataframe(self.s_humidity)
        if not df.empty:
            data.update(self._compute_basic_env_stats('Humidity', df, '% RH'))
    else:
        logger.warning('No Humidity stream found')
    if self.s_pressure:
        df = self._get_time_dataframe(self.s_pressure)
        if not df.empty:
            data.update(self._compute_basic_env_stats('Pressure', df, 'Mbar'))
    else:
        logger.warning('No Pressure stream found')
    self.data = data
class EndOfTripReportGenerator(ReportGenerator):
_trips = {}
def __init__(self, msgs, rpt, start, end, sources=None):
    """Build one TripSummary per resolvable source device/block slug."""
    super(EndOfTripReportGenerator, self).__init__(msgs, rpt, start, end, sources)
    self._trips = {}
    for source in (sources or []):
        obj = get_device_or_block(source)
        # Unresolvable sources are silently skipped.
        if obj:
            logger.info('creating new TripSummary for device {}'.format(obj.slug))
            self._trips[obj.slug] = TripSummary(obj)
def _email_template(self):
    """Return the template path prefix used to render the end-of-trip email."""
    return 'report/end_of_trip'
def _create_summary_event(self, trip):
if not trip.s_summary:
# Need to create Summary Stream
if isinstance(trip.device_or_block, Device):
device = trip.device_or_block
project = device.project
block = | |
the "Get Discoveries
by range" API.
Args:
id(basestring): Discovery ID.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(id, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'id': id,
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/discovery/${id}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_4c8cab5f435a80f4_v1_2_10', json_data)
def start_discovery(self,
                    cdpLevel=None,
                    discoveryType=None,
                    enablePasswordList=None,
                    globalCredentialIdList=None,
                    httpReadCredential=None,
                    httpWriteCredential=None,
                    ipAddressList=None,
                    ipFilterList=None,
                    lldpLevel=None,
                    name=None,
                    netconfPort=None,
                    noAddNewDevice=None,
                    parentDiscoveryId=None,
                    passwordList=None,
                    preferredMgmtIPMethod=None,
                    protocolOrder=None,
                    reDiscovery=None,
                    retry=None,
                    snmpAuthPassphrase=None,
                    snmpAuthProtocol=None,
                    snmpMode=None,
                    snmpPrivPassphrase=None,
                    snmpPrivProtocol=None,
                    snmpROCommunity=None,
                    snmpROCommunityDesc=None,
                    snmpRWCommunity=None,
                    snmpRWCommunityDesc=None,
                    snmpUserName=None,
                    snmpVersion=None,
                    timeout=None,
                    updateMgmtIp=None,
                    userNameList=None,
                    headers=None,
                    payload=None,
                    active_validation=True,
                    **request_parameters):
    """Initiate a network discovery job with the given parameters.

    All keyword arguments up to ``userNameList`` are the fields of the
    InventoryRequest body and are forwarded verbatim (after dropping
    ``None`` values) as the JSON payload.

    Args:
        headers(dict): Optional HTTP headers to send with the request.
        payload(dict): Extra JSON-serializable fields merged over the
            keyword-argument payload.
        active_validation(bool): When True (default), validate the payload
            against the request schema before sending.
        **request_parameters: Additional query parameters (forward
            compatibility).

    Returns:
        MyDict: JSON response; properties readable via dot or bracket
        notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, dict)
    if headers is not None:
        for header_name in ('Content-Type', 'X-Auth-Token'):
            if header_name in headers:
                check_type(headers.get(header_name),
                           basestring, may_be_none=False)
    _params = dict_from_items_with_values(dict(request_parameters))
    path_params = {}
    # Keyword arguments first, explicit payload dict overrides them.
    _payload = {
        'cdpLevel': cdpLevel,
        'discoveryType': discoveryType,
        'enablePasswordList': enablePasswordList,
        'globalCredentialIdList': globalCredentialIdList,
        'httpReadCredential': httpReadCredential,
        'httpWriteCredential': httpWriteCredential,
        'ipAddressList': ipAddressList,
        'ipFilterList': ipFilterList,
        'lldpLevel': lldpLevel,
        'name': name,
        'netconfPort': netconfPort,
        'noAddNewDevice': noAddNewDevice,
        'parentDiscoveryId': parentDiscoveryId,
        'passwordList': passwordList,
        'preferredMgmtIPMethod': preferredMgmtIPMethod,
        'protocolOrder': protocolOrder,
        'reDiscovery': reDiscovery,
        'retry': retry,
        'snmpAuthPassphrase': snmpAuthPassphrase,
        'snmpAuthProtocol': snmpAuthProtocol,
        'snmpMode': snmpMode,
        'snmpPrivPassphrase': snmpPrivPassphrase,
        'snmpPrivProtocol': snmpPrivProtocol,
        'snmpROCommunity': snmpROCommunity,
        'snmpROCommunityDesc': snmpROCommunityDesc,
        'snmpRWCommunity': snmpRWCommunity,
        'snmpRWCommunityDesc': snmpRWCommunityDesc,
        'snmpUserName': snmpUserName,
        'snmpVersion': snmpVersion,
        'timeout': timeout,
        'updateMgmtIp': updateMgmtIp,
        'userNameList': userNameList,
    }
    _payload.update(payload or {})
    _payload = dict_from_items_with_values(_payload)
    if active_validation:
        self._request_validator('jsd_55b439dc4239b140_v1_2_10')\
            .validate(_payload)
    endpoint_full_url = apply_path_params('/dna/intent/api/v1/discovery',
                                          path_params)
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload, headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)
    return self._object_factory('bpm_55b439dc4239b140_v1_2_10', json_data)
def create_snmp_write_community(self,
                                headers=None,
                                payload=None,
                                active_validation=True,
                                **request_parameters):
    """Add global SNMP write community credentials.

    Args:
        headers(dict): Optional HTTP headers to send with the request.
        payload(list): JSON-serializable list sent as the request body.
        active_validation(bool): When True (default), validate the payload
            against the request schema before sending.
        **request_parameters: Additional query parameters (forward
            compatibility).

    Returns:
        MyDict: JSON response; properties readable via dot or bracket
        notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, list)
    if headers is not None:
        for header_name in ('Content-Type', 'X-Auth-Token'):
            if header_name in headers:
                check_type(headers.get(header_name),
                           basestring, may_be_none=False)
    _params = dict_from_items_with_values(dict(request_parameters))
    _payload = payload or []
    if active_validation:
        self._request_validator('jsd_6bacb8d14639bdc7_v1_2_10')\
            .validate(_payload)
    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/global-credential/snmpv2-write-community', {})
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload, headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)
    return self._object_factory('bpm_6bacb8d14639bdc7_v1_2_10', json_data)
def create_http_write_credentials(self,
                                  headers=None,
                                  payload=None,
                                  active_validation=True,
                                  **request_parameters):
    """Add global HTTP write credentials.

    Args:
        headers(dict): Optional HTTP headers to send with the request.
        payload(list): JSON-serializable list sent as the request body.
        active_validation(bool): When True (default), validate the payload
            against the request schema before sending.
        **request_parameters: Additional query parameters (forward
            compatibility).

    Returns:
        MyDict: JSON response; properties readable via dot or bracket
        notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    check_type(payload, list)
    if headers is not None:
        for header_name in ('Content-Type', 'X-Auth-Token'):
            if header_name in headers:
                check_type(headers.get(header_name),
                           basestring, may_be_none=False)
    _params = dict_from_items_with_values(dict(request_parameters))
    _payload = payload or []
    if active_validation:
        self._request_validator('jsd_4d9ca8e2431a8a24_v1_2_10')\
            .validate(_payload)
    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/global-credential/http-write', {})
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload, headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)
    return self._object_factory('bpm_4d9ca8e2431a8a24_v1_2_10', json_data)
def get_network_devices_from_discovery(self,
                                       id,
                                       cli_status=None,
                                       http_status=None,
                                       ip_address=None,
                                       netconf_status=None,
                                       ping_status=None,
                                       snmp_status=None,
                                       sort_by=None,
                                       sort_order=None,
                                       task_id=None,
                                       headers=None,
                                       **request_parameters):
    """Return the network devices of a discovery job, filtered by status.

    The discovery ID can be obtained via the "Get Discoveries by range"
    API.

    Args:
        id(basestring): Discovery ID (required).
        task_id(basestring): taskId query parameter.
        sort_by(basestring): sortBy query parameter.
        sort_order(basestring): sortOrder query parameter.
        ip_address, ping_status, snmp_status, cli_status, netconf_status,
            http_status(basestring, list, set, tuple): per-protocol status
            filters, forwarded as query parameters.
        headers(dict): Optional HTTP headers to send with the request.
        **request_parameters: Additional query parameters (forward
            compatibility).

    Returns:
        MyDict: JSON response; properties readable via dot or bracket
        notation.

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the DNA Center cloud returns an error.
    """
    check_type(headers, dict)
    for scalar_arg in (task_id, sort_by, sort_order):
        check_type(scalar_arg, basestring)
    for multi_arg in (ip_address, ping_status, snmp_status, cli_status,
                      netconf_status, http_status):
        check_type(multi_arg, (basestring, list, set, tuple))
    check_type(id, basestring, may_be_none=False)
    if headers is not None and 'X-Auth-Token' in headers:
        check_type(headers.get('X-Auth-Token'),
                   basestring, may_be_none=False)
    _params = {
        'taskId': task_id,
        'sortBy': sort_by,
        'sortOrder': sort_order,
        'ipAddress': ip_address,
        'pingStatus': ping_status,
        'snmpStatus': snmp_status,
        'cliStatus': cli_status,
        'netconfStatus': netconf_status,
        'httpStatus': http_status,
    }
    _params.update(request_parameters)
    _params = dict_from_items_with_values(_params)
    endpoint_full_url = apply_path_params(
        '/dna/intent/api/v1/discovery/${id}/summary', {'id': id})
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        json_data = self._session.get(endpoint_full_url, params=_params,
                                      headers=_headers)
    else:
        json_data = self._session.get(endpoint_full_url, params=_params)
    return self._object_factory('bpm_3d9b99c343398a27_v1_2_10', json_data)
def update_global_credentials(self,
global_credential_id,
siteUuids=None,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Update global credential for network devices in site(s).
Args:
siteUuids(list): SitesInfoDTO's siteUuids (list of strings).
global_credential_id(basestring): Global credential Uuid.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
payload(dict): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: | |
Det N_common_animate_dobj PP_loc [0.5]
NP_animate_iobj -> Det N_common_animate_iobj [0.5] | N_prop_iobj [0.5]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_on -> Det N_on PP_loc [0.1] | Det N_on [0.9]
NP_in -> Det N_in PP_loc [0.1] | Det N_in [0.9]
NP_beside -> Det N_beside PP_loc [0.1] | Det N_beside [0.9]
N_on -> {on_nouns_str}
N_in -> {in_nouns_str}
N_beside -> {beside_nouns_str}
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
AUX -> 'was' [1.0]
BY -> 'by' [1.0]
N_common_animate_dobj -> {target_item_str}
N_common_animate_iobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_iobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_trans_omissible -> {V_trans_omissible_str}
V_trans_not_omissible -> {V_trans_not_omissible_str}
V_cp_taking -> {V_cp_taking_str}
V_inf_taking -> {V_inf_taking_str}
V_unacc -> {V_unacc_str}
V_unerg -> {V_unerg_str}
V_inf -> {V_inf_str}
V_dat -> {V_dat_str}
PP_iobj -> P_iobj NP_animate_iobj [1.0]
PP_loc -> P_on NP_on [0.333] | P_in NP_in [0.333] | P_beside NP_beside [0.334]
P_iobj -> 'to' [1.0]
P_on -> 'on' [1.0]
P_in -> 'in' [1.0]
P_beside -> 'beside' [1.0]
INF -> 'to' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
inanimate_nouns_str=inanimate_nouns_str,
proper_nouns_str=proper_nouns_str,
in_nouns_str=in_nouns_str,
on_nouns_str=on_nouns_str,
beside_nouns_str=beside_nouns_str,
V_trans_omissible_str=V_trans_omissible_str,
V_trans_not_omissible_str=V_trans_not_omissible_str,
V_cp_taking_str=V_cp_taking_str,
V_inf_taking_str=V_inf_taking_str,
V_unacc_str=V_unacc_str,
V_unerg_str=V_unerg_str,
V_inf_str=V_inf_str,
V_dat_str=V_dat_str,
target_item_str='{}'
)
# PCFG (NLTK fromstring format) restricted so the direct object is always a
# proper noun drawn from the target item list. target_item_str='{}' leaves a
# literal "{}" in the result, filled in later with a second .format() call.
proper_noun_object_grammar_str = """
S -> NP_animate_nsubj VP_external [1.0]
VP_external -> V_unacc NP_dobj [0.18] \
| V_trans_omissible NP_dobj [0.18] | V_trans_not_omissible NP_dobj [0.18] \
| V_dat NP_dobj PP_iobj [0.18] | V_dat NP_animate_iobj NP_dobj [0.18] \
| V_cp_taking C S [0.1]
NP_dobj -> N_prop_dobj [1.0]
NP_animate_iobj -> Det N_common_animate_iobj [0.5] | N_prop_iobj [0.5]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.50]
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
AUX -> 'was' [1.0]
BY -> 'by' [1.0]
N_common_animate_iobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_iobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
N_prop_dobj -> {target_item_str}
V_trans_omissible -> {V_trans_omissible_str}
V_trans_not_omissible -> {V_trans_not_omissible_str}
V_cp_taking -> {V_cp_taking_str}
V_inf_taking -> {V_inf_taking_str}
V_unacc -> {V_unacc_str}
V_unerg -> {V_unerg_str}
V_inf -> {V_inf_str}
V_dat -> {V_dat_str}
PP_iobj -> P NP_animate_iobj [1.0]
P -> 'to' [1.0]
INF -> 'to' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           V_trans_omissible_str=V_trans_omissible_str,
           V_trans_not_omissible_str=V_trans_not_omissible_str,
           V_cp_taking_str=V_cp_taking_str,
           V_inf_taking_str=V_inf_taking_str,
           V_unacc_str=V_unacc_str,
           V_unerg_str=V_unerg_str,
           V_inf_str=V_inf_str,
           V_dat_str=V_dat_str,
           target_item_str='{}'
           )
# # Subset of main grammar that only generates target infinitival constructions
# In[ ]:
# Subset of the main grammar that only generates the target infinitival
# construction ("... <V_inf_taking> to <target verb>"); the target verb slot
# is left as "{}" for a later .format() call.
infinitival_verb_grammar_str = """
S -> NP_animate_nsubj VP_external [1.0]
VP_external -> V_inf_taking INF V_inf [0.96] \
| V_cp_taking C S [0.04]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
N_common_animate_nsubj -> {animate_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_cp_taking -> {V_cp_taking_str}
V_inf_taking -> {V_inf_taking_str}
V_inf -> {target_item_str}
INF -> 'to' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           V_cp_taking_str=V_cp_taking_str,
           V_inf_taking_str=V_inf_taking_str,
           target_item_str='{}'
           )
# # Subset of main grammar that generates a target transitive with D.O.
# In[ ]:
# Subset of the main grammar that generates a target transitive verb with a
# direct object; the verb slot is left as "{}" for a later .format() call.
# Fixed: the VP_external rule contained "| |" (an empty alternative), which
# nltk.PCFG.fromstring rejects as unparseable.
transitive_with_object_grammar_str = """
S -> NP_animate_nsubj VP_external [1.0]
VP_external -> V_trans NP_dobj [0.91] | V_cp_taking C S [0.09]
NP_dobj -> NP_inanimate_dobj [0.5] | NP_animate_dobj [0.5]
NP_animate_dobj -> Det N_common_animate_dobj [0.25] | Det N_common_animate_dobj PP_loc [0.25] \
| N_prop_dobj [0.50]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_inanimate_dobj -> Det N_common_inanimate_dobj [0.5] | Det N_common_inanimate_dobj PP_loc [0.5]
NP_on -> Det N_on PP_loc [0.1] | Det N_on [0.9]
NP_in -> Det N_in PP_loc [0.1] | Det N_in [0.9]
NP_beside -> Det N_beside PP_loc [0.1] | Det N_beside [0.9]
N_on -> {on_nouns_str}
N_in -> {in_nouns_str}
N_beside -> {beside_nouns_str}
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
N_common_animate_dobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_dobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_cp_taking -> {V_cp_taking_str}
V_trans -> {target_item_str}
PP_iobj -> P_iobj NP_animate_iobj [1.0]
PP_loc -> P_on NP_on [0.333] | P_in NP_in [0.333] | P_beside NP_beside [0.334]
P_iobj -> 'to' [1.0]
P_on -> 'on' [1.0]
P_in -> 'in' [1.0]
P_beside -> 'beside' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           in_nouns_str=in_nouns_str,
           on_nouns_str=on_nouns_str,
           beside_nouns_str=beside_nouns_str,
           target_item_str='{}',
           V_cp_taking_str=V_cp_taking_str
           )
# # Subsets of main grammar that only generate target transitives with animate subjects (i.e., agents)
# In[ ]:
# Subset of the main grammar that only generates target transitives with an
# animate (agent) subject; the targeted subject noun slot is left as "{}".
# NOTE(review): the nonterminal is (confusingly) named
# "V_trans_not_omissible_str" inside the grammar itself; it still parses.
transitive_with_animate_subject_grammar_str = """
S -> NP_animate_nsubj_targeted VP_external [0.9] | NP_animate_nsubj VP_CP [0.1]
VP_external -> V_trans_not_omissible_str NP_dobj [1.0]
VP_CP -> V_cp_taking C S [1.0]
NP_dobj -> NP_inanimate_dobj [0.5] | NP_animate_dobj [0.5]
NP_animate_dobj -> Det N_common_animate_dobj [0.25] | Det N_common_animate_dobj PP_loc [0.25] \
| N_prop_dobj [0.50]
NP_animate_nsubj_targeted -> Det N_common_animate_nsubj_targeted [1.0]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_inanimate_dobj -> Det N_common_inanimate_dobj [0.5] | Det N_common_inanimate_dobj PP_loc [0.5]
NP_on -> Det N_on PP_loc [0.1] | Det N_on [0.9]
NP_in -> Det N_in PP_loc [0.1] | Det N_in [0.9]
NP_beside -> Det N_beside PP_loc [0.1] | Det N_beside [0.9]
N_on -> {on_nouns_str}
N_in -> {in_nouns_str}
N_beside -> {beside_nouns_str}
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
N_common_animate_dobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_animate_nsubj_targeted -> {target_item_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_dobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_cp_taking -> {V_cp_taking_str}
V_trans_not_omissible_str -> {V_trans_not_omissible_str}
PP_iobj -> P_iobj NP_animate_iobj [1.0]
PP_loc -> P_on NP_on [0.333] | P_in NP_in [0.333] | P_beside NP_beside [0.334]
P_iobj -> 'to' [1.0]
P_on -> 'on' [1.0]
P_in -> 'in' [1.0]
P_beside -> 'beside' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           in_nouns_str=in_nouns_str,
           on_nouns_str=on_nouns_str,
           beside_nouns_str=beside_nouns_str,
           target_item_str='{}',
           V_cp_taking_str=V_cp_taking_str,
           V_trans_not_omissible_str=V_trans_not_omissible_str
           )
# Subset of the main grammar generating unaccusative verbs whose surface
# subject is the targeted animate noun ("{}" slot, filled in later).
unaccusative_with_animate_subject_grammar_str = """
S -> NP_animate_nsubj VP_external [0.05] | VP_internal [0.95]
VP_external -> V_cp_taking C S [1.0]
VP_internal -> NP_dobj V_unacc [1.0]
NP_dobj -> NP_animate_dobj [1.0]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_animate_dobj -> Det N_common_animate_dobj [1.0]
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
N_common_animate_nsubj -> {animate_nouns_str}
N_common_animate_dobj -> {target_item_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_cp_taking -> {V_cp_taking_str}
V_unacc -> {V_unacc_str}
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           V_cp_taking_str=V_cp_taking_str,
           V_unacc_str=V_unacc_str,
           target_item_str='{}'
           )
# # Grammar with high CP probability (for CP recursion cases; structural gen)
# In[ ]:
# Variant of the main grammar with a very high probability (0.96) of the
# CP-taking expansion, used to generate deep CP recursion for the structural
# generalization split.
cp_high_prob_grammar_str = """
S -> NP_animate_nsubj VP_external [0.01] | VP_internal [0.01] \
| NP_inanimate_nsubjpass VP_passive [0.01] | NP_animate_nsubjpass VP_passive_dat [0.01] \
| NP_animate_nsubj VP_CP [0.96]
VP_CP -> V_cp_taking C S [1.0]
VP_external -> V_unerg [0.125] | V_unacc NP_dobj [0.125] \
| V_trans_omissible [0.125] | V_trans_omissible NP_dobj [0.125] \
| V_trans_not_omissible NP_dobj [0.125] | V_inf_taking INF V_inf [0.125] \
| V_dat NP_inanimate_dobj PP_iobj [0.125] | V_dat NP_animate_iobj NP_inanimate_dobj [0.125]
VP_internal -> NP_unacc_subj V_unacc [1.0]
VP_passive -> AUX V_trans_not_omissible_pp [0.125] | AUX V_trans_not_omissible_pp BY NP_animate_nsubj [0.125] | \
AUX V_trans_omissible_pp [0.125] | AUX V_trans_omissible_pp BY NP_animate_nsubj [0.125] | \
AUX V_unacc_pp [0.125] | AUX V_unacc_pp BY NP_animate_nsubj [0.125] | \
AUX V_dat_pp PP_iobj [0.125] | AUX V_dat_pp PP_iobj BY NP_animate_nsubj [0.125]
VP_passive_dat -> AUX V_dat_pp NP_inanimate_dobj [0.5] | AUX V_dat_pp NP_inanimate_dobj BY NP_animate_nsubj [0.5]
NP_dobj -> NP_inanimate_dobj [0.5] | NP_animate_dobj [0.5]
NP_unacc_subj -> NP_inanimate_dobj_noPP [0.5] | NP_animate_dobj_noPP [0.5]
NP_animate_dobj_noPP -> Det N_common_animate_dobj [0.5] | N_prop_dobj [0.5]
NP_animate_dobj -> Det N_common_animate_dobj [0.25] | Det N_common_animate_dobj PP_loc [0.25] \
| N_prop_dobj [0.50]
NP_animate_iobj -> Det N_common_animate_iobj [0.5] | N_prop_iobj [0.5]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_animate_nsubjpass -> Det N_common_animate_nsubjpass [0.5] | N_prop_nsubjpass [0.5]
NP_inanimate_dobj -> Det N_common_inanimate_dobj [0.5] | Det N_common_inanimate_dobj PP_loc [0.5]
NP_inanimate_dobj_noPP -> Det N_common_inanimate_dobj [1.0]
NP_inanimate_nsubjpass -> Det N_common_inanimate_nsubjpass [1.0]
NP_on -> Det N_on PP_loc [0.1] | Det N_on [0.9]
NP_in -> Det N_in PP_loc [0.1] | Det N_in [0.9]
NP_beside -> Det N_beside PP_loc [0.1] | Det N_beside [0.9]
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
AUX -> 'was' [1.0]
BY -> 'by' [1.0]
N_on -> {on_nouns_str}
N_in -> {in_nouns_str}
N_beside -> {beside_nouns_str}
N_common_animate_dobj -> {animate_nouns_str}
N_common_animate_iobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_animate_nsubjpass -> {animate_nouns_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_common_inanimate_nsubjpass -> {inanimate_nouns_str}
N_prop_dobj -> {proper_nouns_str}
N_prop_iobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
N_prop_nsubjpass -> {proper_nouns_str}
V_trans_omissible -> {V_trans_omissible_str}
V_trans_omissible_pp -> {V_trans_omissible_pp_str}
V_trans_not_omissible -> {V_trans_not_omissible_str}
V_trans_not_omissible_pp -> {V_trans_not_omissible_pp_str}
V_cp_taking -> {V_cp_taking_str}
V_inf_taking -> {V_inf_taking_str}
V_unacc -> {V_unacc_str}
V_unacc_pp -> {V_unacc_pp_str}
V_unerg -> {V_unerg_str}
V_inf -> {V_inf_str}
V_dat -> {V_dat_str}
V_dat_pp -> {V_dat_pp_str}
PP_iobj -> P_iobj NP_animate_iobj [1.0]
PP_loc -> P_on NP_on [0.333] | P_in NP_in [0.333] | P_beside NP_beside [0.334]
P_iobj -> 'to' [1.0]
P_on -> 'on' [1.0]
P_in -> 'in' [1.0]
P_beside -> 'beside' [1.0]
INF -> 'to' [1.0]
""".format(animate_nouns_str=animate_nouns_str,
           inanimate_nouns_str=inanimate_nouns_str,
           proper_nouns_str=proper_nouns_str,
           in_nouns_str=in_nouns_str,
           on_nouns_str=on_nouns_str,
           beside_nouns_str=beside_nouns_str,
           V_trans_omissible_str=V_trans_omissible_str,
           V_trans_omissible_pp_str=V_trans_omissible_pp_str,
           V_trans_not_omissible_str=V_trans_not_omissible_str,
           V_trans_not_omissible_pp_str=V_trans_not_omissible_pp_str,
           V_cp_taking_str=V_cp_taking_str,
           V_inf_taking_str=V_inf_taking_str,
           V_unacc_str=V_unacc_str,
           V_unacc_pp_str=V_unacc_pp_str,
           V_unerg_str=V_unerg_str,
           V_inf_str=V_inf_str,
           V_dat_str=V_dat_str,
           V_dat_pp_str=V_dat_pp_str
           )
# # Grammar with high PP in object position (for PP recursion cases; structural gen)
# In[ ]:
obj_pp_high_prob_grammar_str = """
S -> NP_animate_nsubj VP_external [0.90] | NP_animate_nsubj VP_CP [0.10]
VP_CP -> V_cp_taking C S [1.0]
VP_external -> V_unacc NP_dobj [0.2] | V_trans_omissible NP_dobj [0.2] \
| V_trans_not_omissible NP_dobj [0.2] \
| V_dat NP_inanimate_dobj PP_iobj [0.2] | V_dat NP_animate_iobj NP_inanimate_dobj [0.2]
NP_dobj -> NP_inanimate_dobj [0.5] | NP_animate_dobj [0.5]
NP_animate_dobj -> Det N_common_animate_dobj PP_loc [1.0]
NP_animate_iobj -> Det N_common_animate_iobj [0.5] | N_prop_iobj [0.5]
NP_animate_nsubj -> Det N_common_animate_nsubj [0.5] | N_prop_nsubj [0.5]
NP_inanimate_dobj -> Det N_common_inanimate_dobj PP_loc [1.0]
NP_on -> Det N_on PP_loc [0.9] | Det N_on [0.1]
NP_in -> Det N_in PP_loc [0.9] | Det N_in [0.1]
NP_beside -> Det N_beside PP_loc [0.9] | Det N_beside [0.1]
Det -> 'the' [0.5] | 'a' [0.5]
C -> 'that' [1.0]
AUX -> 'was' [1.0]
BY -> 'by' [1.0]
N_on -> {on_nouns_str}
N_in -> {in_nouns_str}
N_beside -> {beside_nouns_str}
N_common_animate_dobj -> {animate_nouns_str}
N_common_animate_iobj -> {animate_nouns_str}
N_common_animate_nsubj -> {animate_nouns_str}
N_common_inanimate_dobj -> {inanimate_nouns_str}
N_prop_dobj -> {proper_nouns_str}
N_prop_iobj -> {proper_nouns_str}
N_prop_nsubj -> {proper_nouns_str}
V_trans_omissible -> {V_trans_omissible_str}
V_trans_not_omissible -> {V_trans_not_omissible_str}
V_cp_taking -> {V_cp_taking_str}
V_unacc -> {V_unacc_str}
V_dat -> {V_dat_str}
PP_iobj -> P_iobj NP_animate_iobj [1.0]
PP_loc -> P_on NP_on [0.333] | P_in NP_in [0.333] | P_beside NP_beside [0.334]
P_iobj -> | |
#!/usr/bin/env python3
import hashlib
import json
import jsonschema
import os
import pathlib
import requests
import shutil
import stat
import sys
# Fall back to baked-in defaults when the optional xpload_config module
# (normally generated at install time) is not importable.
try:
    from xpload_config import *
except ImportError:
    __version__ = "0.0.0-notinstalled"
    XPLOAD_CONFIG_SEARCH_PATHS = [".", "config"]
    pass
# JSON Schema for generic API responses: either a single "entry" object or
# an array of entries. An entry carries an integer "id", a "name" that may
# be a string or a list of strings, and a free-text "detail". No field is
# marked "required", so partial entries validate successfully.
general_schema = {
    "definitions" : {
        "entry": {
            "properties" : {
                "id" : {"type" : "integer"},
                "name" : {
                    "oneOf": [
                        {"type": "string"},
                        {"type": "array", "items": {"type": "string"}}
                    ]
                },
                "detail" : {"type" : "string"}
            }
        }
    },
    "oneOf": [
        {"type": "object", "$ref": "#/definitions/entry"},
        {"type": "array", "items": {"$ref": "#/definitions/entry"}}
    ]
}
# JSON Schema for the staged tags file (.xpload/tags.json): an array of tag
# entries, each requiring a name, type, status and a list of domain names.
tags_schema = {
    "type": "array",
    "items": {
        "properties" : {
            "name" : {"type" : "string"},
            "type" : {"type" : "string"},
            "status" : {"type" : "string"},
            "domains" : {"type" : "array", "items": {"type": "string"}}
        },
        "required": ["name", "type", "status", "domains"]
    }
}
from collections import namedtuple
def nestednamedtuple(obj):
    """Recursively convert *obj* into namedtuples and lists.

    Mappings become ``NNTuple`` namedtuples (falling back to a plain dict
    when the keys cannot serve as field names), list/set/tuple/frozenset
    become lists, and any other value is returned unchanged.
    """
    if isinstance(obj, dict):
        tuple_cls = namedtuple(typename='NNTuple', field_names=obj.keys())
        converted = {str(key): nestednamedtuple(obj[key]) for key in obj.keys()}
        try:
            return tuple_cls(**converted)
        except TypeError:
            # Keys not usable as namedtuple fields -- keep a plain dict.
            return dict(**converted)
    if isinstance(obj, (list, set, tuple, frozenset)):
        return [nestednamedtuple(element) for element in obj]
    return obj
def _vlprint(minverb, msg):
    """Print *msg* when the configured verbosity is at least *minverb*.

    At verbosity >= 3 the message is prefixed with its own level ("VLn:")
    so interleaved output can be attributed.
    """
    if cfg.verbosity >= minverb:
        # Emit the prefix only together with its message. The original
        # printed the prefix whenever verbosity >= 3, which left a dangling,
        # newline-less "VLn:" when minverb exceeded the verbosity.
        if cfg.verbosity >= 3:
            print(f"VL{minverb}:", end="")
        print(msg)
class Config(namedtuple('Config', ['cfgf', 'host', 'port', 'apiroot', 'apiver', 'path', 'verbosity'])):
    """Immutable connection settings loaded from a json config file."""
    __slots__ = ()

    def __new__(cls, cfgf, **kwargs):
        # Silently drop unknown keys coming from the config file, then
        # record which file the settings were read from.
        known = {name: value for name, value in kwargs.items() if name in cls._fields}
        known['cfgf'] = cfgf
        return super().__new__(cls, **known)

    def url(self):
        """Base URL of the API server, e.g. http://host:port/apiroot."""
        return "http://" + self.host + ':' + self.port + self.apiroot
def read_config(config_name, verbosity: int = None):
    """Read database parameters from a json config file.

    A config_name containing "." or "/" is treated as an explicit path;
    otherwise "<config_name>.json" (or "$XPLOAD_CONFIG.json") is searched
    for in $XPLOAD_CONFIG_DIR or the default search paths. Returns a
    Config namedtuple, or [] when no config file is found.
    """
    # Use user supplied config as is if it looks like a "path"
    if "." in config_name or "/" in config_name:
        with open(f"{config_name}") as cfgf:
            return json.load(cfgf, object_hook=lambda d: Config(os.path.realpath(cfgf.name), **d))
    XPLOAD_CONFIG_DIR = os.getenv('XPLOAD_CONFIG_DIR', "").rstrip("/")
    XPLOAD_CONFIG = os.getenv('XPLOAD_CONFIG', "prod")
    search_paths = [XPLOAD_CONFIG_DIR] if XPLOAD_CONFIG_DIR else XPLOAD_CONFIG_SEARCH_PATHS
    if config_name:
        config_file = f"{config_name}.json"
    else:
        config_file = f"{XPLOAD_CONFIG}.json"
    config = []
    for config_path in search_paths:
        try:
            with open(f"{config_path}/{config_file}") as cfgf:
                config = json.load(cfgf, object_hook=lambda d: Config(os.path.realpath(cfgf.name), **d))
            break  # first readable config wins
        except (OSError, json.JSONDecodeError):
            # Missing or unparsable file in this path; try the next one.
            # (Narrowed from a bare "except:".)
            continue
    if config:
        if verbosity:
            # Config is a namedtuple: item assignment is unsupported, so
            # build an updated copy. (The original `config['verbosity'] = ...`
            # raised TypeError whenever a verbosity override was given.)
            config = config._replace(verbosity=verbosity)
    else:
        print(f"Error: Cannot find config file {config_file} in", search_paths)
    return config
def _get_data(endpoint: str, params: dict = None):
    """Send a GET request to the endpoint and return the decoded JSON.

    Raises RuntimeError on any transport or HTTP error.
    """
    if params is None:  # avoid a shared mutable default argument
        params = {}
    endpoints = ['gtPayloadLists']
    # Accept only known endpoint families (substring match, so paths like
    # "gtPayloadLists/<tag>" pass).
    for ep in endpoints:
        if ep not in endpoint:
            raise RuntimeError(f"Wrong endpoint {endpoint}")
    url = cfg.url() + "/" + endpoint
    # Log the equivalent curl arguments at verbosity level 3
    _vlprint(3, f"-H 'Content-Type: application/json' -X GET -d '{json.dumps(params)}' {url}")
    # Predefine so the except clause can always reference it; the original
    # hit UnboundLocalError when requests.get() itself raised.
    respjson = None
    try:
        response = requests.get(url=url, json=params)
        response.raise_for_status()
        respjson = response.json()
    except Exception as e:
        respmsg = f"{json.dumps(respjson)} " if respjson else ""
        raise RuntimeError(f"Unexpected response for GET {json.dumps(params)} {url}: " + respmsg + repr(e))
    return respjson
def _post_data(endpoint: str, params: dict):
    """ Post data to the endpoint """
    if endpoint not in ['gttype', 'gtstatus', 'gt', 'pt', 'pl', 'piov', 'pil', 'tag']:
        raise RuntimeError(f"Wrong endpoint {endpoint}")
    url = cfg.url() + "/" + endpoint
    # Log the equivalent curl arguments at verbosity level 3
    _vlprint(3, f"-H 'Content-Type: application/json' -X POST -d '{json.dumps(params)}' {url}")
    respjson = None
    try:
        response = requests.post(url=url, json=params)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # 4xx responses are tolerated (e.g. "entity already exists");
            # anything outside [400, 500) is re-raised and reported below.
            if e.response.status_code < 400 or e.response.status_code >= 500:
                raise
        respjson = response.json()
        # Response must match the generic single-entry/array schema
        jsonschema.validate(respjson, general_schema)
    except Exception as e:
        respmsg = f"{json.dumps(respjson)} " if respjson else ""
        raise RuntimeError(f"Unexpected response for POST '{json.dumps(params)}' {url}: " + respmsg + repr(e))
    return respjson
def _put_data(endpoint: str, params: dict):
    """Send a PUT request to one of the allowed attach/update endpoints.

    Returns the entity's 'name' (or its 'id' when no name is present).
    Raises RuntimeError on any transport, HTTP, or schema error.
    """
    allowed = ('pl_attach', 'piov_attach', 'gt_change_status')
    if endpoint not in allowed:
        raise RuntimeError(f"Wrong endpoint {endpoint}")
    url = f"{cfg.url()}/{endpoint}"
    # Log the equivalent curl arguments at verbosity level 3
    _vlprint(3, f"-H 'Content-Type: application/json' -X PUT -d '{json.dumps(params)}' {url}")
    respjson = None
    try:
        response = requests.put(url=url, json=params)
        response.raise_for_status()
        respjson = response.json()
        jsonschema.validate(respjson, general_schema)
    except Exception as e:
        respmsg = f"{json.dumps(respjson)} " if respjson else ""
        raise RuntimeError(f"Unexpected response for PUT '{json.dumps(params)}' {url}: " + respmsg + repr(e))
    if 'name' in respjson:
        return respjson['name']
    return respjson['id']
def create_and_link_tag(tag_name: str, tag_type: str, tag_status: str, domains: list):
    """Create a global tag (plus its type/status) and attach one payload
    list per domain. Returns 'ok' on success, or the server's tag name."""
    # Posting existing entities is tolerated by the server (see _post_data)
    _post_data('gttype', {"name": tag_type})
    _post_data('gtstatus', {"name": tag_status})
    response = _post_data('gt', {"name": tag_name, "status": tag_status, "type": tag_type})['name']
    for domain in domains:
        _post_data('pt', {"name": domain})
        try:
            # Reuse an existing payload list for this tag/domain, if any
            pill = _get_data(f'gtPayloadLists/{tag_name}')[domain]
        except:
            # Otherwise create a fresh payload list for this domain
            pill = _post_data('pl', {"payload_type": domain})['name']
        _put_data('pl_attach', {"global_tag": tag_name, "payload_list": pill})
    # Entities created if not existed but it is not an error
    return 'ok' if response == tag_name else response
def create_and_link_pil(tag: str, domain: str, name: str, start: int, end: int, dry_run: bool):
    """Create a payload interval (piov) for payload `name` and attach it to
    the payload list of `tag`/`domain`.

    Returns 'ok' on success; returns None without contacting the server
    when dry_run is set. Raises RuntimeError when the tag/domain pair does
    not exist.
    """
    try:
        pill = _get_data(f'gtPayloadLists/{tag}')[domain]
    except Exception as e:
        raise RuntimeError(f"{tag}/{domain} does not exist: " + repr(e))
    if dry_run: return
    # major_iov is fixed at 0; intervals are tracked via minor_iov only
    params = {"payload_url": name, "minor_iov": start, "major_iov": 0}
    if end:
        params["minor_iov_end"] = end
    piov_id = _post_data('piov', params)['id']
    _put_data('piov_attach', {"piov_id": piov_id, "payload_list": pill})
    return 'ok'
def form_api_url(component: str, uid: int = None):
    """Translate a user-facing component name into its API endpoint URL,
    optionally appending an entity id."""
    suffix_by_component = {
        'tags': "/gt",
        'tag_types': "/gttype",
        'tag_statuses': "/gtstatus",
        'domains': "/pt",
        'domain_lists': "/pl",
        'payloads': "/piov",
    }
    url = cfg.url()
    try:
        url += suffix_by_component[component]
    except KeyError:
        # Keep returning the bare base URL, matching historical behaviour
        print(f"Error: Wrong component {component}. Cannot form valid URL")
    if uid is not None:
        url += f"/{uid}"
    return url
def fetch_entries(component: str, uid: int = None):
    """Fetch entries for `component` (optionally a single `uid`).

    Always returns a list; an empty list signals a transport or
    validation failure (already reported to stdout).
    """
    url = form_api_url(component, uid)
    try:
        response = requests.get(url)
        respjson = response.json()
    except Exception:
        # Narrowed from a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt.
        print(f"Error: Something went wrong while looking for {component} at {url}")
        return []
    # Always return a list
    entries = respjson if isinstance(respjson, list) else [respjson]
    try:
        jsonschema.validate(entries, general_schema)
    except jsonschema.ValidationError:
        error_details = f": {component} may not contain entry with id={uid}" if uid else ""
        print(f"Error: Encountered invalid response from {url}", error_details)
        return []
    return entries
def payload_exists(payload_name: str) -> pathlib.Path:
    """Return the full path of *payload_name* under the first configured
    prefix that contains it, or None when no prefix does."""
    search_dirs = cfg.path if isinstance(cfg.path, list) else [cfg.path]
    candidates = (pathlib.Path(d) / payload_name for d in search_dirs)
    return next((c for c in candidates if c.exists()), None)
def payload_copy(payload_file: pathlib.Path, prefixes: list, domain: str, dry_run=False) -> pathlib.Path:
    """ Copies `payload_file` to the first valid `prefix` from the `prefixes` list

    The destination is "<prefix>/<domain>/<md5>_<basename>". Returns the
    destination path (computed but not created when dry_run). Raises
    FileNotFoundError when the source is missing and RuntimeError when no
    prefix is writable or the copy fails verification.
    """
    # Check if file exists
    if not payload_file.exists():
        # Was FileExistsError, which misstates the failure; both derive from
        # OSError, so callers catching OSError are unaffected.
        raise FileNotFoundError("File not found: " + str(payload_file))
    # Check if at least one prefix exists and is writeable
    good_prefixes = [prefix for prefix in prefixes if prefix.exists() and os.stat(prefix).st_mode & (stat.S_IXUSR | stat.S_IWUSR)]
    if not good_prefixes:
        raise RuntimeError("No writable prefix provided: " + ":".join(map(str, prefixes)))
    # The first good prefix is the prefix
    prefix = good_prefixes[0]
    # Name the payload after its checksum plus basename; read_bytes() closes
    # the handle (the original left a file object open)
    md5sum = hashlib.md5(payload_file.read_bytes()).hexdigest()
    payload_name = f"{md5sum}_{payload_file.name}"
    destination = prefix/domain/payload_name
    if dry_run:
        return destination
    destination.parent.mkdir(parents=True, exist_ok=True)
    # XXX Check destination file already exists?
    shutil.copyfile(payload_file, destination)
    # Verify the copy by re-hashing the destination
    md5sum_dst = hashlib.md5(destination.read_bytes()).hexdigest()
    if md5sum != md5sum_dst:
        # Single formatted message (the original passed two args to
        # RuntimeError, yielding a tuple-style message)
        raise RuntimeError("Failed to copy payload file to " + str(destination))
    return destination
def add_tag(tag_name: str, tag_type: str, tag_status: str, tag_domains: list = None):
    """Stage a tag entry (name/type/status/domains) in .xpload/tags.json.

    Any previously staged entry with the same name is replaced. Raises
    RuntimeError when the existing stage file is invalid.
    """
    # Avoid a shared mutable default argument, and deduplicate
    # deterministically (list(set(...)) produced an arbitrary order)
    tag_domains = sorted(set(tag_domains or []))
    # Use staged tags if exist
    tags_file = pathlib.Path.cwd()/".xpload"/"tags.json"
    # A list of tag entries loaded from tags_file
    tags = []
    try:
        with tags_file.open() as fp:  # context manager: no leaked handle
            tags = json.load(fp)
    except OSError: # File not found. Create its directory silently
        tags_file.parent.mkdir(exist_ok=True)
    except json.JSONDecodeError as e: # json is not valid
        raise RuntimeError("Found invalid tags stage. Fix or remove and try again: " + repr(e))
    # A valid stage may hold at most one entry per tag name
    existing_tags = [indx for indx, tag in enumerate(tags) if tag['name'] == tag_name]
    if len(existing_tags) >= 2:
        raise RuntimeError(f"Found invalid tags stage. Only one entry for \"{tag_name}\" can be staged")
    # Remove tags with name tag_name if exist
    tags = [tag for tag in tags if tag['name'] != tag_name]
    # Insert updated tag
    tag_entry = dict(name=tag_name, type=tag_type, status=tag_status, domains=tag_domains)
    tags.append(tag_entry)
    tags_file.write_text(json.dumps(tags, indent=2))
def add_pil(tag: str, domain: str, payload: pathlib.Path, start: int, end: int = None):
# Make assumptions about input values
if end:
assert start > 0 and end > start, "start must be greater than zero, end must be greater than start"
# Use staged pils if exist
pils_file = pathlib.Path.cwd()/'.xpload'/'pils.json'
# A list of payload intervals loaded from pils_file
pils = []
try:
pils = json.load(pils_file.open())
except OSError: # File not found. Create it silently
pils_file.parent.mkdir(exist_ok=True)
except json.JSONDecodeError as e: # json is not valid
raise RuntimeError("Found invalid pils stage. Fix or remove and try again: " + | |
of using private
methods in a hackish way) in Python 2.4, so is now deprecated.
Use *_from_string as described above.
More: Python 2.4 has a new email package, and the private functions
are gone. So this won't even work. We have to do something to
get this to work, for the 1.0.x branch, so use a different ugly
hack.
"""
warnings.warn("setPayload is deprecated. Use " \
"email.message_from_string(payload, _class=" \
"Message) instead.",
DeprecationWarning, 2)
new_me = email.message_from_string(payload, _class=Message)
self.__dict__.update(new_me.__dict__)
def setId(self, id):
if self.id and self.id != id:
raise ValueError(("MsgId has already been set,"
" cannot be changed %r %r") % (self.id, id))
if id is None:
raise ValueError("MsgId must not be None")
if not type(id) in (str,):
raise TypeError("Id must be a string")
if id == STATS_START_KEY:
raise ValueError("MsgId must not be " + STATS_START_KEY)
if id == STATS_STORAGE_KEY:
raise ValueError("MsgId must not be " + STATS_STORAGE_KEY)
self.id = id
self.message_info_db.load_msg(self)
    def getId(self):
        """Return the message's unique id (set via setId)."""
        return self.id
    def tokenize(self):
        # Delegate to the module-level tokenize() helper applied to this
        # message object.
        return tokenize(self)
    def _force_CRLF(self, data):
        """Make sure data uses CRLF for line termination."""
        # CRLF_RE is a module-level regex defined elsewhere in this file --
        # presumably matching existing line endings; confirm its pattern.
        return CRLF_RE.sub('\r\n', data)
    def as_string(self, unixfrom=False, mangle_from_=True):
        """Return the message as RFC822 text with CRLF line endings."""
        # The email package stores line endings in the "internal" Python
        # format ('\n'). It is up to whoever transmits that information to
        # convert to appropriate line endings (according to RFC822, that is
        # \r\n *only*). imaplib *should* take care of this for us (in the
        # append function), but does not, so we do it here
        try:
            fp = io.StringIO()
            # NOTE(review): email.Generator.Generator is the Python-2 module
            # path; on Python 3 this is email.generator.Generator -- confirm
            # how "email" is imported/aliased at the top of this file.
            g = email.Generator.Generator(fp, mangle_from_=mangle_from_)
            g.flatten(self, unixfrom)
            return self._force_CRLF(fp.getvalue())
        except TypeError:
            # Flattening failed; fall back to joining each payload part's
            # stock as_string() output instead.
            parts = []
            for part in self.get_payload():
                parts.append(email.message.EmailMessage.as_string(part, unixfrom))
            return self._force_CRLF("\n".join(parts))
    def modified(self):
        """Write the message state through to the message-info database."""
        if self.id: # only persist if key is present
            self.message_info_db.store_msg(self)
def GetClassification(self):
if self.c == PERSISTENT_SPAM_STRING:
return options['Headers', 'header_spam_string']
elif self.c == PERSISTENT_HAM_STRING:
return options['Headers', 'header_ham_string']
elif self.c == PERSISTENT_UNSURE_STRING:
return options['Headers', 'header_unsure_string']
return None
def RememberClassification(self, cls):
# this must store state independent of options settings, as they
# may change, which would really screw this database up
if cls == options['Headers', 'header_spam_string']:
self.c = PERSISTENT_SPAM_STRING
elif cls == options['Headers', 'header_ham_string']:
self.c = PERSISTENT_HAM_STRING
elif cls == options['Headers', 'header_unsure_string']:
self.c = PERSISTENT_UNSURE_STRING
else:
raise ValueError("Classification must match header strings in options")
self.modified()
    def GetTrained(self):
        """Return the trained-as value passed to RememberTrained
        (None means untrained -- see RememberTrained)."""
        return self.t
    def RememberTrained(self, isSpam):
        """Record the training state and persist it."""
        # isSpam == None means no training has been done
        self.t = isSpam
        self.modified()
    def __repr__(self):
        # State tuple is (id, classification code, trained flag). The %r of
        # repr() double-quotes the tuple; kept for output compatibility.
        return "spambayes.message.Message%r" % repr(self.__getstate__())
    def __getstate__(self):
        # Persist/pickle state as a (id, classification code, trained) tuple.
        return (self.id, self.c, self.t)
    def __setstate__(self, t):
        # Inverse of __getstate__: restore (id, classification, trained).
        (self.id, self.c, self.t) = t
class SBHeaderMessage(Message):
    '''Message class that is cognizant of SpamBayes headers.
    Adds routines to add/remove headers for SpamBayes'''

    def setPayload(self, payload):
        """DEPRECATED.
        """
        warnings.warn("setPayload is deprecated. Use " \
                      "email.message_from_string(payload, _class=" \
                      "SBHeaderMessage) instead.",
                      DeprecationWarning, 2)
        new_me = email.message_from_string(payload, _class=SBHeaderMessage)
        self.__dict__.update(new_me.__dict__)

    def setIdFromPayload(self):
        """Set the message id from our unique-id header, if present.

        Returns the id, or None when the header is missing or invalid.
        """
        try:
            self.setId(self[options['Headers', 'mailid_header_name']])
        except ValueError:
            return None
        return self.id

    def setDisposition(self, prob):
        """Classify per the configured cutoffs and remember the result."""
        if prob < options['Categorization', 'ham_cutoff']:
            disposition = options['Headers', 'header_ham_string']
        elif prob > options['Categorization', 'spam_cutoff']:
            disposition = options['Headers', 'header_spam_string']
        else:
            disposition = options['Headers', 'header_unsure_string']
        self.RememberClassification(disposition)

    def addSBHeaders(self, prob, clues):
        """Add hammie header, and remember message's classification. Also,
        add optional headers if needed."""
        self.setDisposition(prob)
        disposition = self.GetClassification()
        self[options['Headers', 'classification_header_name']] = disposition
        if options['Headers', 'include_score']:
            disp = "%.*f" % (options["Headers", "header_score_digits"], prob)
            if options["Headers", "header_score_logarithm"]:
                # Near-certain scores get an order-of-magnitude annotation
                if prob <= 0.005 and prob > 0.0:
                    x = -math.log10(prob)
                    disp += " (%d)" % x
                if prob >= 0.995 and prob < 1.0:
                    x = -math.log10(1.0-prob)
                    disp += " (%d)" % x
            self[options['Headers', 'score_header_name']] = disp
        if options['Headers', 'include_thermostat']:
            # One '*' per tenth of spam probability
            thermostat = '**********'
            self[options['Headers', 'thermostat_header_name']] = \
                thermostat[:int(prob*10)]
        if options['Headers', 'include_evidence']:
            hco = options['Headers', 'clue_mailheader_cutoff']
            sco = 1 - hco
            evd = []
            for word, score in clues:
                if (word == '*H*' or word == '*S*' \
                    or score <= hco or score >= sco):
                    if isinstance(word, str):
                        # email.header (lowercase) is the Python 3 module
                        # path; the original's email.Header is Python-2 only
                        word = email.header.Header(word,
                                                   charset='utf-8').encode()
                    try:
                        evd.append("%r: %.2f" % (word, score))
                    except TypeError:
                        evd.append("%r: %s" % (word, score))
            # Line-wrap this header, because it can get very long. We don't
            # use email.Header.Header because that can explode with unencoded
            # non-ASCII characters. We can't use textwrap because that's 2.3.
            wrappedEvd = []
            headerName = options['Headers', 'evidence_header_name']
            lineLength = len(headerName) + len(': ')
            for index, component in enumerate(evd):
                wrappedEvd.append(component)
                lineLength += len(component)
                if index < len(evd)-1:
                    if lineLength + len('; ') + len(evd[index+1]) < 78:
                        wrappedEvd.append('; ')
                    else:
                        wrappedEvd.append(';\n\t')
                        lineLength = 8
            self[headerName] = "".join(wrappedEvd)
        if options['Headers', 'add_unique_id']:
            self[options['Headers', 'mailid_header_name']] = self.id
        self.addNotations()

    def addNotations(self):
        """Add the appropriate string to the subject: and/or to: header.
        This is a reasonably ugly method of including the classification,
        but no-one has a better idea about how to allow filtering in
        'stripped down' mailers (i.e. Outlook Express), so, for the moment,
        this is it.
        """
        disposition = self.GetClassification()
        # options["Headers", "notate_to"] (and notate_subject) can be
        # either a single string (like "spam") or a tuple (like
        # ("unsure", "spam")); normalize to a tuple first.
        if isinstance(options["Headers", "notate_to"], str):
            notate_to = (options["Headers", "notate_to"],)
        else:
            notate_to = options["Headers", "notate_to"]
        if disposition in notate_to:
            # Once, we treated the To: header just like the Subject: one,
            # but that doesn't really make sense - and OE stripped the
            # comma that we added, treating it as a separator, so it
            # wasn't much use anyway. So we now convert the classification
            # to an invalid address, and add that.
            # (String reconstructed from upstream SpamBayes; the reserved
            # .invalid TLD guarantees the address cannot be delivered.)
            address = "%s@spambayes.invalid" % (disposition, )
            try:
                self.replace_header("To", "%s,%s" % (address, self["To"]))
            except KeyError:
                self["To"] = address
        if isinstance(options["Headers", "notate_subject"], str):
            notate_subject = (options["Headers", "notate_subject"],)
        else:
            notate_subject = options["Headers", "notate_subject"]
        if disposition in notate_subject:
            try:
                self.replace_header("Subject", "%s,%s" % (disposition,
                                                          self["Subject"]))
            except KeyError:
                self["Subject"] = disposition

    def delNotations(self):
        """If present, remove our notation from the subject: and/or to:
        header of the message.
        This is somewhat problematic, as we cannot be 100% positive that we
        added the notation. It's almost certain to be us with the to:
        header, but someone else might have played with the subject:
        header. However, as long as the user doesn't turn this option on
        and off, this will all work nicely.
        See also [ 848365 ] Remove subject annotations from message review
        page
        """
        subject = self["Subject"]
        if subject:
            ham = options["Headers", "header_ham_string"] + ','
            spam = options["Headers", "header_spam_string"] + ','
            unsure = options["Headers", "header_unsure_string"] + ','
            if options["Headers", "notate_subject"]:
                for disp in (ham, spam, unsure):
                    if subject.startswith(disp):
                        self.replace_header("Subject", subject[len(disp):])
                        # Only remove one, maximum.
                        break
        to = self["To"]
        if to:
            # Prefixes mirror the "<disposition>@spambayes.invalid," address
            # added by addNotations() (reconstructed from upstream SpamBayes)
            ham = "%s@spambayes.invalid," % \
                  (options["Headers", "header_ham_string"],)
            spam = "%s@spambayes.invalid," % \
                   (options["Headers", "header_spam_string"],)
            unsure = "%s@spambayes.invalid," % \
                     (options["Headers", "header_unsure_string"],)
            if options["Headers", "notate_to"]:
                for disp in (ham, spam, unsure):
                    if to.startswith(disp):
                        self.replace_header("To", to[len(disp):])
                        # Only remove one, maximum.
                        break

    def currentSBHeaders(self):
        """Return a dictionary containing the current values of the
        SpamBayes headers. This can be used to restore the values
        after using the delSBHeaders() function."""
        headers = {}
        for header_name in [options['Headers', 'classification_header_name'],
                            options['Headers', 'mailid_header_name'],
                            (options['Headers', 'classification_header_name']
                             + "-ID"),
                            options['Headers', 'thermostat_header_name'],
                            options['Headers', 'evidence_header_name'],
                            options['Headers', 'score_header_name'],
                            options['Headers', 'trained_header_name'],
                            ]:
            value = self[header_name]
            if value is not None:
                headers[header_name] = value
        return headers

    def delSBHeaders(self):
        """Remove every SpamBayes header (and our notations) from the
        message, e.g. just before (re)training."""
        del self[options['Headers', 'classification_header_name']]
        del self[options['Headers', 'mailid_header_name']]
        del self[options['Headers', 'classification_header_name'] + "-ID"] # test mode header
        del self[options['Headers', 'thermostat_header_name']]
        del self[options['Headers', 'evidence_header_name']]
        del self[options['Headers', 'score_header_name']]
        del self[options['Headers', 'trained_header_name']]
        # Also delete notations - typically this is called just before
        # training, and we don't want them there for that.
        self.delNotations()
# Utility function to insert an exception header into the given RFC822 text.
# This is used by both sb_server and sb_imapfilter, so it's handy to have
# it available separately.
def insert_exception_header(string_msg, msg_id=None):
"""Insert an exception header into the given RFC822 message (as text).
Returns a tuple of the new message text and the exception details."""
stream = io.StringIO()
traceback.print_exc(None, stream)
details = stream.getvalue()
# | |
"""Defines the database models for a batch"""
from __future__ import unicode_literals
import logging
from collections import namedtuple
import django.contrib.postgres.fields
from django.db import connection, models, transaction
from django.db.models import F, Q
from django.utils.timezone import now
from batch.configuration.configuration import BatchConfiguration
from batch.configuration.json.configuration_v6 import convert_configuration_to_v6, BatchConfigurationV6
from batch.definition.exceptions import InvalidDefinition
from batch.definition.json.definition_v6 import convert_definition_to_v6, BatchDefinitionV6
from batch.exceptions import BatchError
from job.models import JobType
from messaging.manager import CommandMessageManager
from queue.models import Queue
from recipe.configuration.data.recipe_data import LegacyRecipeData
from recipe.diff.forced_nodes import ForcedNodes
from recipe.messages.create_recipes import create_reprocess_messages
from recipe.models import Recipe, RecipeType, RecipeTypeRevision
from storage.models import ScaleFile, Workspace
from trigger.models import TriggerEvent
from util import parse as parse_utils
from util import rest as rest_utils
from util.exceptions import ValidationException
logger = logging.getLogger(__name__)
# Result of validating a batch: validity flag, error and warning lists, and
# the batch model produced by the validation.
BatchValidation = namedtuple('BatchValidation', ['is_valid', 'errors', 'warnings', 'batch'])
class BatchManager(models.Manager):
"""Provides additional methods for handling batches"""
    def create_batch_v6(self, title, description, recipe_type, event, definition, configuration=None):
        """Creates a new batch that will contain a collection of recipes to process. The definition and configuration
        will be stored in version 6 of their respective schemas. This method will only create the batch, not its
        recipes. To create the batch's recipes, a CreateBatchRecipes message needs to be sent to the messaging backend.
        :param title: The human-readable name of the batch
        :type title: string
        :param description: A human-readable description of the batch
        :type description: string
        :param recipe_type: The type of recipes that will be created for this batch
        :type recipe_type: :class:`recipe.models.RecipeType`
        :param event: The event that created this batch
        :type event: :class:`trigger.models.TriggerEvent`
        :param definition: The definition for running the batch
        :type definition: :class:`batch.definition.definition.BatchDefinition`
        :param configuration: The batch configuration
        :type configuration: :class:`batch.configuration.configuration.BatchConfiguration`
        :returns: The newly created batch
        :rtype: :class:`batch.models.Batch`
        :raises :class:`batch.configuration.exceptions.InvalidConfiguration`: If the configuration is invalid
        :raises :class:`batch.definition.exceptions.InvalidDefinition`: If the definition is invalid
        """
        batch = Batch()
        batch.title = title
        batch.description = description
        batch.recipe_type = recipe_type
        batch.recipe_type_rev = RecipeTypeRevision.objects.get_revision(recipe_type.name, recipe_type.revision_num)
        batch.event = event
        # Definition and configuration are stored as v6 JSON dicts
        batch.definition = convert_definition_to_v6(definition).get_dict()
        batch.configuration = convert_configuration_to_v6(configuration).get_dict()
        with transaction.atomic():
            if definition.root_batch_id is not None:
                # Find latest batch with the root ID and supersede it
                try:
                    superseded_batch = Batch.objects.get_locked_batch_from_root(definition.root_batch_id)
                except Batch.DoesNotExist:
                    raise InvalidDefinition('PREV_BATCH_NOT_FOUND', 'No batch with that root ID exists')
                batch.root_batch_id = superseded_batch.root_batch_id
                batch.superseded_batch = superseded_batch
                self.supersede_batch(superseded_batch.id, now())
            # Validation happens inside the transaction so a failure rolls
            # back the supersede performed above
            definition.validate(batch)
            configuration.validate(batch)
            batch.recipes_estimated = definition.estimated_recipes
            batch.save()
            if batch.root_batch_id is None: # Batches with no superseded batch are their own root
                batch.root_batch_id = batch.id
                Batch.objects.filter(id=batch.id).update(root_batch_id=batch.id)
            # Create models for batch metrics
            batch_metrics_models = []
            for job_name in recipe_type.get_definition().get_topological_order():
                batch_metrics_model = BatchMetrics()
                batch_metrics_model.batch_id = batch.id
                batch_metrics_model.job_name = job_name
                batch_metrics_models.append(batch_metrics_model)
            BatchMetrics.objects.bulk_create(batch_metrics_models)
        return batch
    def calculate_estimated_recipes(self, batch, definition):
        """Calculates the estimated number of recipes that will be created for this batch.
        This number is calculated by:
        1. The number of existing recipes for the specific recipe type that are
        not currently superseded
        2. The number of sub-recipes in the recipe
        These should be filtered if not changed/marked for re-run?

        :param batch: The batch being estimated
        :param definition: The batch definition (dataset, forced_nodes, ...)
        :returns: The estimated recipe count (int)
        """
        # If this is a previous batch, use the previous batch total
        if batch.superseded_batch:
            return batch.superseded_batch.recipes_total
        # No files defined to run on, so no recipes will be created
        if not definition.dataset:
            return 0
        #: The number of recipes are calculated based on the following:
        # - If the dataset has a global parameter matching the input of the
        # recipe type, count the number of files in each dataset member
        # - If the dataset has a parameter matching the input of the recipe
        # type, count the number of files in each member that matches the parameter
        from data.interface.exceptions import InvalidInterfaceConnection
        from data.models import DataSet, DataSetMember, DataSetFile
        dataset = DataSet.objects.get(pk=definition.dataset)
        dataset_definition = dataset.get_definition()
        recipe_type = RecipeTypeRevision.objects.get_revision(name=batch.recipe_type.name, revision_num=batch.recipe_type_rev.revision_num).recipe_type
        # combine the parameters
        dataset_parameters = dataset_definition.global_parameters
        for param in dataset_definition.parameters.parameters:
            dataset_parameters.add_parameter(dataset_definition.parameters.parameters[param])
        try:
            recipe_type.get_definition().input_interface.validate_connection(dataset_parameters)
        except InvalidInterfaceConnection as ex:
            # NOTE(review): unicode() is Python-2 only; under Python 3 this
            # line raises NameError -- confirm the target interpreter.
            logger.info('DataSet parameters do not match the recipe inputs; no recipes will be created: %s' % unicode(ex))
            return 0
        recipe_inputs = recipe_type.get_definition().get_input_keys()
        # Base count of recipes are number of files in the dataset that match the recipe inputs
        files = DataSetFile.objects.get_files([dataset.id], recipe_inputs)
        num_files = len(files)
        from recipe.models import RecipeTypeSubLink
        estimated_recipes = num_files
        # If all nodes are forced:
        if definition.forced_nodes and definition.forced_nodes.all_nodes:
            # Count the number of sub-recipes
            subs_count = RecipeTypeSubLink.objects.count_subrecipes(batch.recipe_type_id, recurse=True)
            estimated_recipes += (num_files * subs_count)
        else:
            # Only count the sub-recipes nodes that are forced, or in the lineage of a forced node
            nodes = recipe_type.get_v6_definition_json()['nodes']
            subs = [node for node in nodes if nodes[node]['node_type']['node_type'] == 'recipe']
            for sub in subs:
                sub_type_id = RecipeType.objects.get(name=nodes[sub]['node_type']['recipe_type_name'], revision_num=nodes[sub]['node_type']['recipe_type_revision']).id
                # If sub-recipe is selected as a forced node
                if sub in definition.forced_nodes.get_sub_recipe_names():
                    estimated_recipes += (1 + RecipeTypeSubLink.objects.count_subrecipes(sub_type_id, recurse=True)) * num_files
                # If it's a child of a forced job node, we're going to need to run it
                else:
                    recipe_type_def = recipe_type.get_definition()
                    for job_node in definition.forced_nodes.get_forced_node_names():
                        if recipe_type_def.has_descendant(job_node, sub):
                            estimated_recipes += (1 + RecipeTypeSubLink.objects.count_subrecipes(sub_type_id, recurse=True)) * num_files
        return estimated_recipes
def get_batch_from_root(self, root_batch_id):
"""Returns the latest (non-superseded) batch model with the given root batch ID. The returned model
will have no related fields populated.
:param root_batch_id: The root batch ID
:type root_batch_id: int
:returns: The batch model
:rtype: :class:`batch.models.Batch`
"""
return self.get(root_batch_id=root_batch_id, is_superseded=False)
def edit_batch_v6(self, batch, title=None, description=None, configuration=None):
"""Edits the given batch to update any of the given fields. The configuration will be stored in version 6 of its
schemas.
:param batch: The batch to edit
:type batch: :class:`batch.models.Batch`
:param title: The human-readable name of the batch
:type title: string
:param description: A human-readable description of the batch
:type description: string
:param configuration: The batch configuration
:type configuration: :class:`batch.configuration.configuration.BatchConfiguration`
:raises :class:`batch.configuration.exceptions.InvalidConfiguration`: If the configuration is invalid
"""
update_fields = {}
if title is not None:
update_fields['title'] = title
if description is not None:
update_fields['description'] = description
if configuration:
configuration.validate(batch)
configuration_dict = convert_configuration_to_v6(configuration).get_dict()
update_fields['configuration'] = configuration_dict
if update_fields:
Batch.objects.filter(id=batch.id).update(**update_fields)
def get_batch_comparison_v6(self, root_batch_id):
"""Returns the batch metrics for the v6 batch comparison REST API
:param root_batch_id: The root batch ID of the batches to compare
:type root_batch_id: int
:returns: The list of batches in the chain
:rtype: list
"""
from batch.serializers import BatchBaseSerializerV6
batches = Batch.objects.filter(root_batch_id=root_batch_id).prefetch_related('metrics')
batches = batches.defer('definition', 'configuration').order_by('id')
batch_list = []
job_metrics_dict = {}
for batch in batches:
batch_list.append(BatchBaseSerializerV6(batch).data)
batch.batch_metrics_dict = {}
for batch_metrics in batch.metrics.all():
batch.batch_metrics_dict[batch_metrics.job_name] = batch_metrics
if batch_metrics.job_name not in job_metrics_dict:
job_metrics = {'jobs_total': [], 'jobs_pending': [], 'jobs_blocked': [], 'jobs_queued': [],
'jobs_running': [], 'jobs_failed': [], 'jobs_completed': [], 'jobs_canceled': [],
'min_seed_duration': [], 'avg_seed_duration': [], 'max_seed_duration': [],
'min_job_duration': [], 'avg_job_duration': [], 'max_job_duration': []}
job_metrics_dict[batch_metrics.job_name] = job_metrics
metrics_dict = {'jobs_total': [], 'jobs_pending': [], 'jobs_blocked': [], 'jobs_queued': [], 'jobs_running': [],
'jobs_failed': [], 'jobs_completed': [], 'jobs_canceled': [], 'recipes_estimated': [],
'recipes_total': [], 'recipes_completed': [], 'job_metrics': job_metrics_dict}
for batch in batches:
metrics_dict['jobs_total'].append(batch.jobs_total)
metrics_dict['jobs_pending'].append(batch.jobs_pending)
metrics_dict['jobs_blocked'].append(batch.jobs_blocked)
metrics_dict['jobs_queued'].append(batch.jobs_queued)
metrics_dict['jobs_running'].append(batch.jobs_running)
metrics_dict['jobs_failed'].append(batch.jobs_failed)
metrics_dict['jobs_completed'].append(batch.jobs_completed)
metrics_dict['jobs_canceled'].append(batch.jobs_canceled)
metrics_dict['recipes_estimated'].append(batch.recipes_estimated)
metrics_dict['recipes_total'].append(batch.recipes_total)
metrics_dict['recipes_completed'].append(batch.recipes_completed)
for job_name, job_metrics in job_metrics_dict.items():
if job_name in batch.batch_metrics_dict:
batch_metrics = batch.batch_metrics_dict[job_name]
job_metrics['jobs_total'].append(batch_metrics.jobs_total)
job_metrics['jobs_pending'].append(batch_metrics.jobs_pending)
job_metrics['jobs_blocked'].append(batch_metrics.jobs_blocked)
job_metrics['jobs_queued'].append(batch_metrics.jobs_queued)
job_metrics['jobs_running'].append(batch_metrics.jobs_running)
job_metrics['jobs_failed'].append(batch_metrics.jobs_failed)
job_metrics['jobs_completed'].append(batch_metrics.jobs_completed)
job_metrics['jobs_canceled'].append(batch_metrics.jobs_canceled)
if batch_metrics.min_seed_duration is not None:
min_seed_duration = parse_utils.duration_to_string(batch_metrics.min_seed_duration)
else:
min_seed_duration = None
job_metrics['min_seed_duration'].append(min_seed_duration)
if batch_metrics.avg_seed_duration is not None:
avg_seed_duration = parse_utils.duration_to_string(batch_metrics.avg_seed_duration)
else:
avg_seed_duration = None
job_metrics['avg_seed_duration'].append(avg_seed_duration)
if batch_metrics.max_seed_duration is not None:
max_seed_duration = parse_utils.duration_to_string(batch_metrics.max_seed_duration)
else:
max_seed_duration = None
job_metrics['max_seed_duration'].append(max_seed_duration)
if batch_metrics.min_job_duration is not None:
min_job_duration = parse_utils.duration_to_string(batch_metrics.min_job_duration)
else:
min_job_duration = None
job_metrics['min_job_duration'].append(min_job_duration)
if batch_metrics.avg_job_duration is not None:
avg_job_duration = parse_utils.duration_to_string(batch_metrics.avg_job_duration)
else:
avg_job_duration = None
job_metrics['avg_job_duration'].append(avg_job_duration)
if batch_metrics.max_job_duration is not None:
max_job_duration = parse_utils.duration_to_string(batch_metrics.max_job_duration)
else:
max_job_duration = None
job_metrics['max_job_duration'].append(max_job_duration)
else:
for metric_name in job_metrics:
job_metrics[metric_name].append(None) # Batch does not have this job, fill in metrics with None
return {'batches': batch_list, 'metrics': metrics_dict}
def get_batches_v6(self, started=None, ended=None, recipe_type_ids=None, is_creation_done=None, is_superseded=None,
root_batch_ids=None, order=None):
"""Returns a list of batches for the v6 batches REST API
:param started: Query batches updated after this time
:type started: :class:`datetime.datetime`
:param ended: Query batches updated before this time
:type ended: :class:`datetime.datetime`
:param recipe_type_ids: Query batches with these recipe types
:type recipe_type_ids: list
:param is_creation_done: Query batches that match this value
:type is_creation_done: bool
:param is_superseded: Query batches that match this value
:type is_superseded: bool
:param root_batch_ids: Query batches with these root batches
:type root_batch_ids: list
:param order: A list of fields to control the sort order
:type order: list
:returns: The list of batches that | |
<reponame>HazyResearch/domino
from __future__ import annotations
from collections import defaultdict
from multiprocessing.sharedctypes import Value
from typing import Union
import meerkat as mk
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from domino.utils import convert_to_numpy, unpack_args
from .abstract import Slicer
class BarlowSlicer(Slicer):
    r"""
    Slice Discovery based on the Barlow [singla_2021]_.
    Discover slices using a decision tree. TODO(singlasahil14): add any more details
    describing your method
    .. note:
        The authors of the Barlow paper use this slicer with embeddings from a
        classifier trained using an adversarially robust loss [engstrom_2019]_.
        To compute embeddings using such a classifier, pass ``encoder="robust"`` to
        ``domino.embed``.
    Examples
    --------
    Suppose you've trained a model and stored its predictions on a dataset in
    a `Meerkat DataPanel <https://github.com/robustness-gym/meerkat>`_ with columns
    "emb", "target", and "pred_probs". After loading the DataPanel, you can discover
    underperforming slices of the validation dataset with the following:
    .. code-block:: python
        from domino import BarlowSlicer
        dp = ...  # Load dataset into a Meerkat DataPanel
        # split dataset
        valid_dp = dp.lz[dp["split"] == "valid"]
        test_dp = dp.lz[dp["split"] == "test"]
        barlow = BarlowSlicer()
        barlow.fit(
            data=valid_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
        )
        dp["barlow_slices"] = barlow.transform(
            data=test_dp, embeddings="emb", targets="target", pred_probs="pred_probs"
        )
    Args:
        n_slices (int, optional): The number of slices to discover.
            Defaults to 5.
        max_depth (str, optional): The maximum depth of the decision tree. Defaults to
            3. If None, then nodes are expanded until all leaves are pure or until all
            leaves contain less than 2 samples. See SKlearn documentation for more
            information.
        n_features (int, optional): The number features from the embedding
            to use. Defaults to 128. Features are selected using mutual information
            estimate.
        pbar (bool, optional): Whether to show a progress bar. Ignored for barlow.
    .. [singla_2021]
        <NAME>, et al. "Understanding failures of deep networks via robust
        feature extraction." Proceedings of the IEEE/CVF Conference on Computer Vision
        and Pattern Recognition. 2021.
    .. [engstrom_2019]
        @misc{robustness,
            title={Robustness (Python Library)},
            author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
            year={2019},
            url={https://github.com/MadryLab/robustness}
        }
    """

    def __init__(
        self,
        n_slices: int = 5,
        max_depth: int = 3,  # TODO(singlasahil14): confirm this default
        n_features: int = 128,  # TODO(singlasahil14): confirm this default
        pbar: bool = True,
    ):
        super().__init__(n_slices=n_slices)
        self.config.max_depth = max_depth
        self.config.n_features = n_features

        # parameters set after a call to fit
        self._feature_indices = None  # embedding columns kept for the tree
        self._important_leaf_ids = None  # leaf ids treated as discovered slices
        self._decision_tree = None

    def fit(
        self,
        data: Union[dict, mk.DataPanel] = None,
        embeddings: Union[str, np.ndarray] = "embedding",
        targets: Union[str, np.ndarray] = "target",
        pred_probs: Union[str, np.ndarray] = "pred_probs",
    ) -> BarlowSlicer:
        """
        Fit the decision tree to data.
        Args:
            data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
                embeddings, targets, and prediction probabilities. The names of the
                columns can be specified with the ``embeddings``, ``targets``, and
                ``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a colum in
                ``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
                of shape (n_samples, dimension of embedding). Defaults to
                "embedding".
            targets (Union[str, np.ndarray], optional): The name of a column in
                ``data`` holding class labels. If ``data`` is ``None``, then an
                np.ndarray of shape (n_samples,). Defaults to "target".
            pred_probs (Union[str, np.ndarray], optional): The name of
                a column in ``data`` holding model predictions (can either be "soft"
                probability scores or "hard" 1-hot encoded predictions). If
                ``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
                or (n_samples,) in the binary case. Defaults to "pred_probs".
        Returns:
            BarlowSlicer: Returns a fit instance of BarlowSlicer.
        """
        embeddings, targets, pred_probs = unpack_args(
            data, embeddings, targets, pred_probs
        )
        embeddings, targets, pred_probs = convert_to_numpy(
            embeddings, targets, pred_probs
        )

        # Hard predictions: argmax over classes, or a 0.5 threshold when given a
        # single probability per sample.
        if pred_probs.ndim > 1:
            preds = pred_probs.argmax(axis=-1)
        else:
            preds = pred_probs > 0.5
        success = preds == targets
        failure = np.logical_not(success)

        # Keep only the embedding dimensions most informative of model failure.
        sparse_features, feature_indices = _select_important_features(
            embeddings,
            failure,
            num_features=self.config.n_features,
            method="mutual_info",
        )
        self._feature_indices = feature_indices

        # Fit a decision tree that predicts failure from the selected features.
        decision_tree = _train_decision_tree(
            sparse_features,
            failure,
            max_depth=self.config.max_depth,
            criterion="entropy",
        )
        (
            error_rate_array,
            error_coverage_array,
        ) = decision_tree.compute_leaf_error_rate_coverage(sparse_features, failure)
        # The discovered slices are the leaves that best capture failures.
        important_leaf_ids = _important_leaf_nodes(
            decision_tree, error_rate_array, error_coverage_array
        )
        self._decision_tree = decision_tree
        self._important_leaf_ids = important_leaf_ids
        return self

    def predict(
        self,
        data: Union[dict, mk.DataPanel] = None,
        embeddings: Union[str, np.ndarray] = "embedding",
        targets: Union[str, np.ndarray] = "target",
        pred_probs: Union[str, np.ndarray] = "pred_probs",
    ) -> np.ndarray:
        """
        Predict slice membership according to the learnt decision tree.
        .. caution::
            Must call ``BarlowSlicer.fit`` prior to calling ``BarlowSlicer.predict``.
        Args:
            data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
                embeddings, targets, and prediction probabilities. The names of the
                columns can be specified with the ``embeddings``, ``targets``, and
                ``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a colum in
                ``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
                of shape (n_samples, dimension of embedding). Defaults to
                "embedding".
            targets (Union[str, np.ndarray], optional): The name of a column in
                ``data`` holding class labels. If ``data`` is ``None``, then an
                np.ndarray of shape (n_samples,). Defaults to "target".
            pred_probs (Union[str, np.ndarray], optional): The name of
                a column in ``data`` holding model predictions (can either be "soft"
                probability scores or "hard" 1-hot encoded predictions). If
                ``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
                or (n_samples,) in the binary case. Defaults to "pred_probs".
        Returns:
            np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
                values are either 1 or 0.
        """
        if self._decision_tree is None:
            raise ValueError(
                "Must call `fit` prior to calling `predict` or `predict_proba`."
            )
        # Only embeddings are needed at prediction time; targets/pred_probs are
        # accepted for interface symmetry with fit() but unused here.
        (embeddings,) = unpack_args(data, embeddings)
        (embeddings,) = convert_to_numpy(embeddings)

        # Restrict to the same feature columns selected during fit().
        embeddings = embeddings[:, self._feature_indices]
        leaves = self._decision_tree.apply(embeddings)  # (n_samples,)
        # convert to 1-hot encoding of size (n_samples, n_slices) using broadcasting
        slices = (
            leaves[:, np.newaxis] == self._important_leaf_ids[np.newaxis, :]
        ).astype(int)
        return slices

    def predict_proba(
        self,
        data: Union[dict, mk.DataPanel] = None,
        embeddings: Union[str, np.ndarray] = "embedding",
        targets: Union[str, np.ndarray] = "target",
        pred_probs: Union[str, np.ndarray] = "pred_probs",
    ) -> np.ndarray:
        """
        Predict slice membership according to the learnt decision tree.
        .. warning::
            Because the decision tree does not produce probabilistic leaf assignments,
            this method is equivalent to `predict` and returns the same binary
            (n_samples, n_slices) matrix of 0/1 values.
        .. caution::
            Must call ``BarlowSlicer.fit`` prior to calling
            ``BarlowSlicer.predict_proba``.
        Args:
            data (mk.DataPanel, optional): A `Meerkat DataPanel` with columns for
                embeddings, targets, and prediction probabilities. The names of the
                columns can be specified with the ``embeddings``, ``targets``, and
                ``pred_probs`` arguments. Defaults to None.
            embeddings (Union[str, np.ndarray], optional): The name of a colum in
                ``data`` holding embeddings. If ``data`` is ``None``, then an np.ndarray
                of shape (n_samples, dimension of embedding). Defaults to
                "embedding".
            targets (Union[str, np.ndarray], optional): The name of a column in
                ``data`` holding class labels. If ``data`` is ``None``, then an
                np.ndarray of shape (n_samples,). Defaults to "target".
            pred_probs (Union[str, np.ndarray], optional): The name of
                a column in ``data`` holding model predictions (can either be "soft"
                probability scores or "hard" 1-hot encoded predictions). If
                ``data`` is ``None``, then an np.ndarray of shape (n_samples, n_classes)
                or (n_samples,) in the binary case. Defaults to "pred_probs".
        Returns:
            np.ndarray: A binary ``np.ndarray`` of shape (n_samples, n_slices) where
                values are either 1 or 0 (same as ``predict``).
        """
        return self.predict(data, embeddings, targets, pred_probs)
def _mutual_info_select(train_features_class, train_failure_class, num_features=20):
    """Rank features by mutual information with the failure labels and keep the
    top ``num_features``, returning (indices, mutual-information scores)."""
    from sklearn.feature_selection import mutual_info_classif

    scores = mutual_info_classif(
        train_features_class, train_failure_class, random_state=0
    )
    top_indices = np.argsort(scores)[-num_features:]
    return top_indices, scores[top_indices]
def _feature_importance_select(train_features_class, num_features=20):
fi = np.mean(train_features_class, axis=0)
important_features_indices = np.argsort(fi)[-num_features:]
important_features_values = fi[important_features_indices]
return important_features_indices, important_features_values
def _select_important_features(
    train_features, train_failure, num_features=20, method="mutual_info"
):
    """Select the most informative feature columns of ``train_features``.

    Args:
        train_features: 2-D feature matrix (n_samples, n_features).
        train_failure: per-sample failure labels used by the mutual-information
            ranking (ignored by the "feature_importance" method).
        num_features (int, optional): number of columns to keep. Defaults to 20.
        method (str, optional): "mutual_info" or "feature_importance".
            Defaults to 'mutual_info'.
    Raises:
        ValueError: if ``method`` is not one of the two supported names.
    Returns:
        Tuple of (train_features restricted to the selected columns, the
        selected column indices).
    """
    if method == "mutual_info":
        indices, _ = _mutual_info_select(
            train_features, train_failure, num_features=num_features
        )
    elif method == "feature_importance":
        indices, _ = _feature_importance_select(
            train_features, num_features=num_features
        )
    else:
        raise ValueError("Unknown feature selection method")
    return train_features[:, indices], indices
class BarlowDecisionTreeClassifier(DecisionTreeClassifier):
"""Extension of scikit-learn's DecisionTreeClassifier"""
def fit_tree(self, train_data, train_labels):
"""Learn decision tree using features 'train_data' and labels 'train_labels"""
num_true = np.sum(train_labels)
num_false = np.sum(np.logical_not(train_labels))
if self.class_weight == "balanced":
self.float_class_weight = num_false / num_true
elif isinstance(self.class_weight, dict):
keys_list = list(self.class_weight.keys())
assert len(keys_list) == 2
assert 0 in keys_list
assert 1 in keys_list
self.float_class_weight = self.class_weight[1]
self.fit(train_data, train_labels)
true_dict, false_dict = self.compute_TF_dict(train_data, train_labels)
self.train_true_dict | |
depth2dtype = {
# cv.IPL_DEPTH_8U: 'uint8',
# cv.IPL_DEPTH_8S: 'int8',
# cv.IPL_DEPTH_16U: 'uint16',
# cv.IPL_DEPTH_16S: 'int16',
# cv.IPL_DEPTH_32S: 'int32',
# cv.IPL_DEPTH_32F: 'float32',
# cv.IPL_DEPTH_64F: 'float64',
# }
# arrdtype=im.depth
# a = np.fromstring(
# im.tostring(),
# dtype=depth2dtype[im.depth],
# count=im.width*im.height*im.nChannels)
# a.shape = (im.height,im.width,im.nChannels)
# return a
# def to_cv(a):
# dtype2depth = {
# 'uint8': cv.IPL_DEPTH_8U,
# 'int8': cv.IPL_DEPTH_8S,
# 'uint16': cv.IPL_DEPTH_16U,
# 'int16': cv.IPL_DEPTH_16S,
# 'int32': cv.IPL_DEPTH_32S,
# 'float32': cv.IPL_DEPTH_32F,
# 'float64': cv.IPL_DEPTH_64F,
# }
# try:
# nChannels = a.shape[2]
# except:
# nChannels = 1
# cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
# dtype2depth[str(a.dtype)],
# nChannels)
# cv.SetData(cv_im, a.tostring(),
# a.dtype.itemsize*nChannels*a.shape[1])
# return cv_im
#def to_pil(im): return Image.fromarray(np.uint8(im))
def to_pil(im):
    """Convert a numpy image to a PIL Image, casting to uint8 first."""
    return Image.fromarray(np.uint8(im))
def from_pil(pil):
    """Convert a PIL Image (or any array-like) to a fresh numpy array."""
    return np.asarray(pil).copy()
# Cast an image to uint8, the form pylab/matplotlib display helpers expect.
def to_pylab(a): return np.uint8(a)
def test_draw_text():
    # Visual smoke test for draw_text(): renders two strings at the given
    # positions/colors on a white canvas (default and explicit font size) and
    # displays the results via show(). Not an automated assertion-based test.
    im = 255 + np.zeros((300, 300, 3))
    show([draw_text(im, ['hello', 'world'], [(100, 200), (0, 0)], [(255, 0, 0), (0, 255, 0)]),
          draw_text(im, ['hello', 'world'], [(100, 100), (0, 0)], [(255, 0, 0), (0, 255, 0)], font_size = 12)])
def save_tmp(im, encoding = '.png', dir = None):
    """Write image `im` to a fresh temporary file with the given extension and
    return the file's path."""
    path = ut.make_temp(encoding, dir = dir)
    save(path, im)
    return path
# Save to a temp file on a shared NFS directory so other machines can read it.
# NOTE(review): the directory is hard-coded to a lab-specific CSAIL path.
def save_tmp_nfs(im, encoding = '.png'):
    return save_tmp(im, encoding, '/csail/vision-billf5/aho/tmp')
# def resize(im, size):
# if type(size) == type(1):
# size = float(size)
# #return scipy.misc.pilutil.imresize(im, size)
# return scipy.misc.imresize(im, size)
#def resize(im, scale, order = 3, hires = 'auto'):
def resize(im, scale, order = 3, hires = False):
    """Resize image `im`.

    `scale` may be:
      - a scalar: uniform fractional scale factor;
      - a tuple whose first two entries are ints (or one int and one None):
        target output dimensions (rows, cols); a None entry is derived from
        the other so the aspect ratio is preserved;
      - a tuple of floats: per-axis fractional scale factors.
    Entries beyond the first two are ignored.

    order: spline order for scipy.ndimage.zoom (non-hires path only).
    hires: if True, resize via PIL with antialiasing; 'auto' uses PIL only
        for uint8 images.
    """
    if hires == 'auto':
        hires = (im.dtype == np.uint8)
    if np.ndim(scale) == 0:
        new_scale = [scale, scale]
    # interpret scale as dimensions; convert integer size to a fractional scale
    elif ((scale[0] is None) or type(scale[0]) == type(0)) \
         and ((scale[1] is None) or type(scale[1]) == type(0)) \
         and (not (scale[0] is None and scale[1] is None)):
        # if the size of only one dimension is provided, scale the other to maintain the right aspect ratio
        if scale[0] is None:
            dims = (int(float(im.shape[0])/im.shape[1]*scale[1]), scale[1])
        elif scale[1] is None:
            dims = (scale[0], int(float(im.shape[1])/im.shape[0]*scale[0]))
        else:
            dims = scale[:2]
        # the +0.4 nudges the fractional scale so that int(scale*shape) rounds
        # back to exactly the requested integer dims (checked by the assert)
        new_scale = [float(dims[0] + 0.4)/im.shape[0], float(dims[1] + 0.4)/im.shape[1]]
        # a test to make sure we set the floating point scale correctly
        result_dims = [int(new_scale[0]*im.shape[0]), int(new_scale[1]*im.shape[1])]
        assert tuple(result_dims) == tuple(dims)
    elif type(scale[0]) == type(0.) and type(scale[1]) == type(0.):
        new_scale = scale
        #new_scale = scale[1], scale[0]
    else:
        raise RuntimeError("don't know how to interpret scale: %s" % (scale,))
    # want new scale' to be such that
    # int(scale'[0]*im.shape[0]) = scale[0], etc. (that's how zoom computes the new shape)
    # todo: any more numerical issues?
    #print 'scale before', im.shape, scale
    # print 'scale after', scale
    # print 'new image size', [int(scale[0]*im.shape[0]),int(scale[1]*im.shape[1])]
    #scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
    # 3-D images keep their channel count: zoom factor 1 on the last axis
    scale_param = new_scale if im.ndim == 2 else (new_scale[0], new_scale[1], 1)
    if hires:
        #sz = map(int, (scale_param*im.shape[1], scale_param*im.shape[0]))
        # NOTE(review): map() returns an iterator on Python 3 and
        # Image.ANTIALIAS was removed from recent Pillow -- this path looks
        # Python-2/old-PIL only; confirm before porting.
        sz = map(int, (scale_param[1]*im.shape[1], scale_param[0]*im.shape[0]))
        return from_pil(to_pil(im).resize(sz, Image.ANTIALIAS))
    else:
        res = scipy.ndimage.zoom(im, scale_param, order = order)
        # verify that zoom() returned an image of the desired size
        if (np.ndim(scale) != 0) and type(scale[0]) == type(0) and type(scale[1]) == type(0):
            assert res.shape[:2] == (scale[0], scale[1])
        return res
# Backwards-compatible alias: some call sites use scale() instead of resize().
scale = resize
# import skimage
# resize = skimage.imresize
def test_resize():
    # Exercises resize()'s tuple-scale forms; tuple entries beyond the first
    # two (here 't') are ignored via scale[:2].
    im = make(44, 44)
    # integer tuple => exact output dimensions
    assert resize(im, (121, 120, 't')).shape[:2] == (121, 120)
    # float tuple => per-axis fractional scaling
    assert resize(im, (2., 0.5, 't')).shape[:2] == (88, 22)
# Load an image from disk and display it.
def show_file(fname):
    show(load(fname))
def img_extensions():
    """Return the list of recognized (lower-case) image file extensions."""
    return 'png gif jpg jpeg bmp ppm pgm'.split()
def is_img_file(fname):
    """True if `fname` ends with a known image extension (case-insensitive)."""
    lowered = fname.lower()
    return any(lowered.endswith(ext) for ext in img_extensions())
def blur(im, sigma):
    """Gaussian-blur `im` over its spatial dimensions only.

    For 3-D images each channel is filtered independently (sigma 0 on the
    channel axis leaves channels unmixed, which is equivalent to the old
    per-channel loop but avoids Python-2-only xrange and the
    scipy.ndimage.filters namespace removed from modern SciPy).
    """
    if np.ndim(im) == 2:
        return scipy.ndimage.gaussian_filter(im, sigma)
    else:
        return scipy.ndimage.gaussian_filter(im, (sigma, sigma, 0))
def blit(src, dst, x, y, opt = None):
    """Paste `src` into `dst` with src's top-left corner at (x, y), or centered
    on (x, y) when opt == 'center'. `src` is cropped to the part that falls
    inside `dst`; `dst` is modified in place.
    """
    if opt == 'center':
        # NOTE(review): '/' on ints floors only under Python 2; this file
        # appears to target Python 2 (xrange is used elsewhere) -- confirm.
        x -= src.shape[1]/2
        y -= src.shape[0]/2
    # crop intersecting
    dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
    # offset into src corresponding to the cropped destination rectangle
    sx = dx - x
    sy = dy - y
    dst[dy : dy + dh, dx : dx + dw] = src[sy : sy + dh, sx : sx + dw]
def weighted_add(src, dst, x, y, src_weight, dst_weight, opt = None):
    """Blend `src` into `dst` at (x, y): the overlapping region of `dst`
    becomes dst*dst_weight + src*src_weight. With opt == 'center', (x, y) is
    the center of `src` rather than its top-left corner. `src` is cropped to
    the part that overlaps `dst`; `dst` is modified in place.
    """
    if opt == 'center':
        # NOTE(review): integer '/2' assumes Python 2 floor division
        x -= src.shape[1]/2
        y -= src.shape[0]/2
    # crop intersecting
    dx, dy, dw, dh = ut.crop_rect_to_img((x, y, src.shape[1], src.shape[0]), dst)
    # offset into src corresponding to the cropped destination rectangle
    sx = dx - x
    sy = dy - y
    dst[dy : dy + dh, dx : dx + dw] = dst[dy : dy + dh, dx : dx + dw]*dst_weight + src[sy : sy + dh, sx : sx + dw]*src_weight
def make(w, h, fill = (0, 0, 0)):
    """Create an h-by-w uint8 image filled with the color `fill`."""
    canvas = np.tile([[fill]], (h, w, 1))
    return canvas.astype(np.uint8)
# Grayscale luminance of `im`, replicated back into a 3-channel RGB image.
def luminance_rgb(im): return rgb_from_gray(luminance(im))
def rotate(img, angle, fill = 0):
    """ Rotate image around its center by the given angle (in
    radians). No interpolation is used; indices are rounded. The
    returned image may be larger than the original, but the middle
    pixel corresponds to the middle of the original. Pixels with no
    correspondence are filled as 'fill'.
    Also returns mapping from original image to rotated. """
    # output canvas is r x r, big enough to hold the input at any rotation
    # (r = length of the image diagonal, rounded up)
    r = int(np.ceil(np.sqrt(img.shape[0]**2 + img.shape[1]**2)))
    X, Y = np.mgrid[0:r, 0:r]
    X = X.flatten()
    Y = Y.flatten()
    # inverse mapping: for each output pixel, the input pixel it samples from
    # NOTE(review): integer '/2' assumes Python 2 floor division for ints
    X2 = np.array(np.round(img.shape[1]/2 + np.cos(angle) * (X - r/2) - np.sin(angle) * (Y - r/2)), dtype = int)
    Y2 = np.array(np.round(img.shape[0]/2 + np.sin(angle) * (X - r/2) + np.cos(angle) * (Y - r/2)), dtype = int)
    # keep only output pixels whose source coordinate lies inside the input
    good = ut.logical_and_many(X2 >= 0, X2 < img.shape[1], Y2 >= 0, Y2 < img.shape[0])
    out = fill + np.zeros((r, r) if img.ndim == 2 else (r, r, img.shape[2]), dtype = img.dtype)
    out[Y[good], X[good]] = img[Y2[good], X2[good]]
    # NOTE(review): T appears to compose translate-to-center, rotate, and
    # translate-back; its inverse is returned as the original->rotated map --
    # confirm the direction against ut.rigid_transform's convention.
    T = np.dot(np.dot(ut.rigid_transform(np.eye(2), [img.shape[1]/2, img.shape[0]/2]),
                      ut.rigid_transform(ut.rotation_matrix2(angle))),
               ut.rigid_transform(np.eye(2), [-r/2, -r/2]))
    return out, np.linalg.inv(T)
def map_img(f, im, dtype = None, components = None):
    """Apply `f` to every pixel of `im` and return the results as a new array.

    f: function of a single pixel value (a scalar for 2-D images, a channel
        vector for 3-D ones).
    im: input image.
    dtype: dtype of the output; defaults to im's dtype.
    components: if given, each f(pixel) is a length-`components` sequence and
        the output gains a trailing axis of that size.
    """
    out_shape = im.shape if components is None else im.shape + (components,)
    new_im = np.zeros(out_shape, dtype = im.dtype if dtype is None else dtype)
    # np.ndindex replaces the original nested xrange loops (Python-2-only)
    for y, x in np.ndindex(im.shape[0], im.shape[1]):
        new_im[y, x] = f(im[y, x])
    return new_im
def add_border(img, w, h, color = (0, 0, 0)):
    """Return `img` surrounded by a border `w` pixels wide (left/right) and
    `h` pixels tall (top/bottom) in the given color."""
    assert 0 <= w
    assert 0 <= h
    rows, cols = img.shape[0], img.shape[1]
    out = make(cols + 2*w, rows + 2*h, color)
    out[h : h + rows, w : w + cols] = img
    return out
def pad_corner(im, pw, ph, color = (0, 0, 0)):
    """Return `im` padded on the right by `pw` and on the bottom by `ph`
    pixels of `color`; the original image stays in the top-left corner."""
    rows, cols = im.shape[0], im.shape[1]
    out = make(cols + pw, rows + ph, color)
    out[:rows, :cols] = im
    return out
def expand(im, new_shape, opt = 'center'):
    """Place `im` on a larger black canvas.

    new_shape: a float (fractional scale of the current size) or an (h, w)
        tuple of target dimensions (extra entries ignored).
    opt: 'center' places the image via blit(..., opt='center'); 'corner'
        puts it at the top-left.
    Returns a copy of `im` unchanged if it already meets the target size.
    """
    if type(new_shape) == type(0.):
        new_w = int(im.shape[1]*new_shape)
        new_h = int(im.shape[0]*new_shape)
    elif type(new_shape) == type((1,)):
        new_shape = new_shape[:2]
        new_h, new_w = new_shape
    else:
        raise RuntimeError("Don't know how to interpret shape")
    if im.shape[0] >= new_h and im.shape[1] >= new_w:
        return im.copy()
    else:
        # promote grayscale input to RGB; the canvas made by make() is 3-channel
        im = rgb_from_gray(im)
        r = make(new_w, new_h)
        if opt == 'center':
            # NOTE(review): this centers blit() on half the *source* size, which
            # cancels blit's own center offset and effectively places the image
            # at the top-left rather than the canvas center -- confirm intent.
            blit(im, r, im.shape[1]/2, im.shape[0]/2, opt = 'center')
        elif opt == 'corner':
            r[:im.shape[0], :im.shape[1]] = im
        return r
def combine_rgb(r, g, b):
    """Stack three single-channel images into one float RGB image of shape
    r.shape + (3,)."""
    out = np.zeros(r.shape + (3,))
    for channel, plane in enumerate((r, g, b)):
        out[:, :, channel] = plane
    return out
def compute_pyramid(ptm, interval, min_size):
    """Build a multi-scale image pyramid (based on pff's featpyramid.m).

    ptm: input image.
    interval: number of pyramid levels per octave (scale halves every
        `interval` levels).
    min_size: stop once the smaller image dimension would drop below this.
    Returns (ims, scale): the list of resized images and the list of their
    scale factors relative to the input.
    """
    # todo: upsample one level
    sc = 2**(1.0/interval)
    # bug fix: was `im.shape[:2]` / `xrange`, but no `im` was in scope
    # (NameError) and xrange is Python-2-only
    imsize = ptm.shape[:2]
    max_scale = int(1 + np.floor(np.log(np.min(imsize)/min_size)/np.log(sc)))
    ims = [None]*max_scale
    scale = [None]*len(ims)
    # skipping 2x scale
    for i in range(1, interval+1):
        im_scaled = resize(ptm, 1/sc**(i-1))
        ims[-1 + i] = im_scaled
        scale[-1 + i] = 1/sc**(i-1)
        # each further octave is a repeated halving of this level
        for j in range(i+interval, max_scale+1, interval):
            im_scaled = resize(im_scaled, 0.5)
            ims[-1 + j] = im_scaled
            scale[-1 + j] = 0.5*scale[-1 + j - interval]
    # bug fix: `None not in ims` triggers elementwise ndarray comparison
    # (ambiguous truth value); use identity checks instead
    assert all(entry is not None for entry in ims)
    return ims, scale
#imrotate = scipy.misc.imrotate
def imrotate(*args):
    """Rotate an image via scipy.misc.imrotate, suppressing its deprecation
    warning.

    NOTE(review): scipy.misc.imrotate was deprecated in SciPy 1.0 and removed
    in 1.2 -- this wrapper only works with an older SciPy.
    """
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return scipy.misc.imrotate(*args)
def from_fig_slow(fig = None, tight = True):
    """Render a matplotlib figure to a numpy image by saving it as a PNG into
    an in-memory buffer and reading it back (slow but reliable).

    fig: figure to render; defaults to the current pylab figure.
    tight: crop the saved figure tightly around its contents.
    """
    ext = 'png'
    if fig is None:
        fig = pylab.gcf()
    # StringIO implies Python 2 here; a Python 3 port would need io.BytesIO
    IO = StringIO()
    if tight:
        pylab.savefig(IO, format = ext, bbox_inches = 'tight')
    else:
        pylab.savefig(IO, format = ext)
    IO.seek(0)
    return from_pil(Image.open(IO))
# def from_fig_fast(fig = None, tight = True):
# ext = 'raw'
# if fig is None:
# fig = pylab.gcf()
# IO = StringIO()
# pylab.savefig(IO, format = ext)
# IO.seek(0)
# w, h = fig.canvas.get_width_height()
# return np.fromstring(IO.buf, dtype = np.uint8).reshape((600, -1, 4))
# def from_fig(fig = None):
# """
# http://www.icare.univ-lille1.fr/wiki/index.php/How_to_convert_a_matplotlib_figure_to_a_numpy_array_or_a_PIL_image
# @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
# @param fig a matplotlib figure
# @return a numpy 3D array of RGBA values
# """
# if fig is None:
# fig = pylab.gcf()
# # draw the renderer
# fig.canvas.draw()
# # Get the RGBA buffer from the figure
# w,h = fig.canvas.get_width_height()
# buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
# buf.shape = (h, w, 4)
# # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel | |
wavelet object used in the creation of `wt`.
weighting_function : function
Function used in the creation of `wt`.
signal_dtype : dtype
dtype of signal used in the creation of `wt`.
deep_copy : bool
If true (default), the mother wavelet object used in the creation of
the wavelet object will be fully copied and accessible through
wavelet.motherwavelet; if false, wavelet.motherwavelet will be a
reference to the motherwavelet object (that is, if you change the
mother wavelet object, you will see the changes when accessing the
mother wavelet through the wavelet object - this is NOT good for
tracking how the wavelet transform was computed, but setting
deep_copy to False will save memory).
Returns
-------
Returns an instance of the Wavelet class.
"""
from copy import deepcopy
self.coefs = wt[:,0:wavelet.len_signal]
if wavelet.len_signal != wavelet.len_wavelet:
self._pad_coefs = wt[:,wavelet.len_signal:]
else:
self._pad_coefs = None
if deep_copy:
self.motherwavelet = deepcopy(wavelet)
else:
self.motherwavelet = wavelet
self.weighting_function = weighting_function
self._signal_dtype = signal_dtype
def get_gws(self):
"""Calculate Global Wavelet Spectrum.
References
----------
<NAME>., and <NAME>, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
gws = self.get_wavelet_var()
return gws
def get_wes(self):
"""Calculate Wavelet Energy Spectrum.
References
----------
<NAME>., and <NAME>, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
from scipy.integrate import trapz
coef = 1. / (self.motherwavelet.fc * self.motherwavelet.cg)
wes = coef * trapz(np.power(np.abs(self.coefs), 2), axis = 1);
return wes
def get_wps(self):
"""Calculate Wavelet Power Spectrum.
References
----------
<NAME>., and <NAME>, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
wps = (1./ self.motherwavelet.len_signal) * self.get_wes()
return wps
def get_wavelet_var(self):
"""Calculate Wavelet Variance (a.k.a. the Global Wavelet Spectrum of
Torrence and Compo (1998)).
References
----------
<NAME>., and <NAME>, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
coef = self.motherwavelet.cg * self.motherwavelet.fc
wvar = (coef / self.motherwavelet.len_signal) * self.get_wes()
return wvar
def scalogram(self, show_coi=False, show_wps=False, ts=None, time=None,
use_period=True, ylog_base=None, xlog_base=None,
origin='top', figname=None):
""" Scalogram plotting routine.
Creates a simple scalogram, with optional wavelet power spectrum and
time series plots of the transformed signal.
Parameters
----------
show_coi : bool
Set to True to see Cone of Influence
show_wps : bool
Set to True to see the Wavelet Power Spectrum
ts : array
1D array containing time series data used in wavelet transform. If set,
time series will be plotted.
time : array of datetime objects
1D array containing time information
use_period : bool
Set to True to see figures use period instead of scale
ylog_base : float
If a log scale is desired, set `ylog_base` as float. (for log 10, set
ylog_base = 10)
xlog_base : float
If a log scale is desired, set `xlog_base` as float. (for log 10, set
xlog_base = 10) *note that this option is only valid for the wavelet power
spectrum figure.
origin : 'top' or 'bottom'
Set origin of scale axis to top or bottom of figure
Returns
-------
None
Examples
--------
Create instance of SDG mother wavelet, normalized, using 10 scales and the
center frequency of the Fourier transform as the characteristic frequency.
Then, perform the continuous wavelet transform and plot the scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center')
# wavelet = cwt(data, mother_wavelet)
# wave_coefs.scalogram(origin = 'bottom')
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import poly_between
if ts is not None:
show_ts = True
else:
show_ts = False
if not show_wps and not show_ts:
# only show scalogram
figrow = 1
figcol = 1
elif show_wps and not show_ts:
# show scalogram and wps
figrow = 1
figcol = 4
elif not show_wps and show_ts:
# show scalogram and ts
figrow = 2
figcol = 1
else:
# show scalogram, wps, and ts
figrow = 2
figcol = 4
if time is None:
x = np.arange(self.motherwavelet.len_signal)
else:
x = time
if use_period:
y = self.motherwavelet.scales / self.motherwavelet.fc
else:
y = self.motherwavelet.scales
fig = plt.figure(figsize=(16, 12), dpi=160)
ax1 = fig.add_subplot(figrow, figcol, 1)
# if show wps, give 3/4 space to scalogram, 1/4 to wps
if show_wps:
# create temp axis at 3 or 4 col of row 1
axt = fig.add_subplot(figrow, figcol, 3)
# get location of axtmp and ax1
axt_pos = axt.get_position()
ax1_pos = ax1.get_position()
axt_points = axt_pos.get_points()
ax1_points = ax1_pos.get_points()
# set axt_pos left bound to that of ax1
axt_points[0][0] = ax1_points[0][0]
ax1.set_position(axt_pos)
fig.delaxes(axt)
if show_coi:
# coi_coef is defined using the assumption that you are using
# period, not scale, in plotting - this handles that behavior
if use_period:
coi = self.motherwavelet.get_coi() / self.motherwavelet.fc / self.motherwavelet.sampf
else:
coi = self.motherwavelet.get_coi()
coi[coi == 0] = y.min() - 0.1 * y.min()
xs, ys = poly_between(np.arange(0, len(coi)), np.max(y), coi)
ax1.fill(xs, ys, 'k', alpha=0.4, zorder = 2)
contf=ax1.contourf(x,y,np.abs(self.coefs)**2)
fig.colorbar(contf, ax=ax1, orientation='vertical', format='%2.1f')
if ylog_base is not None:
ax1.axes.set_yscale('log', basey=ylog_base)
if origin is 'top':
ax1.set_ylim((y[-1], y[0]))
elif origin is 'bottom':
ax1.set_ylim((y[0], y[-1]))
else:
raise OriginError('`origin` must be set to "top" or "bottom"')
ax1.set_xlim((x[0], x[-1]))
ax1.set_title('scalogram')
ax1.set_ylabel('time')
if use_period:
ax1.set_ylabel('period')
ax1.set_xlabel('time')
else:
ax1.set_ylabel('scales')
if time is not None:
ax1.set_xlabel('time')
else:
ax1.set_xlabel('sample')
if show_wps:
ax2 = fig.add_subplot(figrow,figcol,4,sharey=ax1)
if use_period:
ax2.plot(self.get_wps(), y, 'k')
else:
ax2.plot(self.motherwavelet.fc * self.get_wps(), y, 'k')
if ylog_base is not None:
ax2.axes.set_yscale('log', basey=ylog_base)
if xlog_base is not None:
ax2.axes.set_xscale('log', basey=xlog_base)
if origin is 'top':
ax2.set_ylim((y[-1], y[0]))
else:
ax2.set_ylim((y[0], y[-1]))
if use_period:
ax2.set_ylabel('period')
else:
ax2.set_ylabel('scales')
ax2.grid()
ax2.set_title('wavelet power spectrum')
if show_ts:
ax3 = fig.add_subplot(figrow, 2, 3, sharex=ax1)
ax3.plot(x, ts)
ax3.set_xlim((x[0], x[-1]))
ax3.legend(['time series'])
ax3.grid()
# align time series fig with scalogram fig
t = ax3.get_position()
ax3pos=t.get_points()
ax3pos[1][0]=ax1.get_position().get_points()[1][0]
t.set_points(ax3pos)
ax3.set_position(t)
if (time is not None) or use_period:
ax3.set_xlabel('time')
else:
ax3.set_xlabel('sample')
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close('all')
def cwt(x, wavelet, weighting_function=lambda x: x**(-0.5), deep_copy=True):
"""Computes the continuous wavelet transform of x using the mother wavelet
`wavelet`.
This function computes the continuous wavelet transform of x using an
instance a mother wavelet object.
The cwt is defined as:
T(a,b) = w(a) integral(-inf,inf)(x(t) * psi*{(t-b)/a} dt
which is a convolution. In this algorithm, the convolution in the time
domain is implemented as a multiplication in the Fourier domain.
Parameters
----------
x : 1D array
Time series to be transformed by the cwt
wavelet : Instance of the MotherWavelet class
Instance of the MotherWavelet class for a particular wavelet family
weighting_function: Function used to weight
Typically w(a) = a^(-0.5) is chosen as it ensures that the
wavelets at every scale have the same energy.
deep_copy : bool
If true (default), the mother wavelet object used in the creation of
the wavelet object will be fully copied and accessible through
wavelet.motherwavelet; if false, wavelet.motherwavelet will be a
reference to the motherwavelet object (that is, if you change the
mother wavelet object, you will see the changes when accessing the
mother wavelet through the wavelet object - this is NOT good for
tracking how the wavelet transform was computed, but setting
deep_copy to False will save memory).
Returns
-------
Returns an instance of the Wavelet class. The coefficients of the transform
can be obtain by the coefs() method (i.e. wavelet.coefs() )
Examples
--------
Create instance of SDG mother wavelet, normalized, using 10 scales and the
center frequency of the Fourier transform as the characteristic frequency.
Then, perform the continuous wavelet transform and plot the scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center')
# wavelet = cwt(data, mother_wavelet)
# wave_coefs.scalogram()
References
----------
<NAME>., 2002: The Illustrated Wavelet Transform Handbook. Taylor
and Francis Group, New York/London. 353 pp.
"""
signal_dtype = | |
<gh_stars>0
import torch
import numpy as np
import pandas as pd
from transformers import ElectraModel, ElectraTokenizer, AutoTokenizer
from konlpy.tag import Mecab
import os
import re
import pickle
import json
from datautils import utils
from build_koelectra_vocab import build_vocab
# nltk.download("punkt")
# 차원 확인용 함수
def check_glove_dimension():
    """Print the word-vector dimension and vocabulary size of the Korean GloVe pickle.

    Debug helper: loads ``glove.korean.pkl`` from the working directory (a dict
    mapping token -> vector) and reports the embedding dimension (taken from the
    vector for '그') and the number of vocabulary entries.
    """
    # BUGFIX: use a context manager so the file handle is closed even if
    # unpickling fails (the original leaked the open file object).
    with open("glove.korean.pkl", 'rb') as f:
        glove = pickle.load(f, encoding="cp949")
    dim_word = len(glove['그'])
    length = len(glove)
    print('dimension: ' + str(dim_word))
    print('key-length: ' + str(length))
def load_video_paths(args):
    ''' Load a list of video file paths.

    For every dataset category that ships an annotation file
    (``output.json``: a list of instances, each with a ``vid`` key), join each
    video id onto the matching raw-data (원천데이터) directory.

    :param args: namespace providing ``video_dir``, the dataset root.
    :return: list of path strings, in the same category order as before.
    '''
    # (genre, script-availability subdirectory) pairs present in the dataset.
    categories = [
        ('생활안전', '대본X'),
        ('생활안전', '대본O'),
        ('스포츠', '대본X'),
        ('예능교양', '대본O'),
        ('예능교양', '대본X'),
    ]
    video_paths = []
    for genre, script_dir in categories:
        label_path = '{}/라벨링데이터/{}/{}/output.json'.format(args.video_dir, genre, script_dir)
        with open(label_path, 'r') as annotation_file:
            instances = json.load(annotation_file)
        # BUGFIX: the 생활안전/대본X branch used to prepend a stray leading
        # space to the raw-data directory (" {}/원천데이터/…"), producing a
        # broken path; all branches now share one clean template.
        raw_dir = '{}/원천데이터/{}/{}/'.format(args.video_dir, genre, script_dir)
        for instance in instances:
            video_paths.append(os.path.join(raw_dir, instance['vid']))
    return video_paths
def multi_encoding_data_lmtokenizer(args, questions, question_id, video_id, answers, answer_candidates, mode = 'train'):
    """Tokenize questions and answer candidates with a KoELECTRA tokenizer and pickle the result.

    Loads (or builds) the tokenizer and its vocab embedding matrix, encodes all
    questions and flattened answer candidates to padded int32 id arrays, and
    dumps a dict of arrays to ``args.output_pt`` via pickle.

    :param args: needs ``tokenizer_dir``, ``output_pt``, ``dataset``.
    :param questions: list of question strings.
    :param question_id: list of question ids (written through unchanged).
    :param video_id: list of numeric video ids (written through unchanged).
    :param answers: correct-answer indices; replaced by -1 when mode == 'test'.
    :param answer_candidates: list of per-question candidate-string lists.
    :param mode: 'train' / 'val' / 'test'; the glove matrix is only stored for 'train'.
    """
    #Encode all questions
    print('Encoding data')
    try:
        # Prefer a previously built tokenizer + vocab matrix from disk.
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir, use_fast=True)
        matrix = np.load(args.tokenizer_dir+'vocab_matrix.npy')
        print('KoElectra Vocab matrix loaded:', matrix.shape)
    except:
        # NOTE(review): bare except falls back to rebuilding the vocab on ANY
        # failure (even KeyboardInterrupt) — consider narrowing to OSError.
        tokenizer, matrix = build_vocab(args)
    questions_id = question_id
    video_id_tbw = video_id
    # -1 is a dummy label for the unlabeled test split.
    correct_answers = [int(answer) if mode!='test' else -1 for answer in answers]
    num_choice = len(answer_candidates[0])
    # Batch-encode with padding so every question row has equal length.
    questions_encoded = np.asarray(tokenizer(questions, padding=True)['input_ids'], dtype=np.int32)
    print('questions_encoded:', questions_encoded.shape)
    questions_len = [len(tokenizer.tokenize(quest, add_special_tokens=True)) for quest in questions]
    # Flatten candidates so one tokenizer call pads them all consistently.
    answer_candidates = [answer for li in answer_candidates for answer in li]
    answer_maxlen = max([len(tokenizer.tokenize(answer, add_special_tokens=True)) for answer in answer_candidates])
    all_answer_candidate_encoded = np.asarray(tokenizer(answer_candidates, padding=True)['input_ids'], dtype=np.int32)
    # NOTE(review): this reshape assumes the tokenizer's padded width equals
    # answer_maxlen (tokenize() length counts matching __call__ padding) —
    # confirm for the tokenizer in use, otherwise the reshape raises.
    all_answer_candidate_encoded = all_answer_candidate_encoded.reshape(-1, num_choice, answer_maxlen)
    print('all_answer_candidate_encoded:', all_answer_candidate_encoded.shape)
    all_answer_candidate_lens = [len(tokenizer.tokenize(answer, add_special_tokens=True))
                                 for answer in np.array(answer_candidates).flatten()]
    all_answer_candidate_lens = np.asarray(all_answer_candidate_lens, dtype=np.int32).reshape(-1, num_choice)
    print('Writing ', args.output_pt.format(args.dataset, args.dataset, mode))
    obj = {
        'questions': questions_encoded,
        'questions_len': questions_len,
        'question_id': questions_id,
        'video_ids': np.asarray(video_id_tbw),
        'ans_candidates': all_answer_candidate_encoded,
        'ans_candidates_len': all_answer_candidate_lens,
        'answers': correct_answers,
        'glove': matrix if mode == 'train' else None,
    }
    with open(args.output_pt.format(args.dataset, args.dataset, mode), 'wb') as f:
        pickle.dump(obj,f)
def multi_encoding_data(args, vocab, questions, question_id, video_id, answers, answer_candidates, mode = 'train'):
    """Encode questions/answer candidates with a Mecab + vocab lookup and pickle the result.

    Each question and candidate string is morpheme-tokenized with Mecab,
    mapped to indices via ``vocab['question_answer_token_to_idx']``
    (unknown tokens allowed), padded with '<NULL>' to a common length, and
    written as a pickled dict of arrays to ``args.output_pt``.

    :param vocab: dict with a 'question_answer_token_to_idx' mapping.
    :param mode: split name used to format the output path.
    """
    #Encode all questions
    print('Encoding data')
    m = Mecab().morphs
    questions_encoded = []
    questions_len =[]
    # NOTE(review): questions_id aliases the caller's question_id list, and
    # the loop below appends questions_id[idx] back into it — the list ends
    # up doubled (compare multichoice_encoding_data, which also does this).
    # Confirm whether downstream readers rely on this before changing.
    questions_id = question_id
    all_answer_candidate_encoded = []
    all_answer_candidate_lens = []
    # NOTE(review): video_id_tbw aliases video_id and is appended to below,
    # duplicating every id; the sibling multichoice_encoding_data uses a
    # fresh list instead — looks like a copy-paste bug, verify.
    video_id_tbw=video_id
    correct_answers = []
    for idx, question in enumerate(questions):
        question_tokens = m(question)
        question_encoded = utils.encode(question_tokens, vocab['question_answer_token_to_idx'], allow_unk=True)
        questions_encoded.append(question_encoded)
        questions_len.append(len(question_encoded))
        questions_id.append(questions_id[idx])
        video_id_tbw.append(video_id[idx])
        # ground truth
        answer = int(answers[idx])
        correct_answers.append(answer)
        # answer candidates
        candidates = answer_candidates[idx]
        candidates_encoded = []
        candidates_len = []
        for answer in candidates:
            answer_tokens = m(answer)
            candidate_encoded = utils.encode(answer_tokens, vocab['question_answer_token_to_idx'], allow_unk=True)
            candidates_encoded.append(candidate_encoded)
            candidates_len.append(len(candidate_encoded))
        all_answer_candidate_encoded.append(candidates_encoded)
        all_answer_candidate_lens.append(candidates_len)
    # Pad encoded questions
    max_question_length = max(len(x) for x in questions_encoded)
    for qe in questions_encoded:
        while len(qe) < max_question_length:
            qe.append(vocab['question_answer_token_to_idx']['<NULL>'])
    questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
    questions_len = np.asarray(questions_len, dtype=np.int32)
    print(questions_encoded.shape)
    # Pad encoded answer candidates
    max_answer_candidate_length = max(max(len(x) for x in candidate) for candidate in all_answer_candidate_encoded)
    for ans_cands in all_answer_candidate_encoded:
        for ans in ans_cands:
            while len(ans) < max_answer_candidate_length:
                ans.append(vocab['question_answer_token_to_idx']['<NULL>'])
    all_answer_candidate_encoded = np.asarray(all_answer_candidate_encoded, dtype=np.int32)
    all_answer_candidate_lens = np.asarray(all_answer_candidate_lens, dtype=np.int32)
    print(all_answer_candidate_encoded.shape)
    # No glove matrix is stored by this variant (the lmtokenizer path has one).
    glove_matrix = None
    print('Writing ', args.output_pt.format(args.dataset, args.dataset, mode))
    obj = {
        'questions': questions_encoded,
        'questions_len': questions_len,
        'question_id': questions_id,
        'video_ids': np.asarray(video_id_tbw),
        'ans_candidates': all_answer_candidate_encoded,
        'ans_candidates_len': all_answer_candidate_lens,
        'answers': correct_answers,
        'glove': glove_matrix,
    }
    with open(args.output_pt.format(args.dataset, args.dataset, mode), 'wb') as f:
        pickle.dump(obj,f)
def multichoice_encoding_data(args, vocab, questions, question_id, video_id, answers, answer_candidates, mode = 'train'):
    """Encode multiple-choice QA data with Mecab + vocab lookup; attach GloVe for 'train'.

    Same pipeline as multi_encoding_data (Mecab morphemes -> vocab indices ->
    '<NULL>'-padded int32 arrays -> pickled dict at ``args.output_pt``), but
    builds the GloVe embedding matrix from ``args.glove_pt`` when
    mode == 'train', and stores a 0 dummy answer for the test split.
    """
    #Encode all questions
    print('Encoding data')
    m = Mecab().morphs
    questions_encoded = []
    questions_len =[]
    # NOTE(review): questions_id aliases question_id and is appended to in the
    # loop (questions_id.append(questions_id[idx])), doubling its length —
    # confirm downstream readers expect this before changing.
    questions_id = question_id
    all_answer_candidate_encoded = []
    all_answer_candidate_lens = []
    video_id_tbw=[]
    correct_answers = []
    for idx, question in enumerate(questions):
        question_tokens = m(question)
        question_encoded = utils.encode(question_tokens, vocab['question_answer_token_to_idx'], allow_unk=True)
        questions_encoded.append(question_encoded)
        questions_len.append(len(question_encoded))
        questions_id.append(questions_id[idx])
        video_id_tbw.append(video_id[idx])
        # ground truth
        answer = int(answers[idx]) if mode!='test' else 0 # dummy for test answer
        correct_answers.append(answer)
        # answer candidates
        candidates = answer_candidates[idx]
        candidates_encoded = []
        candidates_len = []
        for answer in candidates:
            answer_tokens = m(answer)
            candidate_encoded = utils.encode(answer_tokens, vocab['question_answer_token_to_idx'], allow_unk=True)
            candidates_encoded.append(candidate_encoded)
            candidates_len.append(len(candidate_encoded))
        all_answer_candidate_encoded.append(candidates_encoded)
        all_answer_candidate_lens.append(candidates_len)
    # Pad encoded questions
    max_question_length = max(len(x) for x in questions_encoded)
    for qe in questions_encoded:
        while len(qe) < max_question_length:
            qe.append(vocab['question_answer_token_to_idx']['<NULL>'])
    questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
    questions_len = np.asarray(questions_len, dtype=np.int32)
    print(questions_encoded.shape)
    # Pad encoded answer candidates
    max_answer_candidate_length = max(max(len(x) for x in candidate) for candidate in all_answer_candidate_encoded)
    for ans_cands in all_answer_candidate_encoded:
        for ans in ans_cands:
            while len(ans) < max_answer_candidate_length:
                ans.append(vocab['question_answer_token_to_idx']['<NULL>'])
    all_answer_candidate_encoded = np.asarray(all_answer_candidate_encoded, dtype=np.int32)
    all_answer_candidate_lens = np.asarray(all_answer_candidate_lens, dtype=np.int32)
    print(all_answer_candidate_encoded.shape)
    glove_matrix = None
    if mode in ['train']:
        # Build an index -> vector matrix aligned with the vocab; tokens
        # missing from the GloVe dict get a zero vector.
        token_itow = {i: w for w, i in vocab['question_answer_token_to_idx'].items()}
        print("Load glove from %s" % args.glove_pt)
        # NOTE(review): this open() has no context manager, so the handle is
        # leaked; also assumes 'the' exists in the GloVe dict to read dim_word.
        glove = pickle.load(open(args.glove_pt, 'rb'))
        dim_word = glove['the'].shape[0]
        glove_matrix = []
        for i in range(len(token_itow)):
            vector = glove.get(token_itow[i], np.zeros((dim_word,)))
            glove_matrix.append(vector)
        glove_matrix = np.asarray(glove_matrix, dtype=np.float32)
        print(glove_matrix.shape)
    print('Writing ', args.output_pt.format(args.dataset, args.dataset, mode))
    obj = {
        'questions': questions_encoded,
        'questions_len': questions_len,
        'question_id': questions_id,
        'video_ids': np.asarray(video_id_tbw),
        'ans_candidates': all_answer_candidate_encoded,
        'ans_candidates_len': all_answer_candidate_lens,
        'answers': correct_answers,
        'glove': glove_matrix,
    }
    with open(args.output_pt.format(args.dataset, args.dataset, mode), 'wb') as f:
        pickle.dump(obj,f)
def process_question_multiChoices(args):
    """Encode a single ad-hoc multiple-choice question for inference.

    Wraps the question/answers from ``args`` into one-element lists, derives a
    numeric video id from the alphabetic prefix embedded in ``args.vid``,
    loads the vocab JSON, and delegates to multi_encoding_data in 'test' mode.

    :param args: needs ``que``, ``vid``, ``answers``, ``vocab_json``, ``dataset``.
    """
    print('Loading data')
    question_id = list([29201])
    questions = list([args.que])
    correct_idx = list([3])
    video = args.vid
    answer_candidates = np.asarray(args.answers)
    video_id = []
    script =[]
    # Ordered letter -> numeric-prefix table, checked in the original branch
    # order ('I' deliberately last), so names containing several letters map
    # exactly as before.
    letter_map = [('A', '1'), ('B', '2'), ('C', '3'), ('D', '4'), ('E', '5'),
                  ('F', '6'), ('G', '7'), ('H', '8'), ('J', '9'), ('K', '10'),
                  ('L', '11'), ('M', '12'), ('I', '13')]
    for letter, prefix in letter_map:
        if letter in video:
            sample_text = prefix + video[9:-4]
            break
    else:
        # BUGFIX: an unmatched video name used to raise UnboundLocalError on
        # the int() call below; fail with a clear message instead.
        raise ValueError('unrecognized video name: {!r}'.format(video))
    video_id.append(int(sample_text))
    with open(args.vocab_json.format(args.dataset, args.dataset), 'r') as f:
        vocab = json.load(f)
    multi_encoding_data(args, vocab, questions, question_id, video_id, correct_idx, answer_candidates, mode='test')
def process_questions_mulchoices_lmtokenizer(args):
    """Load the multiple-choice QA annotations and encode them with the LM tokenizer.

    Reads the five category ``output.json`` files for the train or test split,
    optionally shuffles rows deterministically, converts each video name's
    alphabetic prefix to a numeric id, and delegates the actual encoding to
    multi_encoding_data_lmtokenizer.

    :param args: needs ``video_dir``, ``mode``, ``split_train``, ``seed``, plus
        whatever multi_encoding_data_lmtokenizer requires.
    """
    print('Loading data')
    # train/val annotations live under <video_dir>/train, test under /test.
    split_dir = 'train' if args.mode in ["train", "val"] else 'test'
    categories = [('생활안전', '대본X'), ('생활안전', '대본O'), ('스포츠', '대본X'),
                  ('예능교양', '대본O'), ('예능교양', '대본X')]
    # BUGFIX/modernization: DataFrame.append was deprecated and removed in
    # pandas 2.0; pd.concat over the per-category frames is equivalent.
    json_data = pd.concat([
        pd.read_json('{}/{}/라벨링데이터/{}/{}/output.json'.format(args.video_dir, split_dir, genre, sd))
        for genre, sd in categories
    ])
    # data 랜덤하게 split하기 위해서 permutation 사용.
    # Deterministic shuffle so the train/val split is random but reproducible.
    if args.split_train:
        json_data = json_data.iloc[np.random.RandomState(seed=args.seed).permutation(len(json_data))]
    question_id = list(json_data['qid'])
    questions = list(json_data['que'])
    correct_idx = list(json_data['correct_idx'])
    video_name = list(json_data['vid'])
    answer_candidates = np.asarray(json_data['answers'])
    summary = list(json_data['sum'])
    video_id = []
    script=[]
    # script 정보 load — keep scripts only where script_exi == 1.
    init_script = list(json_data['script'])
    script_exi = list(json_data['script_exi'])
    for idx, exi in enumerate(script_exi):
        if exi == 1:
            script.append(init_script[idx])
    # video_id: map the alphabetic program prefix to its numeric code,
    # preserving the original branch order ('I' last).
    letter_map = [('A', '1'), ('B', '2'), ('C', '3'), ('D', '4'), ('E', '5'),
                  ('F', '6'), ('G', '7'), ('H', '8'), ('J', '9'), ('K', '10'),
                  ('L', '11'), ('M', '12'), ('I', '13')]
    for idx, video in enumerate(video_name):
        for letter, prefix in letter_map:
            if letter in video:
                sample_text = prefix + video[11:-4]
                break
        else:
            # BUGFIX: an unmatched name used to raise UnboundLocalError below.
            raise ValueError('unrecognized video name: {!r}'.format(video))
        print("print sample_text"+sample_text)
        video_id.append(int(sample_text))
    print(answer_candidates.shape)
    print('number of questions: %s' % len(questions))
    multi_encoding_data_lmtokenizer(args, questions, question_id, video_id, correct_idx, answer_candidates, mode = args.mode)
def process_questions_mulchoices(args):
print('Loading data')
if args.mode in ["train", "val"]:
json_data=pd.read_json('{}/train/라벨링데이터/생활안전/대본X/output.json'.format(args.video_dir))
json_data=json_data.append(pd.read_json('{}/train/라벨링데이터/생활안전/대본O/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/train/라벨링데이터/스포츠/대본X/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/train/라벨링데이터/예능교양/대본O/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/train/라벨링데이터/예능교양/대본X/output.json'.format(args.video_dir)))
else:
json_data = pd.read_json('{}/test/라벨링데이터/생활안전/대본X/output.json'.format(args.video_dir))
json_data=json_data.append(pd.read_json('{}/test/라벨링데이터/생활안전/대본O/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/test/라벨링데이터/스포츠/대본X/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/test/라벨링데이터/예능교양/대본O/output.json'.format(args.video_dir)))
json_data=json_data.append(pd.read_json('{}/test/라벨링데이터/예능교양/대본X/output.json'.format(args.video_dir)))
# data 랜덤하게 split하기 위해서 permutation 사용.
if args.split_train:
json_data = json_data.iloc[np.random.RandomState(seed=args.seed).permutation(len(json_data))]
question_id = list(json_data['qid'])
questions = list(json_data['que'])
correct_idx = list(json_data['correct_idx'])
video_name = list(json_data['vid'])
answer_candidates = np.asarray(json_data['answers'])
summary = list(json_data['sum'])
m = Mecab().morphs
video_id = []
script=[]
# script 정보 load
init_script = list(json_data['script'])
script_exi = list(json_data['script_exi'])
for idx, exi in enumerate(script_exi):
if exi == 1:
script.append(init_script[idx])
# video_id
for idx, video in enumerate(video_name):
if 'A' in video:
sample_text = '1' + video[11:-4]
elif 'B' in video:
sample_text = '2'+ video[11:-4]
elif 'C' in video:
sample_text = '3'+ video[11:-4]
elif 'D' in video:
sample_text = '4'+ video[11:-4]
elif 'E' in video:
sample_text = '5' + video[11:-4]
elif 'F' in video:
sample_text = '6' + video[11:-4]
elif 'G' in video:
sample_text = '7' + video[11:-4]
elif 'H' in video:
sample_text = '8' + video[11:-4]
| |
<reponame>ztanml/ptpqp
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 15:21:45 2015
@author: zhaoshiwen
@email: <EMAIL>
"""
import numpy as np
class MELD:
    def __init__(self, Y, Yt, k, Phi = np.array([])):
        """Set up the moment-matching estimator for mixed-type data.

        Y : (p, n) array — p observed variables over n samples; categorical
            rows hold level codes 0,1,2,...
        Yt: the type of y_j
            0: categorical: levels are 0,1,2,...
            1: distribution with mean across the real line
            2: distribution with positive mean
        k : number of latent components (Dirichlet dimension).
        Phi : optional warm-start parameters; when empty, categorical rows are
            initialized with near-uniform Dirichlet(100,...,100) draws.
        """
        (p,n) = Y.shape
        # p observed variables, n samples.
        self.p = p
        self.n = n
        # d[j]: dimension of variable j — number of levels if categorical, else 1.
        # NOTE(review): int8 caps categorical cardinality at 127 — confirm inputs.
        self.d = np.zeros(p, dtype=np.int8)
        for j in range(p):
            if Yt[j] == 0:
                self.d[j] = np.amax(Y[j,:]) + 1
            else:
                self.d[j] = 1
        self.k = k
        self.Yt = np.copy(Yt)
        # Symmetric Dirichlet(0.1) prior over mixture weights; lambda2/lambda3
        # are the diagonal weights in the 2nd/3rd cross-moment decompositions.
        self.alpha = np.array([0.1]*k)
        self.alpha0 = sum(self.alpha)
        self.lambda2 = self.alpha/(self.alpha0*(self.alpha0+1))
        ## the diagonal matrix for second cross moment
        self.lambda3 = 2*self.alpha/(self.alpha0*(self.alpha0+1)*(self.alpha0+2))
        # Per-variable parameter arrays (object arrays because shapes vary by j):
        # Phi[j]: (k, d_j) for categorical variables, length-k vector otherwise;
        # X[j]: elementwise sqrt of Phi[j]; Yd[j]: one-hot matrix or raw values.
        self.Phi = np.zeros(p,dtype=object)
        self.X = np.zeros(p,dtype=object)
        self.Yd = np.zeros(p,dtype=object)
        for j in range(p):
            if Yt[j] == 0:
                # One-hot encode the categorical column.
                Y_j = np.zeros((n,self.d[j]))
                for i in range(n):
                    Y_j[i,int(Y[j,i])] = 1
                self.Yd[j] = Y_j
                self.Phi[j] = np.zeros((k,self.d[j]))
                self.X[j] = np.zeros((k,self.d[j]))
                if Phi.size != 0:
                    # Warm start from the provided parameters.
                    for h in range(k):
                        self.Phi[j][h,:] = Phi[j][h,:]
                        self.X[j][h,:] = np.sqrt(self.Phi[j][h,:])
                else:
                    # Random near-uniform initialization (consumes np.random state).
                    for h in range(k):
                        self.Phi[j][h,:] = np.random.dirichlet([100.0]*self.d[j])
                        self.X[j][h,:] = np.sqrt(self.Phi[j][h,:])
            else:
                self.Yd[j] = np.copy(Y[j,:])
                self.Phi[j] = np.zeros(k)
                if Phi.size != 0:
                    for h in range(k):
                        self.Phi[j][h] = Phi[j][h]
def calM1(self): # initialize Phi
self._M1 = np.zeros(self.p,dtype=object)
for j in range(self.p):
if self.Yt[j] == 0:
M1_j = np.zeros(self.d[j])
for i in range(self.n):
M1_j = M1_j + self.Yd[j][i,:]
M1_j = M1_j/self.n
self._M1[j] = M1_j
# for h in range(self.k):
# self.Phi[j][h,:] = M1_j[:]
# self.X[j][h,:] = np.sqrt(M1_j[:])
else:
self._M1[j] = np.sum(self.Yd[j])/self.n
for h in range(self.k):
self.Phi[j][h] = self._M1[j]
def calM2(self):
if not hasattr(self,'_M1'):
self.calM1()
self._E2 = np.zeros((self.p,self.p),dtype=object)
self._M2 = np.zeros((self.p,self.p),dtype=object)
for j1 in range(self.p):
for j2 in range(self.p):
E2_j1j2 = np.dot(self.Yd[j1].transpose(),self.Yd[j2])/self.n
M2_j1j2 = E2_j1j2 - \
self.alpha0/(self.alpha0+1)*self._outer(self._M1[j1],self._M1[j2])
self._E2[j1,j2] = E2_j1j2
self._M2[j1,j2] = M2_j1j2
def calM2_bar(self):
if not hasattr(self, '_M2'):
self.calM2()
self._M2_bar = np.zeros((self.p,self.p),dtype=object)
self._f2d = 0
for j in range(self.p):
for t in range(self.p):
if t != j:
M2_bar_jt = self._M2[j,t] - \
np.dot(self.Phi[j].transpose()*self.lambda2,self.Phi[t])
self._M2_bar[j,t] = M2_bar_jt
self._f2d = self._f2d + self.d[j]*self.d[t]
self._f2d = self._f2d/2
    def calM3(self):
        """Compute centered third cross moments for every ordered triple (j, s, t).

        Uses the helper kernels ``_calE3`` (full triple product) and
        ``_calE3bbm`` / ``_calE3bmb`` / ``_calE3mbb`` (two data factors and one
        mean, in each position) to assemble the Dirichlet-adjusted third moment
        ``_M3[j, s, t]`` for all triples of distinct variables.

        NOTE(review): the guard tests ``_M2`` but calls ``calM2_bar()``; this
        works because calM2_bar chains back to calM1/calM2 (which set ``_M1``
        used below) — confirm that dependency stays intact if refactoring.
        """
        if not hasattr(self, '_M2'):
            self.calM2_bar()
#        self._E3 = np.zeros((self._p,self._p,self._p),dtype=object)
        # this E3 stores the first two lines of M3
        self._M3 = np.zeros((self.p,self.p,self.p),dtype=object)
        for j in range(self.p):
            #print j
            mu_j = self._M1[j]
            for s in range(self.p):
                if s != j:
                    mu_s = self._M1[s]
                    for t in range(self.p):
                        if t != s and t != j:
                            mu_t = self._M1[t]
                            # Raw triple moment minus the three one-mean terms,
                            # scaled by alpha0/(alpha0+2) ...
                            E3_jst = self._calE3(self.Yd[j],\
                                self.Yd[s],self.Yd[t])
                            E3_jst = E3_jst - \
                                self.alpha0/(self.alpha0+2)*(\
                                self._calE3bbm(self.Yd[j],self.Yd[s],mu_t) + \
                                self._calE3bmb(self.Yd[j],mu_s,self.Yd[t]) + \
                                self._calE3mbb(mu_j,self.Yd[s],self.Yd[t]))
                            # ... plus the rank-one triple-mean correction.
                            M3_jst = E3_jst/self.n + \
                                2*self.alpha0**2/(self.alpha0+1)/(self.alpha0+2)*\
                                self._outer(mu_j,mu_s,mu_t)
                            self._M3[j,s,t] = M3_jst
    def calM3_bar(self):
        """Compute residual third moments after removing the model's fit.

        ``_M3_bar[j, s, t] = _M3[j, s, t] - tensorprod(lambda3; Phi_j, Phi_s,
        Phi_t)`` for every triple of distinct variables. Also sets ``_f3d``:
        one sixth of the total scalar entry count over triples, plus ``_f2d``
        (so it requires calM2_bar to have run, which calM3 guarantees).
        """
        if not hasattr(self, '_M3'):
            self.calM3()
        self._M3_bar = np.zeros((self.p,self.p,self.p),dtype=object)
        self._f3d = 0
        for j in range(self.p):
            Phi_j = self.Phi[j]
            for s in range(self.p):
                if s!=j:
                    Phi_s = self.Phi[s]
                    for t in range(self.p):
                        if t != s and t != j:
                            Phi_t = self.Phi[t]
                            # Model-implied third moment for this triple.
                            self._M3_bar[j,s,t] = self._mytensorprod(self.lambda3,\
                                Phi_j,Phi_s,Phi_t)
                            self._M3_bar[j,s,t] = self._M3[j,s,t] - \
                                self._M3_bar[j,s,t]
                            self._f3d = self._f3d + self.d[j]*self.d[s]*self.d[t]
        # Each unordered triple is visited 6 times (3! orderings).
        self._f3d = self._f3d/6
        self._f3d = self._f3d + self._f2d
def initializeWeight_M2(self):
self._W2 = np.zeros((self.p,self.p),dtype=object)
for j in range(self.p):
for t in range(self.p):
if t != j:
self._W2[j,t] = np.ones((self.d[j],self.d[t]))
if self.d[j] == 1 and self.d[t] == 1:
self._W2[j,t] = 1.0
elif self.d[j] == 1 or self.d[t] == 1:
self._W2[j,t] = np.hstack(self._W2[j,t])
def initializeWeight_M3(self):
if not hasattr(self,'_W2'):
self.initializeWeight_M2()
self._W3 = np.zeros((self.p,self.p,self.p),dtype=object)
for j in range(self.p):
for s in range(self.p):
if s != j:
for t in range(self.p):
if t!=s and t!=j:
self._W3[j,s,t] = np.ones((self.d[j],self.d[s],self.d[t]))
    def updateWeight_M2(self):
        """Refresh the pairwise weights ``_W2`` from the current parameters.

        For each ordered pair (j, t), computes a variance-like quantity of the
        model-implied second moment per entry and stores its reciprocal as the
        weight (inverse-variance weighting). The four branches only differ in
        shape handling: scalar/scalar, scalar/vector (both orders), and
        matrix/matrix — the formula is the same in each.
        """
        for j in range(self.p):
            for t in range(self.p):
                if t != j:
                    # Squared model-implied second moment for the pair (j, t).
                    E2jt = (np.dot(self.Phi[j].transpose()*self.alpha,self.Phi[t]) +
                    self._outer(np.dot(self.Phi[j].transpose(),self.alpha),np.dot(self.Phi[t].transpose(),self.alpha)))
                    E2jt = E2jt**2
                    if self.d[j] == 1 and self.d[t] == 1:
                        # Both variables scalar: weight is a single float.
                        self._W2[j,t] = np.sum(self.Phi[j]**2
                        * self.Phi[t]**2 * self.alpha)
                        self._W2[j,t]= (self._W2[j,t] +
                        np.dot(self.Phi[j]**2,self.alpha)*np.dot(self.Phi[t]**2,self.alpha))
                        self._W2[j,t] = (self._W2[j,t] - E2jt)/self.alpha0/(self.alpha0+1)
                        self._W2[j,t] = 1.0/self._W2[j,t]
                    elif self.d[j] == 1:
                        # j scalar, t categorical: one weight per level of t.
                        self._W2[j,t] = np.zeros(self.d[t])
                        for c_t in range(int(self.d[t])):
                            self._W2[j,t][c_t] = np.sum(self.Phi[j]**2
                            * self.Phi[t][:,c_t]**2 * self.alpha)
                            self._W2[j,t][c_t] = (self._W2[j,t][c_t] +
                            np.dot(self.Phi[j]**2,self.alpha)*np.dot(self.Phi[t][:,c_t]**2,self.alpha))
                        self._W2[j,t] = (self._W2[j,t] - E2jt)/self.alpha0/(self.alpha0+1)
                        self._W2[j,t] = 1.0/self._W2[j,t]
                    elif self.d[t] == 1:
                        # t scalar, j categorical: one weight per level of j.
                        self._W2[j,t] = np.zeros(self.d[j])
                        for c_j in range(int(self.d[j])):
                            self._W2[j,t][c_j] = np.sum(self.Phi[j][:,c_j]**2
                            * self.Phi[t]**2 * self.alpha)
                            self._W2[j,t][c_j] = (self._W2[j,t][c_j] +
                            np.dot(self.Phi[j][:,c_j]**2,self.alpha)*np.dot(self.Phi[t]**2,self.alpha))
                        self._W2[j,t] = (self._W2[j,t] - E2jt)/self.alpha0/(self.alpha0+1)
                        self._W2[j,t] = 1.0/self._W2[j,t]
                    else:
                        # Both categorical: a (d_j, d_t) matrix of weights.
                        self._W2[j,t] = np.zeros((self.d[j],self.d[t]))
                        for c_j in range(int(self.d[j])):
                            for c_t in range(int(self.d[t])):
                                self._W2[j,t][c_j,c_t] = np.sum(self.Phi[j][:,c_j]**2
                                * self.Phi[t][:,c_t]**2 * self.alpha)
                                self._W2[j,t][c_j,c_t] = (self._W2[j,t][c_j,c_t] +
                                np.dot(self.Phi[j][:,c_j]**2,self.alpha)*np.dot(self.Phi[t][:,c_t]**2,self.alpha))
                        self._W2[j,t] = (self._W2[j,t] - E2jt)/self.alpha0/(self.alpha0+1)
                        self._W2[j,t] = 1.0/self._W2[j,t]
    def updateWeight_M3(self):
        """Refresh the triple-wise weights ``_W3`` from the current parameters.

        Builds the Dirichlet second- and third-order mixed moments of the
        latent weights (``E2xx``, ``E3xxx``), then for every ordered triple
        (j, s, t) and every level combination computes a variance-like
        quantity of the model-implied third moment and stores its reciprocal
        as the weight.
        """
        # E[x x'] under Dirichlet(alpha): diagonal + outer, normalized.
        E2xx = np.diag(self.alpha) + np.outer(self.alpha,self.alpha)
        E2xx = E2xx/self.alpha0/(self.alpha0+1)
        # Third mixed moments E[x_h1 x_h2 x_h3]: case split on coincidences.
        E3xxx = np.zeros((self.k,self.k,self.k))
        for h1 in range(self.k):
            for h2 in range(self.k):
                for h3 in range(self.k):
                    if h1 != h2 and h1 != h3:
                        E3xxx[h1,h2,h3] = self.alpha[h1]*self.alpha[h2]*self.alpha[h3]
                    elif h1 == h2 and h1 != h3:
                        E3xxx[h1,h2,h3] = (self.alpha[h1]+1)*self.alpha[h1]*self.alpha[h3]
                    elif h1 == h3 and h1 != h2:
                        E3xxx[h1,h2,h3] = (self.alpha[h1]+1)*self.alpha[h1]*self.alpha[h2]
                    elif h2 == h3 and h2 != h1:
                        E3xxx[h1,h2,h3] = (self.alpha[h2]+1)*self.alpha[h2]*self.alpha[h1]
                    else: # all equal
                        E3xxx[h1,h2,h3] = (self.alpha[h1]+2)*(self.alpha[h1]+1)*self.alpha[h1]
        E3xxx = E3xxx/self.alpha0/(self.alpha0+1)/(self.alpha0+2)
        for j in range(self.p):
            mu_j = np.dot(self.Phi[j].transpose(),self.alpha)/self.alpha0
            for s in range(self.p):
                if s!= j:
                    mu_s = np.dot(self.Phi[s].transpose(),self.alpha)/self.alpha0
                    for t in range(self.p):
                        if t!=s and t!=j:
                            mu_t = np.dot(self.Phi[t].transpose(),self.alpha)/self.alpha0
                            self._W3[j,s,t] = np.zeros((self.d[j],self.d[s],self.d[t]))
                            # Squared model-implied third moment for the triple.
                            E3jst = 2*self.alpha0**2/(self.alpha0+1)/(self.alpha0+2)*\
                                self._outer(mu_j,mu_s,mu_t) - self._mytensorprod(self.lambda3,\
                                self.Phi[j],self.Phi[s],self.Phi[t])
                            E3jst = E3jst**2
                            for cj in range(int(self.d[j])):
                                for cs in range(int(self.d[s])):
                                    for ct in range(int(self.d[t])):
                                        # Second-moment-of-the-estimator terms:
                                        # all squared/mixed Phi combinations
                                        # weighted by E3xxx ...
                                        Phi3jst = \
                                        self._outer(self.Phi[j][:,cj]**2, self.Phi[s][:,cs]**2, self.Phi[t][:,ct]**2) + \
                                        self._outer(self.Phi[j][:,cj], self.Phi[s][:,cs]**2, self.Phi[t][:,ct]**2)*2*self.alpha0*mu_j[cj]/(self.alpha0+2) + \
                                        self._outer(self.Phi[j][:,cj]**2, self.Phi[s][:,cs], self.Phi[t][:,ct]**2)*2*self.alpha0*mu_s[cs]/(self.alpha0+2) + \
                                        self._outer(self.Phi[j][:,cj]**2, self.Phi[s][:,cs]**2, self.Phi[t][:,ct])*2*self.alpha0*mu_t[ct]/(self.alpha0+2) + \
                                        self._outer(self.Phi[j][:,cj], self.Phi[s][:,cs], self.Phi[t][:,ct]**2)*self.alpha0**2*mu_j[cj]*mu_s[cs]/(self.alpha0+2)**2 + \
                                        self._outer(self.Phi[j][:,cj], self.Phi[s][:,cs]**2, self.Phi[t][:,ct])*self.alpha0**2*mu_j[cj]*mu_t[ct]/(self.alpha0+2)**2 + \
                                        self._outer(self.Phi[j][:,cj]**2, self.Phi[s][:,cs], self.Phi[t][:,ct])*self.alpha0**2*mu_s[cs]*mu_t[ct]/(self.alpha0+2)**2
                                        Phi3jst = Phi3jst*E3xxx
                                        # ... plus the pairwise terms weighted
                                        # by E2xx.
                                        Phi2jst = \
                                        np.outer(self.Phi[j][:,cj]**2, self.Phi[s][:,cs]**2)*self.alpha0**2*mu_t[ct]**2/(self.alpha0+2)**2 + \
                                        np.outer(self.Phi[s][:,cs]**2, self.Phi[t][:,ct]**2)*self.alpha0**2*mu_j[cj]**2/(self.alpha0+2)**2 + \
                                        np.outer(self.Phi[j][:,cj]**2, self.Phi[t][:,ct]**2)*self.alpha0**2*mu_s[cs]**2/(self.alpha0+2)**2
                                        Phi2jst = Phi2jst*E2xx
                                        # Variance = E[est^2] - (E[est])^2.
                                        self._W3[j,s,t][cj,cs,ct] = np.sum(Phi3jst) + np.sum(Phi2jst) - E3jst[cj,cs,ct]
                            self._W3[j,s,t] = 1.0/self._W3[j,s,t]
    def estimatePhiGrad_M2(self,S, prt = False, step = 1.0):
        """Estimate Phi by weighted gradient descent on the second-moment residuals.

        Runs up to ``S`` sweeps; within each sweep, every component h is
        updated in turn by temporarily adding its rank-one contribution back
        into ``_M2_bar``, solving the per-variable weighted least-squares
        gradient step, then removing the (updated) contribution again.
        Categorical rows are re-projected to the simplex; positive-mean rows
        are clamped via abs().

        :param S: maximum number of sweeps.
        :param prt: when True, print the sweep index.
        :param step: initial step size; any value != 1.0 enables geometric
            decay (beta = 0.6 per sweep).
        :return: dict with the objective trace 'Q2', parameter snapshots
            'PHI', and the stopping sweep 'iter'.
        """
        if not hasattr(self,'_M2_bar'):
            self.calM2_bar()
        if not hasattr(self,'_W2'):
            self.initializeWeight_M2()
        p = self.p
        d = self.d
        k = self.k
        # step == 1.0 means a constant step; otherwise decay by 0.6 per sweep.
        if step == 1.0:
            beta = 1.0
        else:
            beta = 0.6
        iteration = S
        Q2 = [0]*(iteration)
        PHI = np.zeros(iteration,dtype=object)
        for ii in range(iteration):
            if prt:
                print(ii)
            for h in range(k):
                # Add component h's rank-one term back so _M2_bar excludes
                # only the OTHER components while h is being refit.
                for j in range(p):
                    phi_jh = self.Phi[j][h,:] if self.Yt[j] == 0 else self.Phi[j][h]
                    for t in range(p):
                        if t != j:
                            phi_th = self.Phi[t][h,:] if self.Yt[t] == 0 else self.Phi[t][h]
                            self._M2_bar[j,t] = self._M2_bar[j,t] + \
                                self.lambda2[h]*self._outer(phi_jh,phi_th)
                for j in range(p):
                    # Accumulate the weighted gradient (a2) and curvature (b2)
                    # for variable j against all other variables.
                    a2_jh = np.zeros(d[j])
                    b2_jh = np.zeros(d[j])
                    sub_p = range(p)
                    for t in sub_p:
                        if t != j:
                            phi_th = self.Phi[t][h,:] if self.Yt[t] == 0 else self.Phi[t][h]
                            #a2_jh = a2_jh + np.dot(self._M2_bar[j,t], phi_th)
                            a2_jh = a2_jh + np.dot(self._M2_bar[j,t]*self._W2[j,t], phi_th)
                            #b2_jh = b2_jh + np.sum(phi_th**2)
                            b2_jh = b2_jh + np.dot(self._W2[j,t],phi_th**2)
                    a2_jh = -2.0*self.lambda2[h] * a2_jh
                    b2_jh = self.lambda2[h]**2 * b2_jh
                    a_jh = a2_jh
                    b_jh = b2_jh
                    # Newton-like step scaled by the inverse curvature.
                    e = step*0.5/b_jh
                    if self.Yt[j] == 0:
                        self.Phi[j][h,:] = self.Phi[j][h,:] - e*(a_jh + 2.0*b_jh*self.Phi[j][h,:])
                        #print (a_jh + 2*b_jh*self.Phi[j][h,:])
                        # Project back onto the probability simplex.
                        self.Phi[j][h,:] = np.abs(self.Phi[j][h,:])/np.sum(np.abs(self.Phi[j][h,:]))
                    elif self.Yt[j] == 1:
                        self.Phi[j][h] = self.Phi[j][h] - e*(a_jh + 2.0*b_jh*self.Phi[j][h])
                    else:
                        # Positive-mean variables: clamp to nonnegative.
                        self.Phi[j][h] = self.Phi[j][h] - e*(a_jh + 2.0*b_jh*self.Phi[j][h])
                        self.Phi[j][h] = np.abs(self.Phi[j][h])
                # recover M2_bar
                for j in range(p):
                    phi_jh = self.Phi[j][h,:] if self.Yt[j] == 0 else self.Phi[j][h]
                    for t in range(p):
                        if t != j:
                            phi_th = self.Phi[t][h,:] if self.Yt[t] == 0 else self.Phi[t][h]
                            self._M2_bar[j,t] = self._M2_bar[j,t] - \
                                self.lambda2[h]*self._outer(phi_jh,phi_th)
            # Snapshot the parameters and evaluate the weighted objective.
            PHI[ii] = np.zeros(p,dtype=object)
            diff = 0
            #print j, id(self.Phi[50][0])
            for j in range(p):
                if self.Yt[j] == 0:
                    Phi_j = np.zeros((k,self.d[j]))
                    Phi_j = np.copy(self.Phi[j][:,:])
                    PHI[ii][j] = Phi_j
                else:
                    Phi_j = np.zeros(k)
                    Phi_j[:] = self.Phi[j][:]
                    PHI[ii][j] = Phi_j
                if ii > 0:
                    diff = diff + np.sum((Phi_j - PHI[ii-1][j])**2)
                for t in range(j+1,p):
                    Q2[ii] = Q2[ii] + np.sum((self._M2_bar[j,t])**2*self._W2[j,t])
            # Early stop when the normalized objective change is tiny.
            if ii > 0:
                if abs(Q2[ii] - Q2[ii-1])/self._f2d < 1e-5:
                    return {'Q2': Q2, 'PHI': PHI, 'iter': ii}
            step = step*beta
        return {'Q2': Q2, 'PHI': PHI, 'iter': S}
def estimatePhiGrad_M2M3(self,S, prt = False, step = 1.0):
if not hasattr(self,'_M2_bar'):
self.calM2_bar()
if not hasattr(self,'_W2'):
self.initializeWeight_M2()
if not hasattr(self,'_M3_bar'):
self.calM3_bar()
if not hasattr(self,'_W3'):
self.initializeWeight_M3()
p = self.p
d = self.d
k = self.k
| |
\
"filename*=UTF-8''{utf_filename}".format(
utf_filename=quote(basename.encode('utf-8'))
)
os.remove(fund_file_path)
return response
@f_app_blueprint.route('/del_file', methods=['POST', 'GET'])
@login_required
@permission
def del_file():
    """
    Delete an uploaded fund file record.

    Expects JSON body {"fid": <file id>}; removes the FundFile row and logs
    the action before and after the commit.
    :by hdhuang
    :return: JSON string {"status": "ok"}
    """
    fid = request.json['fid']
    # NOTE(review): .get(fid) returns None for an unknown id, which would
    # raise AttributeError on .show_name below — confirm the UI only sends
    # existing ids, or add a 404 branch.
    fund_file = FundFile.query.get(fid)
    logger.warning("用户{}删除文件ID {} 文件名 {}".format(current_user.username,fid,fund_file.show_name))
    db.session.delete(fund_file)
    db.session.commit()
    logger.warning("用户{}删除文件ID {} 成功".format(current_user.username,fid))
    return json.dumps({"status": "ok"})
@f_app_blueprint.route('/maintain_acc')
@login_required
def maintain_acc():
    """
    Render the NAV maintenance page for the current user's funds.

    Reads the cached FoF list for the logged-in user, resolves every child
    code to its fund record (mapping batch sub-codes back through
    FUND_ESSENTIAL when needed), and renders ``maintain_acc.html``.
    :return: rendered template
    """
    # Consistency/security fix: every sibling view requires login and this one
    # reads current_user.id, which fails for anonymous users — it was the only
    # route in this group missing @login_required.
    fof_list = cache.get(str(current_user.id))
    fund_list = set()
    primary_list = []
    for i in fof_list:
        primary_list.append(i['primary'])
        for x in i['child']:
            # A child code may be a batch sub-code (wind_code_s); map it back
            # to the primary wind_code before looking up the fund.
            batch = FUND_ESSENTIAL.query.filter_by(wind_code_s=x['code']).first()
            if batch is None:
                fund = FoFModel.query.filter_by(wind_code=x['code']).first()
            else:
                fund = FoFModel.query.filter_by(wind_code=batch.wind_code).first()
            fund_list.add(fund)
    # The set de-duplicates funds shared by several FoFs.
    fund_list = [ i.to_json() for i in fund_list]
    return render_template('maintain_acc.html',fof_list=fof_list,fund_list=fund_list,primary_list=primary_list)
@f_app_blueprint.route("/show_acc/<string:wind_code>", methods=['POST', 'GET'])
@login_required
def show_acc(wind_code):
    """
    Return all NAV records for a fund.
    :param wind_code: fund code
    :by hdhuang
    :return: JSON {"data": [...]} with 4-decimal formatted values, or
        {"data": ""} when the fund has no records
    """
    # limit=0 -> fetch the full NAV history, not just the most recent rows.
    acc_data = data_handler.get_fund_nav_by_wind_code(wind_code, limit=0)
    if acc_data is not None:
        acc_data.reset_index(inplace=True)
        acc_data = acc_data.to_dict(orient='records')
        acc = [{"nav_acc": "%0.4f" % i['nav_acc'], "pct": "%0.4f" % i['pct'],
                "nav_date": i['nav_date'].strftime('%Y-%m-%d'), "nav": "%0.4f" % i['nav']} for i in acc_data]
        return json.dumps({"data": acc})
    else:
        # NOTE(review): success path uses json.dumps (text/html content type)
        # while this branch uses jsonify (application/json) — clients see
        # inconsistent responses; confirm whether this is intended.
        return jsonify(data="")
@f_app_blueprint.route("/del_acc", methods=['POST', 'GET'])
@login_required
def del_acc():
    """
    Delete one NAV record.

    Expects JSON {"nav_date": ..., "wind_code": ...}; removes the matching
    FUND_NAV row, runs the cleanup stored procedure, and refreshes the
    current user's cached fund list.
    :by hdhuang
    :return: JSON {"status": "ok"}
    """
    nav_date = request.json['nav_date']
    wind_code = request.json['wind_code']
    # NOTE(review): .first() returns None when the record is already gone;
    # db.session.delete(None) would raise — confirm the UI prevents this.
    acc_record = FUND_NAV.query.filter(and_(FUND_NAV.wind_code == wind_code),
                                       (FUND_NAV.nav_date == nav_date)).first()
    db.session.delete(acc_record)
    db.session.commit()
    # Propagate the deletion to derived tables via the stored procedure.
    del_nav_str = "call proc_delete_fund_nav_by_wind_code(:wind_code, :nav_date)"
    with get_db_session(get_db_engine()) as session:
        logger.info("开始执行存储过程")
        session.execute(del_nav_str, {'wind_code': wind_code,'nav_date':nav_date})
    # Rebuild and re-cache this user's fund list so the UI reflects the change.
    fof_list = get_all_fof()
    cache.set(key=str(current_user.id), value=fof_list)
    logger.info("用户{}基金列表缓存已更新".format(current_user.username))
    return jsonify(status='ok')
@f_app_blueprint.route('/add_acc', methods=['POST', 'GET'])
# NOTE(review): unlike the sibling NAV routes this one has no @login_required —
# confirm whether that is intentional.
def add_acc():
    """
    Add one NAV record for a fund.

    Inserts a FUND_NAV row from the JSON body, runs the update/replace
    stored procedures and refreshes the cached fund list.
    :by hdhuang
    :return: JSON status ('ok' on success, 'error' on duplicate nav_date)
    """
    acc_record = FUND_NAV(wind_code=request.json['wind_code'], nav_date=request.json['nav_date'],
                          nav=request.json['nav'],
                          nav_acc=request.json['nav_acc'], source_mark=1)
    try:
        db.session.add(acc_record)
        db.session.commit()
        sql_str = "call proc_update_fund_info_by_wind_code2(:wind_code, :force_update)"
        replace_nav_str = "call proc_replace_fund_nav_by_wind_code(:wind_code, :nav_date,:force_update)"
        with get_db_session(get_db_engine()) as session:
            logger.info("开始执行存储过程")
            session.execute(sql_str, {'wind_code': request.json['wind_code'], 'force_update': True})
            session.execute(replace_nav_str, {'wind_code': request.json['wind_code'],
                                              'nav_date': request.json['nav_date'], 'force_update': True})
        fof_list = get_all_fof()
        cache.set(key=str(current_user.id), value=fof_list)
        logger.info("用户{}基金列表缓存已更新".format(current_user.username))
        return jsonify(status='ok')
    except exc.IntegrityError:
        # Roll back the failed flush so the scoped session stays usable for
        # subsequent requests (without this SQLAlchemy raises
        # InvalidRequestError on the next operation).
        db.session.rollback()
        logger.error("这条记录的净值日期已经存在{} {}".format(request.json['wind_code'], request.json['nav_date']))
        return jsonify(status='error')
@f_app_blueprint.route('/edit_acc', methods=['POST', 'GET'])
@login_required
def edit_acc():
    """
    Edit an existing NAV record.

    Updates nav / nav_acc for the (wind_code, nav_date) row given in the
    JSON body, then re-runs the update/replace stored procedures.
    :by hdhuang
    :return: JSON status
    """
    post_data = request.json
    record = FUND_NAV.query.filter(and_(FUND_NAV.wind_code == post_data['wind_code']),
                                   (FUND_NAV.nav_date == post_data['nav_date'])).first()
    record.nav_acc = post_data['nav_acc']
    record.nav = post_data['nav']
    db.session.add(record)
    db.session.commit()
    sql_str = "call proc_update_fund_info_by_wind_code2(:wind_code, :force_update)"
    replace_nav_str = "call proc_replace_fund_nav_by_wind_code(:wind_code, :nav_date,:force_update)"
    with get_db_session(get_db_engine()) as session:
        logger.info("开始执行存储过程")
        session.execute(sql_str, {'wind_code': request.json['wind_code'], 'force_update': True})
        session.execute(replace_nav_str, {'wind_code': request.json['wind_code'],
                                          'nav_date': request.json['nav_date'], 'force_update': True})
    return jsonify(status='ok')
@f_app_blueprint.route('/change_acc/<string:wind_code>', methods=['POST', 'GET'])
@login_required
@permission
@fund_owner
def change_acc(wind_code):
    """
    Render the NAV-editing page for one fund.

    :param wind_code: fund code
    :by hdhuang
    :return: rendered template on GET
    """
    if request.method == 'GET':
        name = code_get_name(wind_code)
        fof_list = cache.get(str(current_user.id))
        fof = check_code_order(wind_code)
        return render_template("change_acc.html", wind_code=wind_code, name=name, fof=fof, fof_list=fof_list)
@f_app_blueprint.route('/query_acc', methods=['POST', 'GET'])
@login_required
def query_acc():
    """
    Look up one NAV record (deprecated endpoint).

    :return: JSON with nav / nav_acc when found, error status otherwise
    """
    if request.method == "POST":
        payload = request.json
        record = FUND_NAV.query.filter(and_(FUND_NAV.wind_code == payload['wind_code']),
                                       (FUND_NAV.nav_date == payload['date'])).first()
        if record is None:
            return jsonify(status="error")
        return jsonify(status="ok", acc=record.nav, nav_acc=record.nav_acc)
@f_app_blueprint.route('/upload_acc/<string:wind_code>', methods=['POST', 'GET'])
@login_required
def upload_acc(wind_code):
    """
    Update a fund's NAV records from an uploaded file.

    The uploaded file name must share at least two characters with the
    fund's display name (a loose sanity check) and have an allowed
    extension.  The file is saved under ACC_FOLDER and imported, after
    which the cached fund list is refreshed.

    :param wind_code: fund code
    :by hdhuang
    :return: JSON describing the outcome (jQuery-File-Upload format)
    """
    if request.method == 'POST':
        if 'file[]' not in request.files:
            return redirect(request.url)
        file = request.files['file[]']
        if file.filename == '':
            return redirect(request.url)
        filename = file.filename
        fof = FoFModel.query.filter_by(wind_code=wind_code).first()
        if fof is None:
            # fall back to the batch mapping table for sub-share codes
            fof_mapping = FUND_ESSENTIAL.query.filter_by(wind_code_s=wind_code).first()
            fof_name = set(fof_mapping.sec_name_s)
        else:
            fof_name = set(fof.sec_name)
        old_name = set(filename)
        acc_path = current_app.config['ACC_FOLDER']
        # require at least two characters shared between file name and fund name
        if len(old_name & fof_name) >= 2:
            if file and allowed_file(file.filename):
                if not path.exists(acc_path):
                    os.mkdir(acc_path)
                file_path = path.join(acc_path, filename)
                file.save(file_path)
                fund_nav_import_csv.update_fundnav_by_file(wind_code=wind_code, file_path=file_path)
                fof_list = get_all_fof()
                cache.set(key=str(current_user.id), value=fof_list)
                return json.dumps({"files": [{"message": "净值已更新"}]})
        else:
            return json.dumps({"files": [{"error": "源文件格式错误,请检查"}]})
    if request.method == 'GET':
        return json.dumps({"files": [{"message": "净值已更新"}]})
@f_app_blueprint.route('/asset_details/<string:wind_code>', methods=['GET', 'POST'])
@login_required
def asset_details(wind_code):
    """Render the asset-details page for one fund."""
    if request.method == 'GET':
        return render_template('asset_details.html',
                               fof_list=cache.get(str(current_user.id)),
                               wind_code=wind_code)
@f_app_blueprint.route("/show_batch_asset/<string:wind_code>", methods=['POST', 'GET'])
@login_required
def show_batch_asset(wind_code):
"""
基金的所有净值记录
:param wind_code:基金代码
:by hdhuang
:return:
"""
fof_list = cache.get(str(current_user.id))
asset_list = [{"child": [x for x in i['child']]}
for i in fof_list if wind_code == i['primary'].wind_code]
if len(asset_list) > 0:
batch_data = []
for i in asset_list:
for c in i['child']:
batch_calc = FUND_NAV_CALC.query.filter_by(wind_code=c['code']).all()
for b in batch_calc:
batch_dict = b.as_dict()
batch_dict['name'] = c['name']
batch_dict['nav_date'] = batch_dict['nav_date'].strftime('%Y-%m-%d')
batch_data.append(batch_dict)
return jsonify(status='ok',data=batch_data)
else:
return jsonify(status='error')
@f_app_blueprint.route('/show_primary_asset/<string:wind_code>', methods=['GET', 'POST'])
def show_primary_asset(wind_code):
    """
    Return all asset rows of a primary fund plus the next computed NAV row.

    Uses the ``func_get_next_nav_date`` stored function to find the next
    valuation date; persisted rows are flagged ``db=True`` while the freshly
    computed one carries ``db=False``.

    :param wind_code: primary fund code
    :return: JSON status plus a ``data`` list on success
    """
    next_nav_date_str = "select func_get_next_nav_date(:wind_code)"
    with get_db_session(get_db_engine()) as session:
        tag_date = session.execute(next_nav_date_str, {'wind_code': wind_code}).first()[0]
    if tag_date is None:
        return jsonify(status='error')
    next_date_nav = calc_fof_nav(wind_code, tag_date)
    next_date_nav['db'] = False
    rows = [dict(row.as_dict(), db=True)
            for row in FUND_NAV_CALC.query.filter_by(wind_code=wind_code).all()]
    rows.append(next_date_nav)
    rows = [{key: (value.strftime('%Y-%m-%d') if key == 'nav_date' else value)
             for key, value in row.items()} for row in rows]
    return jsonify(status='ok', data=rows)
@f_app_blueprint.route('/confirm_asset/<string:wind_code>',methods=['GET','POST'])
def confirm_asset(wind_code):
    """
    Persist a confirmed asset row submitted from the asset table.

    (Translated from the original note:) take the date from the submitted
    data; first look up the fund code and inception date in the fund info
    table (True => new fund); get the previous working day from trade_date
    and check whether fund_nav_calc already holds data for that previous
    trading day (True).

    :param wind_code: primary fund code
    :return: JSON status ('ok' or 'error' on duplicate nav_date)
    """
    if request.method == 'POST':
        post_data = request.json
        if post_data['check']:
            # front-end only asked for validation; nothing to persist
            return jsonify(status='ok')
        else:
            # strip UI-only fields before feeding the dict into the model
            del post_data['db'],post_data['check']
            post_data['wind_code'] = wind_code
            new_calc_record = FUND_NAV_CALC(**post_data)
            # print(post_data)
            # NOTE(review): nav_acc is populated from post_data['nav'], not
            # post_data['nav_acc'] — confirm this is intentional.
            acc_record = FUND_NAV(wind_code=post_data['wind_code'], nav_date=post_data['nav_date'],
                                  nav=post_data['nav'],
                                  nav_acc=post_data['nav'], source_mark=3)
            db.session.add(new_calc_record)
            db.session.add(acc_record)
            #return jsonify(status='ok')
            try:
                db.session.commit()
                sql_str = "call proc_update_fund_info_by_wind_code2(:wind_code, :force_update)"
                replace_nav_str = "call proc_replace_fund_nav_by_wind_code(:wind_code, :nav_date,:force_update)"
                with get_db_session(get_db_engine()) as session:
                    logger.info("开始执行存储过程")
                    session.execute(sql_str, {'wind_code': post_data['wind_code'], 'force_update': True})
                    # NOTE(review): force_update is False here while the sibling
                    # add_acc/edit_acc routes pass True — confirm intentional.
                    session.execute(replace_nav_str,
                                    {'wind_code': post_data['wind_code'], 'nav_date': post_data['nav_date'],'force_update':False})
                fof_list = get_all_fof()
                cache.set(key=str(current_user.id), value=fof_list)
                logger.info("用户{}基金列表缓存已更新".format(current_user.username))
                return jsonify(status='ok')
            except exc.IntegrityError:
                logger.error("这条记录的净值日期已经存在{} {}".format(post_data['wind_code'], post_data['nav_date']))
                return jsonify(status='error')
@f_app_blueprint.route('/calendar/<string:wind_code>', methods=['GET', 'POST'])
@login_required
@fund_owner
def calendar(wind_code):
    """
    Render the per-fund calendar page.

    :param wind_code: fund code
    :by hdhuang
    :return: template on GET, a JSON status on POST
    """
    if request.method == "POST":
        return json.dumps({"status": "ok"})
    if request.method == "GET":
        fof_list = cache.get(str(current_user.id))
        fof = check_code_order(wind_code)
        return render_template("calendar.html", wind_code=wind_code, fof=fof, fof_list=fof_list)
@f_app_blueprint.route('/query_cal/<string:wind_code>/', methods=['GET', 'POST'])
@login_required
def query_cal(wind_code):
    """
    Return the calendar events of one fund within the requested window.

    fullCalendar supplies ``start``/``end`` as unix timestamps.  Every event
    is expanded into three entries (create / remind / event date), each with
    a battery icon marking the phase.  Private events of other users are
    filtered out.

    :param wind_code: fund code
    :by hdhuang
    :return: JSON list of events, or status="empty"
    """
    start = datetime.datetime.fromtimestamp(float(request.args['start'])).strftime('%Y-%m-%d')
    end = datetime.datetime.fromtimestamp(float(request.args['end'])).strftime('%Y-%m-%d')
    events = FUND_EVENT.query.filter(and_(FUND_EVENT.wind_code == wind_code,
                                          or_(FUND_EVENT.create_date.between(start, end),
                                              FUND_EVENT.remind_date.between(start, end),
                                              FUND_EVENT.event_date.between(start, end)))).all()
    if len(events) == 0:
        return jsonify(status="empty")
    events_list = [{"title": e.event_type, "start": e.remind_date.strftime('%Y-%m-%d'),
                    'end': e.event_date.strftime('%Y-%m-%d'), 'create': e.create_date.strftime('%Y-%m-%d'),
                    "desc": e.description, "id": e.id, "color": e.color, 'Private': e.private,
                    "user": e.create_user, "handle": e.handle_status} for e in events]
    # phase -> icon markup (battery level indicates progress)
    tags = {'create': "<i class='fa fa-battery-0'></i>",
            'start': "<i class='fa fa-battery-2'></i>",
            'end': "<i class='fa fa-battery-full'></i>"}
    el = []
    for item in events_list:
        if item['user'] != current_user.username and item['Private'] == True:
            continue
        for phase in ('create', 'start', 'end'):
            eo = {'tag': tags[phase], 'start': item[phase], 'id': item['id'],
                  'title': item['title'], 'desc': item['desc'], 'color': item['color'],
                  'start_time': item['start'], 'create_time': item['create'],
                  'end_time': item['end'], 'allDay': False, 'Private': item['Private'],
                  'handle': item['handle']}
            if item['user'] != current_user.username:
                eo['user'] = item['user']
            el.append(eo)
    return json.dumps(el)
@f_app_blueprint.route('/edit_cal/<string:wind_code>', methods=['GET', 'POST'])
@login_required
def edit_cal(wind_code):
    """
    Update an existing calendar event from the submitted form.

    :param wind_code: fund code
    :by hdhuang
    :return: string "ok"
    """
    if request.method == 'POST':
        event = FUND_EVENT.query.filter_by(id=request.form['id']).first()
        event.event_type = request.form['title']
        event.remind_date = request.form['start']
        event.description = request.form['description']
        event.color = request.form['color']
        event.wind_code = wind_code
        event.event_date = request.form['end']
        event.create_date = request.form['create']
        event.create_user = current_user.username
        # Normalise the checkbox string to a 0/1 flag.  The original code
        # first assigned the raw form string and immediately overwrote it
        # (dead store); only the integer value was ever persisted.
        event.private = 0 if request.form['Private'] == 'false' else 1
        db.session.commit()
    return "ok"
@f_app_blueprint.route('/add_cal/<string:wind_code>', methods=['GET', 'POST'])
def add_cal(wind_code):
    """
    Create a calendar event for a fund from the submitted form.

    :param wind_code: fund code
    :by hdhuang
    :return: string "ok"
    """
    if request.method == 'POST':
        form = request.form
        private_flag = 0 if form['Private'] == 'false' else 1
        event = FUND_EVENT(wind_code=wind_code,
                           event_date=form['end'],
                           event_type=form['title'],
                           create_date=form['create'],
                           remind_date=form['start'],
                           private=private_flag,
                           description=form['description'],
                           color=form['color'],
                           create_user=current_user.username)
        db.session.add(event)
        db.session.commit()
    return "ok"
@f_app_blueprint.route('/del_cal', methods=['GET', 'POST'])
@login_required
def del_cal():
    """
    Delete a calendar event by its id (taken from the query string).

    :by hdhuang
    :return: string "ok"
    """
    target = FUND_EVENT.query.get(request.args['id'])
    db.session.delete(target)
    db.session.commit()
    return "ok"
@f_app_blueprint.route('/all_cal', methods=['GET', 'POST'])
@login_required
def all_cal():
    """
    Render the page listing every calendar event.

    :by hdhuang
    """
    return render_template('allcalendar.html', fof_list=get_all_fof())
@f_app_blueprint.route('/query_all_cal/')
@login_required
def query_all_cal():
    """
    Return every calendar event of the current user's funds in a window.

    Same expansion as ``query_cal`` (three entries per event with battery
    icons), with an extra ``name`` field holding the fund's display name.
    Private events of other users are filtered out.

    :by hdhuang
    :return: JSON list of events, or status='ok' when there are none
    """
    start = datetime.datetime.fromtimestamp(float(request.args['start'])).strftime('%Y-%m-%d')
    end = datetime.datetime.fromtimestamp(float(request.args['end'])).strftime('%Y-%m-%d')
    codes = [fof.wind_code for fof in current_user.fofs]
    events = FUND_EVENT.query.filter(and_(FUND_EVENT.wind_code.in_(codes),
                                          or_(FUND_EVENT.create_date.between(start, end),
                                              FUND_EVENT.remind_date.between(start, end),
                                              FUND_EVENT.event_date.between(start, end)))).all()
    if len(events) == 0:
        return jsonify(status='ok')
    events_list = [{"title": e.event_type, "start": e.remind_date.strftime('%Y-%m-%d'),
                    'end': e.event_date.strftime('%Y-%m-%d'), 'create': e.create_date.strftime('%Y-%m-%d'),
                    "desc": e.description, "id": e.id, "color": e.color, 'Private': e.private,
                    "user": e.create_user, "handle": e.handle_status,
                    "name": code_get_name(e.wind_code)} for e in events]
    # phase -> icon markup (battery level indicates progress)
    tags = {'create': "<i class='fa fa-battery-0'></i>",
            'start': "<i class='fa fa-battery-2'></i>",
            'end': "<i class='fa fa-battery-full'></i>"}
    el = []
    for item in events_list:
        if item['user'] != current_user.username and item['Private'] == True:
            continue
        for phase in ('create', 'start', 'end'):
            eo = {'tag': tags[phase], 'start': item[phase], 'id': item['id'],
                  'title': item['title'], 'desc': item['desc'], 'color': item['color'],
                  'start_time': item['start'], 'create_time': item['create'],
                  'end_time': item['end'], 'allDay': False, 'Private': item['Private'],
                  'handle': item['handle'], 'name': item['name']}
            if item['user'] != current_user.username:
                eo['user'] = item['user']
            el.append(eo)
    return json.dumps(el)
@f_app_blueprint.route('/benchmark/<string:wind_code>', methods=['GET', 'POST'])
@login_required
@fund_owner
def benchmark(wind_code):
"""
加载配置基金组合,可以动态配置生成组合的各种相信信息,比如策略,金额等信息
:param wind_code: 基金代码
:by hdhuang
:return:
"""
if request.method == 'GET':
fof_list = cache.get(str(current_user.id))
fof = FoFModel.query.filter_by(wind_code=wind_code).first()
query_child = FOF_FUND_PCT.query.filter_by(wind_code_p=wind_code).first()
if query_child is not None:
last_date = FOF_FUND_PCT.query.filter_by(wind_code_p=wind_code).order_by(
FOF_FUND_PCT.date_adj.desc()).first().date_adj
child_fof = FOF_FUND_PCT.query.filter(
and_(FOF_FUND_PCT.wind_code_p == wind_code, FOF_FUND_PCT.date_adj == last_date)).all()
child = []
for i in child_fof:
data = {}
data['name'] = code_get_name(i.wind_code_s)
data['date'] = i.date_adj.strftime("%Y-%m-%d")
data['scale'] = i.invest_scale
data['code'] = i.wind_code_s
child.append(data)
else:
child = []
return render_template("benchmark.html", wind_code=wind_code, fof=fof, child=child, fof_list=fof_list)
if | |
== others_dict[k_o]) > 0 else 0
# store
ref_accs.append(running_ref_acc)
acc_025ious.append(running_acc_025iou)
acc_05ious.append(running_acc_05iou)
# aggregate
scores["overall"][k_o] = {}
scores["overall"][k_o]["ref_acc"] = np.mean(ref_accs)
scores["overall"][k_o]["acc@0.25iou"] = np.mean(acc_025ious)
scores["overall"][k_o]["acc@0.5iou"] = np.mean(acc_05ious)
ref_accs, acc_025ious, acc_05ious = [], [], []
for i in range(masks.shape[0]):
running_ref_acc = np.mean(ref_acc[i])
running_acc_025iou = ious[i][ious[i] >= 0.25].shape[0] / ious[i].shape[0]
running_acc_05iou = ious[i][ious[i] >= 0.5].shape[0] / ious[i].shape[0]
# store
ref_accs.append(running_ref_acc)
acc_025ious.append(running_acc_025iou)
acc_05ious.append(running_acc_05iou)
# aggregate
scores["overall"]["overall"] = {}
scores["overall"]["overall"]["ref_acc"] = np.mean(ref_accs)
scores["overall"]["overall"]["acc@0.25iou"] = np.mean(acc_025ious)
scores["overall"]["overall"]["acc@0.5iou"] = np.mean(acc_05ious)
# report
print("\nstats:")
for k_s in stats.keys():
for k_o in stats[k_s].keys():
print("{} | {}: {}".format(k_s, k_o, stats[k_s][k_o]))
for k_s in scores.keys():
print("\n{}:".format(k_s))
for k_m in scores[k_s].keys():
for metric in scores[k_s][k_m].keys():
print("{} | {} | {}: {}".format(k_s, k_m, metric, scores[k_s][k_m][metric]))
print("\nlanguage classification accuracy: {}".format(np.mean(lang_acc)))
def get_caption_eval_dataloader(self, args, scanrefer, scanrefer_new, all_scene_list, config):
dataset = ScannetReferenceDataset(
scanrefer=scanrefer,
scanrefer_new=scanrefer_new,
scanrefer_all_scene=all_scene_list,
split="val",
name=args.dataset,
num_points=args.num_points,
use_height=(not args.no_height),
use_color=args.use_color,
use_normal=args.use_normal,
use_multiview=args.use_multiview,
lang_num_max=args.lang_num_max,
augment=False
)
# dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
return dataset, dataloader
    def get_caption_eval_model(self, args, dataset, device, root=CONF.PATH.OUTPUT, eval_pretrained=False):
        """
        Build and load the JointNet model for caption evaluation.

        :param args: parsed command-line arguments
        :param dataset: dataset providing the vocabulary and GloVe embeddings
        :param device: torch device to move the model to
        :param root: directory containing trained checkpoints
        :param eval_pretrained: when True, load only the pretrained VoteNet
            backbone/proposal modules instead of a full checkpoint
        :return: the model in eval() mode (DataParallel-wrapped on multi-GPU)
        """
        # initiate model
        # input channels: 128 multiview features + 3 normals + 3 colors + 1 height
        input_channels = int(args.use_multiview) * 128 + int(args.use_normal) * 3 + int(args.use_color) * 3 + int(
            not args.no_height)
        model = JointNet(
            num_class=DC.num_class,
            vocabulary=dataset.vocabulary,
            embeddings=dataset.glove,
            num_heading_bin=DC.num_heading_bin,
            num_size_cluster=DC.num_size_cluster,
            mean_size_arr=DC.mean_size_arr,
            input_feature_dim=input_channels,
            num_proposal=args.num_proposals,
            no_caption=not args.eval_caption,
            num_locals=args.num_locals,
            query_mode=args.query_mode,
            use_lang_classifier=False,
            no_reference=True,
            dataset_config=DC
        )
        if eval_pretrained:
            # load pretrained model
            print("loading pretrained VoteNet...")
            pretrained_model = JointNet(
                num_class=DC.num_class,
                vocabulary=dataset.vocabulary,
                embeddings=dataset.glove,
                num_heading_bin=DC.num_heading_bin,
                num_size_cluster=DC.num_size_cluster,
                mean_size_arr=DC.mean_size_arr,
                num_proposal=args.num_proposals,
                input_feature_dim=input_channels,
                no_caption=True
            )
            # checkpoint name encodes the input features it was trained with
            pretrained_name = "PRETRAIN_VOTENET_XYZ"
            if args.use_color: pretrained_name += "_COLOR"
            if args.use_multiview: pretrained_name += "_MULTIVIEW"
            if args.use_normal: pretrained_name += "_NORMAL"
            pretrained_path = os.path.join(CONF.PATH.PRETRAINED, pretrained_name, "model.pth")
            # strict=False: the checkpoint only covers a subset of the modules
            pretrained_model.load_state_dict(torch.load(pretrained_path), strict=False)
            # mount the pretrained detection modules onto the full model
            model.backbone_net = pretrained_model.backbone_net
            model.vgen = pretrained_model.vgen
            model.proposal = pretrained_model.proposal
        else:
            # load a full trained checkpoint from the output folder
            model_name = "model_last.pth" if args.use_last else "model.pth"
            model_path = os.path.join(root, args.folder, model_name)
            model.load_state_dict(torch.load(model_path), strict=False)
            # model.load_state_dict(torch.load(model_path))
        # multi-GPU
        if torch.cuda.device_count() > 1:
            print("using {} GPUs...".format(torch.cuda.device_count()))
            model = torch.nn.DataParallel(model)
        # to device
        model.to(device)
        # set mode
        model.eval()
        return model
def get_caption_eval_scannet_scene_list(self, data):
# scene_list = sorted([line.rstrip() for line in open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_{}.txt".format(split)))])
scene_list = sorted(list(set([d["scene_id"] for d in data])))
return scene_list
def get_caption_eval_data(self, args):
if args.dataset == "ScanRefer":
scanrefer_train = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json")))
scanrefer_val = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json")))
elif args.dataset == "ReferIt3D":
scanrefer_train = json.load(open(os.path.join(CONF.PATH.DATA, "nr3d_train.json")))
scanrefer_val = json.load(open(os.path.join(CONF.PATH.DATA, "nr3d_val.json")))
else:
raise ValueError("Invalid dataset.")
eval_scene_list = self.get_caption_eval_scannet_scene_list(scanrefer_train) if args.use_train else self.get_caption_eval_scannet_scene_list(
scanrefer_val)
scanrefer_eval = []
scanrefer_eval_new = []
for scene_id in eval_scene_list:
data = deepcopy(scanrefer_train[0]) if args.use_train else deepcopy(scanrefer_val[0])
data["scene_id"] = scene_id
scanrefer_eval.append(data)
scanrefer_eval_new_scene = []
for i in range(args.lang_num_max):
scanrefer_eval_new_scene.append(data)
scanrefer_eval_new.append(scanrefer_eval_new_scene)
print("eval on {} samples".format(len(scanrefer_eval)))
return scanrefer_eval, eval_scene_list, scanrefer_eval_new
    def eval_caption(self, args):
        """
        Run the full captioning evaluation pipeline and print the metrics.

        Loads the eval split, builds dataset/dataloader and model, scores
        captions with eval_cap, then reports BLEU-1..4, CIDEr, ROUGE-L and
        METEOR (mean / max / min over evaluated samples).
        """
        print("initializing...")
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # get eval data
        scanrefer_eval, eval_scene_list, scanrefer_eval_new = self.get_caption_eval_data(args)
        # get dataloader
        dataset, dataloader = self.get_caption_eval_dataloader(args, scanrefer_eval, scanrefer_eval_new, eval_scene_list, DC)
        # get model
        model = self.get_caption_eval_model(args, dataset, device)
        # evaluate (bleu[0] holds the 4 n-gram means, bleu[1] the per-sample scores)
        bleu, cider, rouge, meteor = eval_cap(model, device, dataset, dataloader, "val", args.folder,
                                              force=args.force, save_interm=args.save_interm, min_iou=args.min_iou)
        # report
        print("\n----------------------Evaluation-----------------------")
        print("[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][0], max(bleu[1][0]), min(bleu[1][0])))
        print("[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][1], max(bleu[1][1]), min(bleu[1][1])))
        print("[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][2], max(bleu[1][2]), min(bleu[1][2])))
        print("[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(bleu[0][3], max(bleu[1][3]), min(bleu[1][3])))
        print("[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(cider[0], max(cider[1]), min(cider[1])))
        print("[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(rouge[0], max(rouge[1]), min(rouge[1])))
        print("[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}".format(meteor[0], max(meteor[1]), min(meteor[1])))
        print()
def evaluate(self, args):
print("evaluate...")
assert args.lang_num_max == 1, 'lang max num == 1; avoid bugs'
# evaluate
if args.eval_reference: self.eval_ref(args)
if args.eval_detection: raise ValueError("UnImplemented mode!")
if args.eval_caption: self.eval_caption(args)
    def get_ground_visualize_scanrefer(self, args):
        """
        Select the samples to visualize and group them per scene.

        Uses the train or val split depending on args.use_train, optionally
        restricted to a single args.scene_id.

        :return: (flat sample list, scene id list, per-group nested list)
        """
        scanrefer = SCANREFER_TRAIN if args.use_train else SCANREFER_VAL
        all_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer])))
        if args.scene_id:
            assert args.scene_id in all_scene_list, "The scene_id is not found"
            scene_list = [args.scene_id]
        else:
            scene_list = sorted(list(set([data["scene_id"] for data in scanrefer])))
        # keep only the samples belonging to the selected scenes
        scanrefer = [data for data in scanrefer if data["scene_id"] in scene_list]
        new_scanrefer = []
        scanrefer_new = []
        scanrefer_new_scene = []
        scene_id = ""
        for data in scanrefer:
            new_scanrefer.append(data)
            # flush the current group when the scene changes
            if scene_id != data["scene_id"]:
                scene_id = data["scene_id"]
                if len(scanrefer_new_scene) > 0:
                    scanrefer_new.append(scanrefer_new_scene)
                    scanrefer_new_scene = []
            # NOTE(review): the >= 1 threshold flushes before every append, so
            # each group ends up with exactly one sample.  Sibling code in this
            # codebase uses args.lang_num_max here — confirm the hard-coded 1
            # is intentional for visualization.
            if len(scanrefer_new_scene) >= 1:
                scanrefer_new.append(scanrefer_new_scene)
                scanrefer_new_scene = []
            scanrefer_new_scene.append(data)
        # flush the trailing group
        scanrefer_new.append(scanrefer_new_scene)
        return scanrefer, scene_list, scanrefer_new
def write_ply(self, verts, colors, indices, output_file):
if colors is None:
colors = np.zeros_like(verts)
if indices is None:
indices = []
file = open(output_file, 'w')
file.write('ply \n')
file.write('format ascii 1.0\n')
file.write('element vertex {:d}\n'.format(len(verts)))
file.write('property float x\n')
file.write('property float y\n')
file.write('property float z\n')
file.write('property uchar red\n')
file.write('property uchar green\n')
file.write('property uchar blue\n')
file.write('element face {:d}\n'.format(len(indices)))
file.write('property list uchar uint vertex_indices\n')
file.write('end_header\n')
for vert, color in zip(verts, colors):
file.write("{:f} {:f} {:f} {:d} {:d} {:d}\n".format(vert[0], vert[1], vert[2], int(color[0] * 255),
int(color[1] * 255), int(color[2] * 255)))
for ind in indices:
file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
file.close()
def write_bbox(self, bbox, mode, output_file):
"""
bbox: (cx, cy, cz, lx, ly, lz, r), center and length in three axis, the last is the rotation
output_file: string
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
import math
def compute_length_vec3(vec3):
return math.sqrt(vec3[0] * vec3[0] + vec3[1] * vec3[1] + vec3[2] * vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0, 0] = 1 + t * (x * x - 1)
rot[0, 1] = z * s + t * x * y
rot[0, 2] = -y * s + t * x * z
rot[1, 0] = -z * s + t * x * y
rot[1, 1] = 1 + t * (y * y - 1)
rot[1, 2] = x * s + t * y * z
rot[2, 0] = y * s + t * x * z
rot[2, 1] = -x * s + t * y * z
rot[2, 2] = 1 + t * (z * z - 1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks + 1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius * math.cos(theta), radius * math.sin(theta), height * i / stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append(
np.array([(i + 1) * slices + i2, i * slices + i2, i * slices + i2p1], dtype=np.uint32))
indices.append(
np.array([(i + 1) * slices + i2, i * slices + i2p1, (i + 1) * slices + i2p1], dtype=np.uint32))
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1, 0, 0]) - dotx * va
else:
axis = np.array([0, 1, 0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3, 3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
def get_bbox_corners(bbox):
centers, lengths = bbox[:3], bbox[3:6]
xmin, xmax = centers[0] - lengths[0] / 2, centers[0] + lengths[0] / 2
ymin, ymax = centers[1] - lengths[1] / 2, centers[1] + lengths[1] / 2
zmin, zmax = centers[2] - lengths[2] / 2, centers[2] + lengths[2] / 2
corners = []
corners.append(np.array([xmax, ymax, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymax, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymax, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymax, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymin, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymin, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymin, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymin, zmax]).reshape(1, 3))
corners = np.concatenate(corners, axis=0) # 8 x 3
| |
distance from each face
for j in range(0, nVertices):
# define the vector from the point of interest to the first point of the face
pa = np.array([vertices[j, 0]-points[i, 0], vertices[j, 1]-points[i, 1]])
# find perpendicular distance from point to current surface (vector projection)
d_vec = np.vdot(pa, unit_normals[j])*unit_normals[j]
# calculate the sign of perpendicular distance from point to current face (+ is inside, - is outside)
face_distance[i, j] = np.vdot(d_vec, unit_normals[j])
# check if the point is inside the convex hull by checking the sign of the distance
if np.all(face_distance[i] >= 0):
inside[i] = 1.0
return face_distance, inside
class MUX(Component):
    """Connect scalar inputs input0..input{n-1} into a single output array."""

    def __init__(self, nElements, units=None):
        super(MUX, self).__init__()
        # set finite difference options (fd used for testing only)
        self.deriv_options['check_form'] = 'central'
        self.deriv_options['check_step_size'] = 1.0e-5
        self.deriv_options['check_step_calc'] = 'relative'
        # define necessary class attributes
        self.nElements = nElements
        # define scalar inputs
        if units is None:
            for i in range(0, nElements):
                self.add_param('input%i' % i, val=0.0, desc='scalar input')
        else:
            for i in range(0, nElements):
                self.add_param('input%i' % i, val=0.0, units=units, desc='scalar input')
        # define output array
        if units is None:
            self.add_output('Array', np.zeros(nElements), desc='ndArray of all the scalar inputs')
        else:
            self.add_output('Array', np.zeros(nElements), units=units, desc='ndArray of all the scalar inputs')

    def solve_nonlinear(self, params, unknowns, resids):
        # assign input values to elements of the output array.
        # Direct item assignment replaces the original exec() string
        # round-trip — same effect, no dynamic code execution.
        for i in range(0, self.nElements):
            unknowns['Array'][i] = params['input%i' % i]

    def linearize(self, params, unknowns, resids):
        """Return the Jacobian: dArray_i / dinput_j = delta_ij."""
        # reusable one-hot row for the gradient of each output element
        dArray_dInput = np.zeros(self.nElements)
        J = {}
        for i in range(0, self.nElements):
            dArray_dInput[i] = 1.0
            J['Array', 'input%i' % i] = np.array(dArray_dInput)
            dArray_dInput[i] = 0.0
        return J
class DeMUX(Component):
    """Split a given array into separate scalar outputs output0..output{n-1}."""

    def __init__(self, nElements, units=None):
        super(DeMUX, self).__init__()
        # set finite difference options (fd used for testing only)
        self.deriv_options['check_form'] = 'central'
        self.deriv_options['check_step_size'] = 1.0e-5
        self.deriv_options['check_step_calc'] = 'relative'
        # initialize necessary class attributes
        self.nElements = nElements
        # define input array
        if units is None:
            self.add_param('Array', np.zeros(nElements), desc='ndArray of scalars')
        else:
            self.add_param('Array', np.zeros(nElements), units=units, desc='ndArray of scalars')
        # define scalar outputs
        if units is None:
            for i in range(0, nElements):
                self.add_output('output%i' % i, val=0.0, desc='scalar output')
        else:
            for i in range(0, nElements):
                self.add_output('output%i' % i, val=0.0, units=units, desc='scalar output')

    def solve_nonlinear(self, params, unknowns, resids):
        # assign elements of the input array to the scalar outputs.
        # Direct item assignment replaces the original exec() string
        # round-trip — same effect, no dynamic code execution.
        for i in range(0, self.nElements):
            unknowns['output%i' % i] = params['Array'][i]

    def linearize(self, params, unknowns, resids):
        """Return the Jacobian: doutput_i / dArray = i-th unit row vector."""
        doutput_dArray = np.eye(self.nElements)
        J = {}
        for i in range(0, self.nElements):
            J['output%i' % i, 'Array'] = np.reshape(doutput_dArray[i, :], (1, self.nElements))
        return J
# ---- if you know wind speed to power and thrust, you can use these tools ----------------
class CPCT_Interpolate_Gradients(Component):
    """Interpolate power (Cp) and thrust (Ct) coefficients from a
    precomputed wind-speed table, correcting both for yaw misalignment.
    Gradients are obtained by central finite differencing in linearize().
    """

    def __init__(self, nTurbines, direction_id=0, datasize=0):
        super(CPCT_Interpolate_Gradients, self).__init__()
        # finite difference options (fd used for testing only)
        self.deriv_options['check_form'] = 'central'
        self.deriv_options['check_step_size'] = 1.0e-5
        self.deriv_options['check_step_calc'] = 'relative'
        # class attributes
        self.nTurbines = nTurbines
        self.direction_id = direction_id
        self.datasize = datasize
        # inputs and outputs
        self.add_param('yaw%i' % direction_id, np.zeros(nTurbines), desc='yaw error', units='deg')
        self.add_param('wtVelocity%i' % direction_id, np.zeros(nTurbines), units='m/s', desc='hub height wind speed')  # Uhub
        self.add_output('Cp_out', np.zeros(nTurbines))
        self.add_output('Ct_out', np.zeros(nTurbines))
        # variable-tree style parameters holding the tabulated curves
        self.add_param('gen_params:pP', 1.88, pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_wind_speed', np.zeros(datasize), units='m/s',
                       desc='range of wind speeds', pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_CP', np.zeros(datasize), iotype='out',
                       desc='power coefficients', pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_CT', np.zeros(datasize), iotype='out',
                       desc='thrust coefficients', pass_by_obj=True)

    def solve_nonlinear(self, params, unknowns, resids):
        direction_id = self.direction_id
        pP = self.params['gen_params:pP']
        yaw_rad = self.params['yaw%i' % direction_id]*np.pi/180.0
        speeds = self.params['gen_params:windSpeedToCPCT_wind_speed']
        # axial wind speed seen by the rotor, reduced by yaw misalignment
        wind_speed_ax = np.cos(yaw_rad)**(pP/3.0)*self.params['wtVelocity%i' % direction_id]
        # clip to the bounds of the precalculated CP-CT table before interpolating
        wind_speed_ax = np.clip(wind_speed_ax, speeds[0], speeds[-1])
        self.unknowns['Cp_out'] = interp(wind_speed_ax, speeds, self.params['gen_params:windSpeedToCPCT_CP'])
        self.unknowns['Ct_out'] = interp(wind_speed_ax, speeds, self.params['gen_params:windSpeedToCPCT_CT'])
        # normalize on incoming wind speed to correct coefficients for yaw
        self.unknowns['Cp_out'] = self.unknowns['Cp_out'] * np.cos(yaw_rad)**pP
        self.unknowns['Ct_out'] = self.unknowns['Ct_out'] * np.cos(yaw_rad)**2

    def linearize(self, params, unknowns, resids):  # standard central differencing
        h = 1e-6  # finite-difference step
        direction_id = self.direction_id
        pP = self.params['gen_params:pP']
        yaw = self.params['yaw%i' % direction_id]
        vel = self.params['wtVelocity%i' % direction_id]
        speeds = self.params['gen_params:windSpeedToCPCT_wind_speed']
        cp_table = self.params['gen_params:windSpeedToCPCT_CP']
        ct_table = self.params['gen_params:windSpeedToCPCT_CT']

        def axial(yaw_deg, wind):
            # axial wind speed for a perturbed state, clipped to table bounds
            ax = np.cos(yaw_deg*np.pi/180.0)**(pP/3.0)*wind
            return np.minimum(np.maximum(ax, speeds[0]), speeds[-1])

        ax_high_yaw = axial(yaw + h, vel)
        ax_low_yaw = axial(yaw - h, vel)
        ax_high_wind = axial(yaw, vel + h)
        ax_low_wind = axial(yaw, vel - h)
        # interpolate on the precalculated CP-CT curves
        CP_high_yaw = interp(ax_high_yaw, speeds, cp_table)
        CP_low_yaw = interp(ax_low_yaw, speeds, cp_table)
        CP_high_wind = interp(ax_high_wind, speeds, cp_table)
        CP_low_wind = interp(ax_low_wind, speeds, cp_table)
        CT_high_yaw = interp(ax_high_yaw, speeds, ct_table)
        CT_low_yaw = interp(ax_low_yaw, speeds, ct_table)
        CT_high_wind = interp(ax_high_wind, speeds, ct_table)
        CT_low_wind = interp(ax_low_wind, speeds, ct_table)
        # normalize on incoming wind speed to correct coefficients for yaw
        CP_high_yaw = CP_high_yaw * np.cos((yaw + h)*np.pi/180.0)**pP
        CP_low_yaw = CP_low_yaw * np.cos((yaw - h)*np.pi/180.0)**pP
        CP_high_wind = CP_high_wind * np.cos(yaw*np.pi/180.0)**pP
        CP_low_wind = CP_low_wind * np.cos(yaw*np.pi/180.0)**pP
        CT_high_yaw = CT_high_yaw * np.cos((yaw + h)*np.pi/180.0)**2
        CT_low_yaw = CT_low_yaw * np.cos((yaw - h)*np.pi/180.0)**2
        CT_high_wind = CT_high_wind * np.cos(yaw*np.pi/180.0)**2
        CT_low_wind = CT_low_wind * np.cos(yaw*np.pi/180.0)**2
        # central differences arranged as diagonal Jacobian sub-blocks
        eye = np.eye(self.nTurbines)
        J = {}
        J['Cp_out', 'yaw%i' % direction_id] = eye*(CP_high_yaw - CP_low_yaw)/(2.0*h)
        J['Cp_out', 'wtVelocity%i' % direction_id] = eye*(CP_high_wind - CP_low_wind)/(2.0*h)
        J['Ct_out', 'yaw%i' % direction_id] = eye*(CT_high_yaw - CT_low_yaw)/(2.0*h)
        J['Ct_out', 'wtVelocity%i' % direction_id] = eye*(CT_high_wind - CT_low_wind)/(2.0*h)
        return J
class CPCT_Interpolate_Gradients_Smooth(Component):
    """Interpolate Cp/Ct from a precomputed table with an Akima spline.

    Unlike CPCT_Interpolate_Gradients, the spline supplies analytic
    derivatives, so linearize() reuses gradients cached during
    solve_nonlinear() instead of finite differencing.

    Cleanup: removed unused locals (start, skip) and the large blocks of
    commented-out table-massaging code that referenced them.
    """

    def __init__(self, nTurbines, direction_id=0, datasize=0):
        super(CPCT_Interpolate_Gradients_Smooth, self).__init__()
        # set finite difference options (fd used for testing only)
        self.deriv_options['check_form'] = 'central'
        self.deriv_options['check_step_size'] = 1.0e-6
        self.deriv_options['check_step_calc'] = 'relative'
        # define class attributes
        self.nTurbines = nTurbines
        self.direction_id = direction_id
        self.datasize = datasize
        # add inputs and outputs
        self.add_param('yaw%i' % direction_id, np.zeros(nTurbines), desc='yaw error', units='deg')
        self.add_param('wtVelocity%i' % direction_id, np.zeros(nTurbines), units='m/s', desc='hub height wind speed')  # Uhub
        self.add_output('Cp_out', np.zeros(nTurbines))
        self.add_output('Ct_out', np.zeros(nTurbines))
        # add variable trees
        self.add_param('gen_params:pP', 3.0, pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_wind_speed', np.zeros(datasize), units='m/s',
                       desc='range of wind speeds', pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_CP', np.zeros(datasize),
                       desc='power coefficients', pass_by_obj=True)
        self.add_param('gen_params:windSpeedToCPCT_CT', np.zeros(datasize),
                       desc='thrust coefficients', pass_by_obj=True)

    def solve_nonlinear(self, params, unknowns, resids):
        direction_id = self.direction_id
        pP = self.params['gen_params:pP']
        yaw = self.params['yaw%i' % direction_id]
        Cp = params['gen_params:windSpeedToCPCT_CP']
        Ct = params['gen_params:windSpeedToCPCT_CT']
        windspeeds = params['gen_params:windSpeedToCPCT_wind_speed']
        # fit Akima splines through the tabulated coefficient curves
        CPspline = Akima(windspeeds, Cp)
        CTspline = Akima(windspeeds, Ct)
        # interpolated values and their analytic derivatives w.r.t. wind speed
        CP, dCPdvel, _, _ = CPspline.interp(params['wtVelocity%i' % direction_id])
        CT, dCTdvel, _, _ = CTspline.interp(params['wtVelocity%i' % direction_id])
        # normalize on incoming wind speed to correct coefficients for yaw
        Cp_out = CP*np.cos(yaw*np.pi/180.)**pP
        Ct_out = CT*np.cos(yaw*np.pi/180.)**2.
        # cache analytic gradients for reuse in linearize()
        self.dCp_out_dyaw = (-np.sin(yaw*np.pi/180.))*(np.pi/180.)*pP*CP*np.cos(yaw*np.pi/180.)**(pP-1.)
        self.dCp_out_dvel = dCPdvel*np.cos(yaw*np.pi/180.)**pP
        self.dCt_out_dyaw = (-np.sin(yaw*np.pi/180.))*(np.pi/180.)*2.*CT*np.cos(yaw*np.pi/180.)
        self.dCt_out_dvel = dCTdvel*np.cos(yaw*np.pi/180.)**2.
        self.unknowns['Cp_out'] = Cp_out
        self.unknowns['Ct_out'] = Ct_out

    def linearize(self, params, unknowns, resids):
        """Assemble the Jacobian from gradients cached by solve_nonlinear()."""
        direction_id = self.direction_id
        J = {}
        J['Cp_out', 'yaw%i' % direction_id] = np.eye(self.nTurbines)*self.dCp_out_dyaw
        J['Cp_out', 'wtVelocity%i' % direction_id] = np.eye(self.nTurbines)*self.dCp_out_dvel
        J['Ct_out', 'yaw%i' % direction_id] = np.eye(self.nTurbines)*self.dCt_out_dyaw
        J['Ct_out', 'wtVelocity%i' % direction_id] = np.eye(self.nTurbines)*self.dCt_out_dvel
        return J
# legacy code for simple COE calculations - should be done more formally
'''
class calcICC(Component):
"""
Calculates ICC (initial capital cost) for given windfarm layout
The initial capital cost is the sum of the turbine system cost and the balance of station cost.
Neither cost includes construction financing or financing fees,
because | |
<gh_stars>0
"""
This script is generally for the purpose of extracting quantities of interest
from the Sapphire-generated Metadata object as it exists in V0.10.1
This will simply be a placeholder until a more robust way of writing and storing
the output data can be
"""
import pickle
from ase.io import read
import numpy as np
import matplotlib.pyplot as plt
def distance(a, b):
    """Euclidean distance between two 3-D points (indexable x, y, z triples)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    # abs() is unnecessary under squaring; the result is identical
    return np.sqrt(dx*dx + dy*dy + dz*dz)
def Collect_CNA(Data, Sig):
    """Return the per-frame normalised frequency of one CNA signature.

    Parameters
    ----------
    Data : list
        Per-frame data where Data[t] = (frequencies, signatures); the
        signature list of frame 0 fixes the column looked up in every frame.
    Sig : tuple
        An (r, s, t) signature triplet, matched exactly.

    Returns
    -------
    list or None
        One frequency per frame, or None (with the error printed) when the
        signature is missing or the data is malformed.
    """
    try:
        # column index of the requested signature in the first frame
        column = Data[0][1].index(Sig)
        return [frame[0][column] for frame in Data]
    except Exception as err:
        print(err)
        return None
# Simulation run directories and structure subdirectories; Reader() joins
# them as <Strut><Sim> path fragments when locating Metadata files.
# NOTE(review): 'Co'/'Ih' presumably denote cuboctahedral/icosahedral
# starting structures -- confirm against the directory layout.
Sims = ['Sim-1345/', 'Sim-2783/', 'Sim-3987/', 'Sim-4009/']
Struts = ['Co/', 'Ih/']
def New_File(path, new_movie='Quantity_movie.xyz', Quantities=()):
    """Write an (extended) xyz trajectory with extra per-atom columns.

    Robert:
    This function, at the moment, is only supporting the introduction of the
    aGCN to the new xyz file, but it is easily appended to as needs dictate.

    Parameters
    ----------
    path : str
        Source trajectory readable by ase.io.read.
    new_movie : str
        Output file name.
    Quantities : iterable
        Per-frame, per-atom arrays (Quantities[k][i] indexes frame i) that
        are appended as extra columns after element and position.

    Fixes: the original wrote the header (atom count + comment) of frame
    i+1 at the *end* of frame i, leaving a spurious trailing header after
    the last frame; each frame is now written self-contained.  The mutable
    default argument [] was also replaced with an immutable ().
    """
    Reference = read(path, index=':')
    with open(new_movie, 'w+') as movie:
        for i in range(len(Reference)):
            Ele = Reference[i].get_chemical_symbols()
            Pos = Reference[i].positions
            # frame header: atom count, then a comment line
            movie.write(str(len(Ele)) + '\n')
            if i == 0:
                movie.write('\t' + "This was made by Jones' post-processing code." + '\n')
            else:
                movie.write('\n')
            # element, x, y, z, then one column per requested quantity
            items = np.column_stack((Ele, Pos))
            for obj in Quantities:
                items = np.column_stack((items, obj[i]))
            for atom in items:
                movie.write(' \t'.join(str(item) for item in atom) + '\n')
def hebond(data):
    """Derive bond-count time series from per-frame hetero-adjacency data.

    data[t] is assumed to be a pair (homo_counts, hetero_counts) for frame
    t -- TODO confirm against the producer of Temp['headj'] in Reader().

    Returns
    -------
    a : np.ndarray  -- total of data[t][1] per frame
    b : np.ndarray  -- concatenated (data[t][0], data[t][1]) per frame
    e : np.ndarray  -- per-entry frame-to-frame differences (see NOTE below)
    c : np.ndarray  -- frame-to-frame change of sum(data[t][1])
    d : np.ndarray  -- data[t][1] per frame
    """
    a = np.array([ sum(data[t][1]) for t in range(len(data)) ])
    b = np.array([ np.concatenate((data[t][0], data[t][1]), axis = 0) for t in range(len(data)) ])
    # first frame has no predecessor: seed the difference lists with zeros
    c = [ [0] * len(data[0][0]) ]; d = [ [0] * len(data[0][1]) ]
    for t in range(len(data)-1):
        # NOTE(review): the loop variable x is a *value* drawn from
        # data[t][0] / data[t][1] but reused as an index into data[..][0];
        # and the second comprehension iterates data[t][1] yet still
        # indexes column [0] (expected data[t+1][1][x] - data[t][1][x]?).
        # Both look like bugs -- confirm intent before changing, since
        # downstream consumers may depend on the current output.
        c.append( [ data[t+1][0][x] - data[t][0][x] for x in data[t][0] ] )
        d.append( [data[t+1][0][x] - data[t][0][x] for x in data[t][1] ] )
    e = []
    f = []  # NOTE(review): unused
    for t in range(len(data)):
        e.append(np.concatenate((c[t], d[t])))
    e = np.array(e)
    # c is reused: now the change in total hetero-bond count between frames
    c = [0]
    for t in range(len(data)-1):
        c.append( sum(data[t+1][1]) - sum(data[t][1]))
    c = np.array(c)
    d = np.array([data[t][1] for t in range(len(data)) ])
    return a,b,e,c,d
def Relative(headj, nn):
    """Per-frame ratio of headj[t][1] to the tail of the coordination
    array nn[t], skipping the first 309 entries.
    NOTE(review): the 309 offset looks hard-coded for a 309-atom core --
    confirm against the system being analysed."""
    out = []
    for t in range(len(nn)):
        out.append(headj[t][1] / nn[t][309:])
    return out
def Init():
    """Create the empty per-quantity containers filled by Reader().

    Returns
    -------
    tuple of dict
        23 independent empty dicts, later keyed by '<Strut><Sim>'.
        Positional names, in order: edelta, comspace, cna_sigs, com,
        comdist, surf_atoms, comAu, comPt, hoadjAu, hoadjPt, comdistAu,
        comdistPt, midcomdistAu, midcomdistPt, surf_atomsPt, headj, mix,
        nn, PtAu, PtOnly, AvgCoPt, Gyration, GyrationPt.

        NOTE(review): Reader() fills indices 15-22 as (PtAu, PtOnly, mix,
        AvgCoPt, GyrationPt, Gyration, headj), which does not match the
        positional names above -- treat the names as placeholders and
        confirm before relying on them.

    Fix: the original built 23 named empty dicts and a throwaway docstring
    of auto-generated placeholders; collapsed to an equivalent, obviously
    correct construction.
    """
    # 23 distinct dicts -- keep the count in sync with Reader()/clean().
    return tuple({} for _ in range(23))
def Reader(T, Seed, Struts, Sims):
    """Load per-simulation Metadata pickles into the Init() containers.

    Parameters
    ----------
    T, Seed : str
        Leading path fragments; each file is opened at
        T+Seed+Strut+Sim+'Metadata.csv'.
    Struts : iterable of str
        Structure subdirectories (e.g. 'Co/', 'Ih/').
    Sims : iterable of str
        Simulation subdirectories.

    Returns
    -------
    init : tuple of dict
        The 23 dicts from Init(), keyed by Strut+Sim.  NOTE(review):
        indices 15-22 are filled as (PtAu, PtOnly, mix, AvgCoPt,
        GyrationPt, Gyration, headj), which does not match the positional
        names used inside Init() -- confirm before relying on those names.
        Failures for individual simulations are printed and skipped.
    """
    init = Init()
    for Strut in Struts:
        for Sim in Sims:
            try:
                # NOTE(review): despite the .csv extension this is a pickle file.
                with open(T+Seed+Strut+Sim+'Metadata.csv', 'rb') as infile:
                    Temp = pickle.load(infile)
                init[0][Strut+Sim] = Temp['edelta'] #t-type: number
                init[1][Strut+Sim] = Temp['comspace'] #t-type: array
                init[2][Strut+Sim] = Temp['cna_sigs'] #t-type: number
                init[3][Strut+Sim] = Temp['com'] #t-type: array
                init[4][Strut+Sim] = Temp['comdist'] #t-type: array
                init[5][Strut+Sim] = Temp['surf_atoms'] #t-type: number
                init[6][Strut+Sim] = Temp['comAu'] #t-type: array
                init[7][Strut+Sim] = Temp['comPt'] #t-type: array
                hoadjAu = Temp['hoadjAu']
                init[8][Strut+Sim] = np.array([ x for x in hoadjAu ] ) #t-type: list
                hoadjPt = Temp['hoadjPt']
                init[9][Strut+Sim] = np.array([ x for x in hoadjPt ] ) #t-type: list
                init[10][Strut+Sim] = Temp['comdistAu'] #t-type: array
                init[11][Strut+Sim] = Temp['comdistPt'] #t-type: array
                init[12][Strut+Sim] = Temp['midcomdistPt'] #t-type: array
                init[13][Strut+Sim] = Temp['midcomdistAu'] #t-type: array
                init[14][Strut+Sim] = Temp['surf_atomsPt'] #t-type: number
                headj = Temp['headj'] #t-type: tuple #######
                PtAuTemp = []
                PtOnlyTemp = []
                for t in range(len(headj)):
                    # NOTE(review): presumably counts atoms with zero hetero
                    # bonds and those with more than 9; the 55 divisor looks
                    # like the Pt atom count -- confirm for this system.
                    Temp1 = [ x for x in headj[t][1] if x == 0 ]
                    Temp2 = [ x for x in headj[t][1] if x > 9 ]
                    PtAuTemp.append(len(Temp2))
                    PtOnlyTemp.append(len(Temp1)/55)
                init[15][Strut+Sim] = PtAuTemp
                init[16][Strut+Sim] = PtOnlyTemp
                init[17][Strut+Sim] = Temp['mix'] #t-type: number
                Spare = []
                for t in range(len(headj)):
                    # average total coordination of each Pt atom (homo + hetero)
                    c = np.average([ Temp['hoadjPt'][t][i] + headj[t][0][i] for i in range(len(Temp['hoadjPt'][t])) ])
                    Spare.append(c)
                init[18][Strut+Sim] = Spare #t-type: number
                init[19][Strut+Sim] = Temp['gyrationPt'] #t-type: number
                init[20][Strut+Sim] = Temp['gyration'] #t-type: number
                init[21][Strut+Sim] = headj
                del(Temp)
                print(Strut+Sim)
            except Exception as e:
                print(e)
    return init
def clean(data, strut):
System = {
'edetla' : np.zeros(len(data[0]), dtype = float),
'comspace' : np.zeros(len(data[1]), dtype = float),
'421' : np.zeros(len(data[2]), dtype = float),
'422' : np.zeros(len(data[2]), dtype = float),
'555' : np.zeros(len(data[2]), dtype = float),
'com' : np.zeros(len(data[3]), dtype = object),
'comdist' : np.zeros(len(data[4]), dtype = object),
'surf_atoms' : np.zeros(len(data[5]), dtype = float),
'comAu' : np.zeros(len(data[6]), dtype = object),
'comPt' : np.zeros(len(data[7]), dtype = object),
'hoadjAu' : np.zeros(len(data[8]), dtype = object),
'hoadjPt' : np.zeros(len(data[9]), dtype = object),
'comdistAu' : np.zeros(len(data[10]), dtype = object),
'comdistPt' : np.zeros(len(data[11]), dtype = object),
'midcomdistAu' : np.zeros(len(data[13]), dtype = object),
'midcomdistPt' : np.zeros(len(data[12]), dtype = object),
'surf_atomsPt' : np.zeros(len(data[14]), dtype = float),
'mix' : np.zeros(len(data[16]), dtype = float),
'headj' : np.zeros(len(data[15]), dtype = object),
'atombonds' : np.zeros(len(data[15]), dtype = object),
'deltaatoms' : np.zeros(len(data[15]), dtype = object),
'deltabonds' : np.zeros(len(data[15]), dtype = object),
'nnadj' : np.zeros(len(data[15]), dtype = object),
'nn' : np.zeros(len(data[15]), dtype = object),
'PtOnly' : np.zeros(len(data[15]), dtype = object),
'PtAu' : np.zeros(len(data[15]), dtype = object),
'GyrPt' : np.zeros(len(data[15]), dtype = object),
'Gyr' : np.zeros(len(data[15]), dtype = object),
'AvgCoPt' : np.zeros(len(data[15]), dtype = object)
}
Keys = data[0].keys()
print(Keys)
Tempedelta = []; Tempcomspace = []; Temp421 = []; Temp422 = []; Temp555 = []
Tempcom = []; Tempcomdist = []; Tempsurf_atoms = []
TempcomAu = []; TempcomPt = []; TemphoadjAu = []; TemphoadjPt = []
TempcomdistAu = []; TempcomdistPt = []; TempmidcomdistPt = []
TempmidcomdistAu = []; Tempsurf_atomsPt = []; Tempmix = []
Tempheadj = []; Tempatombonds = []; Tempdeltaatoms = []; Tempdeltabonds = []
Tempnnadj = []; Tempnn = []; TempPtOnly = []; TempPtAu = []
TempGyrPt = []; TempGyr = []; TempAvgCoPt = []
for Key in Keys:
try:
Tempedelta.append(data[0][Key])
Tempcomspace.append(data[1][Key])
Temp421.append( Collect_CNA( data[2][Key], (4, 2, 1) ) )
Temp422.append( Collect_CNA( data[2][Key], (4, 2, 2) ) )
Temp555.append( Collect_CNA( data[2][Key], (5, 5, 5) ) )
Tempcom.append(data[3][Key])
Tempcomdist.append(data[4][Key])
Tempsurf_atoms.append(data[5][Key])
TempcomAu.append(data[6][Key])
TempcomPt.append(data[7][Key])
TemphoadjAu.append(data[8][Key])
TemphoadjPt.append(data[9][Key])
TempcomdistAu.append(data[10][Key])
TempcomdistPt.append(data[11][Key])
TempmidcomdistAu.append(data[13][Key])
TempmidcomdistPt.append(data[12][Key])
Tempsurf_atomsPt.append(data[14][Key])
TempPtAu.append(data[15][Key])
TempPtOnly.append(data[16][Key])
Tempmix.append(data[17][Key])
TempAvgCoPt.append(data[18][Key])
TempGyrPt.append(data[19][Key])
TempGyr.append(data[20][Key])
HeAdj = hebond(data[21][Key])
Tempheadj.append(HeAdj[0])
Tempatombonds.append(HeAdj[1])
Tempdeltaatoms.append(HeAdj[2])
Tempdeltabonds.append(HeAdj[3])
Tempnnadj.append(HeAdj[4])
#New_File(Key+'NewMovie.xyz', new_movie=Key+'Quantity_movie.xyz', Quantities = [HeAdj[1], HeAdj[2]])
except Exception as e:
print(e)
System['edetla'] = np.average(Tempedelta, axis = 0)
System['comspace'] = np.average(Tempcomspace, axis = 0)
System['421'] = np.average(Temp421, axis = 0)
System['422'] = np.average(Temp422, axis = 0)
System['555'] = np.average(Temp555, axis = 0)
System['com'] = np.average(Tempcom, axis = 0)
System['comdist'] = np.average(Tempcomdist, axis = 0)
System['surf_atoms'] = np.average(Tempsurf_atoms, axis = 0)
System['comAu'] = np.average(TempcomAu, axis = 0)
System['comPt'] = np.average(TempcomPt, axis = 0)
System['hoadjAu'] = np.average(TemphoadjAu, axis = 0)
System['hoadjPt'] = np.average(TemphoadjPt, axis = 0)
System['comdistAu'] = np.average(TempcomdistAu, axis | |
tol=tol):
"""Compute hyp1f1 using the Taylor series, with recurrence to avoid
the dangerous regions where |a| > |b| and sign(a) = -sign(z).
"""
if np.sign(z) == -np.sign(a) and np.abs(a) > np.abs(b):
if a < 0 and b > 0:
w0 = taylor_series(a, -int(a) + b + 1, z, maxiters, tol)
w1 = taylor_series(a, -int(a) + b, z, maxiters, tol)
return b_backward_recurrence(a, -int(a) + b, z, w0, w1, -int(a))
# elif a < 0 and b < 0:
# w0 = taylor_series(a, int(a) + b, z, maxiters, tol)
# return b_forward_recurrence(a, int(a) + b, z, w0, -int(a), tol)
elif a > 0 and b > 0:
w0 = taylor_series(a, int(a) + b + 1, z, maxiters, tol)
w1 = taylor_series(a, int(a) + b, z, maxiters, tol)
return b_backward_recurrence(a, int(a) + b, z, w0, w1, int(a))
# elif a > 0 and b < 0:
# w0 = taylor_series(a, -int(a) + b, z, maxiters, tol)
# return b_forward_recurrence(a, -int(a) + b, z, w0, int(a), tol)
return taylor_series(a, b, z, maxiters, tol)
def taylor_series_ab_recur(a, b, z, maxiters=500, tol=tol):
    """Compute hyp1f1 using the Taylor series, shifting (a, b) upward via
    backward recurrence whenever |z| > 1 and sign(a) == -sign(z).  (This
    is too broad a range of use, but kept to see whether it helps.)
    Forward recurrence for a > 0 was sketched but deliberately left
    unimplemented in the original.
    """
    if np.abs(z) > 1 and np.sign(z) == -np.sign(a) and a < 0:
        shift = int(np.abs(a)) - 1
        # seed the recurrence with two Taylor evaluations at shifted params
        w0 = taylor_series(a + shift, b + shift, z)
        w1 = taylor_series(a + shift - 1, b + shift - 1, z)
        return ab_backward_recurrence(a + shift - 1, b + shift - 1, z, w0, w1, shift)
    return taylor_series(a, b, z, maxiters, tol)
def single_fraction(a, b, z, maxiters=500, tol=tol):
    """Compute 1F1 by expanding the Taylor series as a single fraction and
    performing one division at the end. See section 3.3 of _[pop] for
    details.

    The partial sum is maintained as zeta_n = (alpha + beta) / gamma with
    numerator pieces alpha, beta and denominator gamma accumulated
    separately.  Convergence requires *two* consecutive small relative
    changes (zetan vs zetam, and zetam vs zetao) to guard against two
    successive terms agreeing by accident.
    """
    # zeroth iteration
    alpha, beta, gamma = 0, 1, 1
    zetam = 1
    # first iteration (unrolled so the while-loop always has a previous value)
    i = 1
    alpha = (alpha + beta)*i*(b + i - 1)
    beta = beta*(a + i - 1)*z
    gamma = gamma*i*(b + i - 1)
    zetan = (alpha + beta) / gamma
    i = 2
    while i <= maxiters:
        alpha = (alpha + beta)*i*(b + i - 1)
        beta = beta*(a + i - 1)*z
        gamma = gamma*i*(b + i - 1)
        # rotate the last three approximants: zetao <- zetam <- zetan
        tmp = zetan
        zetan = (alpha + beta) / gamma
        zetao = zetam
        zetam = tmp
        if np.abs((zetan - zetam) / zetam) < tol and \
           np.abs((zetam - zetao) / zetao) < tol:
            break
        i += 1
    # if i > maxiters:
    #     warnings.warn("Number of evaluations exceeded maxiters on "
    #                   "a = {}, b = {}, z = {}.".format(a, b, z))
    return zetan
def old_asymptotic_series(a, b, z, maxiters=500, tol=tol):
    """Compute hyp1f1 using an asymptotic series. This uses DLMF 13.7.2
    and DLMF 13.2.4. Note that the series is divergent (as one would
    expect); this can be seen by the ratio test.

    Superseded by asymptotic_series(), which works in log space and uses
    optimal truncation; kept for comparison.
    """
    # S1 is the first sum; the ith term is
    # (1 - a)_i * (b - a)_i * z^(-s) / i!
    # S2 is the second sum; the ith term is
    # (a)_i * (a - b + 1)_i * (-z)^(-s) / i!
    A1 = 1
    S1 = A1
    A2 = 1
    S2 = A2
    # Is 8 terms optimal? Not sure.
    for i in range(1, 9):
        A1 = A1*(i - a)*(b - a + i - 1) / (z*i)
        S1 += A1
        A2 = -A2*(a + i - 1)*(a - b + i) / (z*i)
        S2 += A2
    # Select the branch factor exp(+/- i*pi*a) by the sector of arg z,
    # per the conventions of DLMF 13.7.2.
    phi = np.angle(z)
    if np.imag(z) == 0:
        expfac = np.cos(pi*a)
    elif phi > -0.5*pi and phi < 1.5*pi:
        expfac = np.exp(1J*pi*a)
    elif phi > -1.5*pi and phi <= -0.5*pi:
        expfac = np.exp(-1J*pi*a)
    else:
        raise Exception("Shouldn't be able to get here!")
    # NOTE(review): rgamma/gamma presumably come from scipy.special
    # (reciprocal gamma avoids poles at non-positive integers) -- confirm
    # the imports at the top of the file.
    c1 = np.exp(z)*z**(a - b)*rgamma(a)
    c2 = expfac*z**(-a)*rgamma(b - a)
    return gamma(b)*(c1*S1 + c2*S2)
def asymptotic_series(a, b, z, maxiters=500, tol=tol):
    """Compute hyp1f1 using an asymptotic series. This uses DLMF 13.7.2
    and DLMF 13.2.4. Note that the series is divergent (as one would
    expect); this can be seen by the ratio test.

    The prefactors c1, c2 are computed in log space via gammaln to avoid
    overflow; summation stops at the smallest term (optimal truncation of
    a divergent series) or once adding terms no longer changes the sum.
    """
    # On the negative real axis, delegate to Paris' expansion.
    if np.imag(z) == 0 and np.real(z) < 0:
        return paris_series(a, b, z, maxiters)
    # Select the branch factor exp(+/- i*pi*a) by the sector of arg z.
    phi = np.angle(z)
    if np.imag(z) == 0:
        expfac = np.cos(pi*a)
    elif phi > -0.5*pi and phi < 1.5*pi:
        expfac = np.exp(1J*pi*a)
    elif phi > -1.5*pi and phi <= -0.5*pi:
        expfac = np.exp(-1J*pi*a)
    else:
        raise Exception("Shouldn't be able to get here!")
    if np.real(a) and np.real(b) and np.real(z) and (a < 0 or b < 0):
        # gammaln will not give correct results for real negative
        # arguments, so the args must be cast to complex.
        c1 = np.real(np.exp(gammaln(b+0j) - gammaln(a+0j) + z
                            + (a-b)*np.log(z)))
    else:
        c1 = np.exp(gammaln(b) - gammaln(a) + z + (a-b)*np.log(z))
    if np.real(a) and np.real(b) and np.real(z) and (b - a < 0 or b < 0):
        # same complex-cast workaround for the second prefactor
        c2 = np.real(np.exp(gammaln(b + 0j) - gammaln(b - a + 0j) - a*np.log(z)))
    else:
        c2 = np.exp(gammaln(b) - gammaln(b - a) - a*np.log(z))
    # S1 is the first sum; the ith term is
    # (1 - a)_i * (b - a)_i * z^(-s) / i!
    # S2 is the second sum; the ith term is
    # (a)_i * (a - b + 1)_i * (-z)^(-s) / i!
    largest_term = 0
    previous_term = np.inf
    A1 = 1
    S1 = A1
    A2 = 1
    S2 = A2
    for i in range(1, maxiters + 1):
        A1 = A1*(i - a)*(b - a + i - 1) / (z*i)
        A2 = -A2*(a + i - 1)*(a - b + i) / (z*i)
        current_term = np.abs(c1*A1 + c2*A2)
        if current_term > largest_term:
            # Sometimes, the terms of the series increase initially. We want
            # to keep summing until we're past the first maximum.
            largest_term = current_term
        elif current_term > previous_term:
            # We've passed the smallest term of the series: adding more will
            # only harm precision
            break
        current_sum = c1*S1 + c2*S2
        # Also stop if the next term no longer changes the sum, or the
        # running sum has already overflowed to a non-finite value.
        if c1*(S1 + A1) + c2*(S2 + A2) == current_sum or not np.isfinite(current_sum):
            break
        S1 += A1
        S2 += A2
        previous_term = current_term
    return c1*S1 + c2*S2
def paris_series(a, b, z, maxiters=200, terms=0):
"""The exponentially improved asymptotic expansion along the negative real
axis developed by Paris (2013).
The exponentially improved expansion does not appear to improve the
performance of the series in general, perhaps as a result of an
implementation bug, so it has been commented out.
To sum a predetermined number of terms (rather than stopping at the
estimated optimal term), use the terms kwarg.
"""
x = -z
if np.real(a) and np.real(b) and (b < 0 or b - a < 0):
c = np.exp(gammaln(b + 0j) - gammaln(b - a + 0j) - a*np.log(x))
c = np.real(c)
else:
c = np.exp(gammaln(b) - gammaln(b - a) - a*np.log(x))
theta = a - b
if np.isreal(theta) and theta == np.floor(theta):
if theta >= 0:
# The hypergeometric function is a polynomial in n, there are
# no exponentially small corrections
if np.real(a) and np.real(b) and (b < 0 or a < 0):
c = np.exp(-x + gammaln(a + 0j) - gammaln(b + 0j))
c = np.real(c)
else:
c = np.exp(-x + gammaln(a) - gammaln(b))
c = c * x**theta * (-1)**theta
A1 = 1
S1 = 1
if terms > 0:
n = terms
else:
n = int(theta)
for i in xrange(n + 1):
A1 = A1*((1 - a + i)*(n - i)/(x*(i + 1)))
S1 += A1
return c*S1
else:
# We use the same prefactor c as in the general case, but sum
# a fixed number of terms, not up to the smallest one.
| |
' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
i = []
if exts[1] is not None:
for h in exts[1]:
p0x = h[0]
p0y = h[1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = h[2]
p1y = h[3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
return e, i
def b2s(exts):
    """Convert two points of a solid into its bounding box.
    (Cube-like solid parallel with axes.)

    exts[0] holds the footprint corners (x0, y0, x1, y1); exts[1] is the
    height.  Returns six closed rings (bottom, top, four walls) as
    space-separated posList coordinate strings.
    """
    x0, y0 = exts[0][0], exts[0][1]
    x1, y1 = exts[0][2], exts[0][3]
    z_bot = '0.0'
    z_top = str(exts[1])

    def corner(x, y, z):
        # one "x y z" coordinate triple (z already stringified)
        return str(x) + ' ' + str(y) + ' ' + z

    def ring(c0, c1, c2, c3):
        # closed linear ring: first corner repeated at the end
        return "%s %s %s %s %s" % (c0, c1, c2, c3, c0)

    p0, p0T = corner(x0, y0, z_bot), corner(x0, y0, z_top)
    p1, p1T = corner(x1, y1, z_bot), corner(x1, y1, z_top)
    pb, pbT = corner(x1, y0, z_bot), corner(x1, y0, z_top)
    pu, puT = corner(x0, y1, z_bot), corner(x0, y1, z_top)
    return [
        ring(p0, pu, p1, pb),      # bottom face
        ring(p0T, pbT, p1T, puT),  # top face (opposite winding)
        ring(p0, pb, pbT, p0T),    # wall
        ring(pb, p1, p1T, pbT),    # wall
        ring(p1, pu, puT, p1T),    # wall
        ring(pu, p0, p0T, puT),    # wall
    ]
def CityGMLstreets(CityModel, street_data):
    """Generates a road network with the thematic module for Transportation
    Objects (tran:Road, LOD1 MultiSurface), appended to CityModel."""
    member = etree.SubElement(CityModel, "cityObjectMember")
    road = etree.SubElement(member, "{%s}Road" % ns_tran)
    if ASSIGNID:
        road.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    lod1ms = etree.SubElement(road, "{%s}lod1MultiSurface" % ns_tran)
    multi_surface = etree.SubElement(lod1ms, "{%s}MultiSurface" % ns_gml)
    surface_member = etree.SubElement(multi_surface, "{%s}surfaceMember" % ns_gml)
    polygon = etree.SubElement(surface_member, "{%s}Polygon" % ns_gml)
    if ASSIGNID:
        polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    # b2p yields (exterior ring, list of interior rings)
    exterior_ring, interior_rings = b2p(street_data)
    exterior = etree.SubElement(polygon, "{%s}exterior" % ns_gml)
    lr = etree.SubElement(exterior, "{%s}LinearRing" % ns_gml)
    etree.SubElement(lr, "{%s}posList" % ns_gml).text = exterior_ring
    for hole in interior_rings:
        interior = etree.SubElement(polygon, "{%s}interior" % ns_gml)
        lr = etree.SubElement(interior, "{%s}LinearRing" % ns_gml)
        etree.SubElement(lr, "{%s}posList" % ns_gml).text = hole
def CityGMLplantCoverLOD0(CityModel, pc_data):
    """Generates a PlantCover as a 2.5D surface (veg:PlantCover with a
    single-polygon LOD1 MultiSurface), appended to CityModel."""
    member = etree.SubElement(CityModel, "cityObjectMember")
    plant = etree.SubElement(member, "{%s}PlantCover" % ns_veg)
    if ASSIGNID:
        plant.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    lod1ms = etree.SubElement(plant, "{%s}lod1MultiSurface" % ns_veg)
    multi_surface = etree.SubElement(lod1ms, "{%s}MultiSurface" % ns_gml)
    surface_member = etree.SubElement(multi_surface, "{%s}surfaceMember" % ns_gml)
    polygon = etree.SubElement(surface_member, "{%s}Polygon" % ns_gml)
    if ASSIGNID:
        polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    # footprint only -- no interior rings at this LOD
    ring_text = b2p([pc_data[0], None])[0]
    exterior = etree.SubElement(polygon, "{%s}exterior" % ns_gml)
    lr = etree.SubElement(exterior, "{%s}LinearRing" % ns_gml)
    etree.SubElement(lr, "{%s}posList" % ns_gml).text = ring_text
def CityGMLplantCoverLOD1(CityModel, pc_data):
    """Generates a PlantCover as a solid (veg:PlantCover with an LOD1
    MultiSolid built from the extruded bounding box), appended to CityModel."""
    member = etree.SubElement(CityModel, "cityObjectMember")
    plant = etree.SubElement(member, "{%s}PlantCover" % ns_veg)
    if ASSIGNID:
        plant.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    lod1_multi_solid = etree.SubElement(plant, "{%s}lod1MultiSolid" % ns_veg)
    multi_solid = etree.SubElement(lod1_multi_solid, "{%s}MultiSolid" % ns_gml)
    solid_member = etree.SubElement(multi_solid, "{%s}solidMember" % ns_gml)
    solid = etree.SubElement(solid_member, "{%s}Solid" % ns_gml)
    if ASSIGNID:
        solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
    exterior = etree.SubElement(solid, "{%s}exterior" % ns_gml)
    composite = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
    # six bounding-box faces of the extruded footprint
    for face in b2s(pc_data):
        addsurface(False, composite, face)
def rotator(vertex, sine, cos, origin_of_rotation):
    """Rotate a 3D vertex around a 2D origin in the XY plane.

    The sine and cosine of the rotation angle are precomputed by the caller,
    since the same rotation is applied to many vertices. The Z coordinate is
    passed through unchanged (as a float). Returns [x', y', z] as a list.
    """
    x, y, z = float(vertex[0]), float(vertex[1]), float(vertex[2])
    ox, oy = origin_of_rotation[0], origin_of_rotation[1]
    dx = x - ox
    dy = y - oy
    #-- Standard 2D rotation about (ox, oy)
    return [dx * cos - dy * sine + ox, dx * sine + dy * cos + oy, z]
#----------------------------------------------------------------------
#-- Start of the program
print('Parsing file', BUILDINGFILE, '...')
#-- Parse the file containing the building information
#-- NOTE(review): BUILDINGFILE is rebound here from the path string to the
#-- parsed tree, so the original filename is no longer available afterwards.
BUILDINGFILE = etree.parse(BUILDINGFILE)
root = BUILDINGFILE.getroot()
#-- Buildings will be stored here
buildings = []
#-- Streets will be stored here
streets = []
#-- PlantCover will be stored here
plantcover = []
#-- Find all instances of city objects in the XML and put them in a list
#-- NOTE(review): getiterator() is deprecated (removed from the stdlib
#-- ElementTree in Python 3.9); iter() is the modern equivalent -- confirm
#-- which etree implementation/version this script targets.
for obj in root.getiterator('building'):
    buildings.append(obj)
for obj in root.getiterator('streets'):
    streets.append(obj)
for obj in root.getiterator('parks'):
    plantcover.append(obj)
print("There are", len(buildings), "buildings(s) in this XML. Processing...")
print("Opening empty CityGML files...")
#-- Maps variant name (e.g. 'LOD0_0', 'LOD1_1_F0_H0_solid') to its CityGML document
CityGMLs = {}
#-- Instances
## LOD0
#-- LOD0.0
CityGMLs['LOD0_0'] = createCityGML('LOD0_0')
#-- LOD0.1
if VARIANTS:
CityGMLs['LOD0_1_F0_H0'] = createCityGML('LOD0_1_F0_H0')
CityGMLs['LOD0_1_F0_H1'] = createCityGML('LOD0_1_F0_H1')
CityGMLs['LOD0_1_F0_H2'] = createCityGML('LOD0_1_F0_H2')
CityGMLs['LOD0_1_F0_H3'] = createCityGML('LOD0_1_F0_H3')
if VARIANTS:
CityGMLs['LOD0_1_F0_H4'] = createCityGML('LOD0_1_F0_H4')
CityGMLs['LOD0_1_F0_H5'] = createCityGML('LOD0_1_F0_H5')
CityGMLs['LOD0_1_F0_H6'] = createCityGML('LOD0_1_F0_H6')
CityGMLs['LOD0_1_F0_HAvg'] = createCityGML('LOD0_1_F0_HAvg')
CityGMLs['LOD0_1_F0_HMed'] = createCityGML('LOD0_1_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_1_F1_H0'] = createCityGML('LOD0_1_F1_H0')
CityGMLs['LOD0_1_F1_H1'] = createCityGML('LOD0_1_F1_H1')
CityGMLs['LOD0_1_F1_H2'] = createCityGML('LOD0_1_F1_H2')
CityGMLs['LOD0_1_F1_H3'] = createCityGML('LOD0_1_F1_H3')
CityGMLs['LOD0_1_F1_H4'] = createCityGML('LOD0_1_F1_H4')
CityGMLs['LOD0_1_F1_H5'] = createCityGML('LOD0_1_F1_H5')
CityGMLs['LOD0_1_F1_H6'] = createCityGML('LOD0_1_F1_H6')
CityGMLs['LOD0_1_F1_HAvg'] = createCityGML('LOD0_1_F1_HAvg')
CityGMLs['LOD0_1_F1_HMed'] = createCityGML('LOD0_1_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_1_Fd_H0'] = createCityGML('LOD0_1_Fd_H0')
CityGMLs['LOD0_1_Fd_H1'] = createCityGML('LOD0_1_Fd_H1')
CityGMLs['LOD0_1_Fd_H2'] = createCityGML('LOD0_1_Fd_H2')
CityGMLs['LOD0_1_Fd_H3'] = createCityGML('LOD0_1_Fd_H3')
CityGMLs['LOD0_1_Fd_H4'] = createCityGML('LOD0_1_Fd_H4')
CityGMLs['LOD0_1_Fd_H5'] = createCityGML('LOD0_1_Fd_H5')
CityGMLs['LOD0_1_Fd_H6'] = createCityGML('LOD0_1_Fd_H6')
CityGMLs['LOD0_1_Fd_HAvg'] = createCityGML('LOD0_1_Fd_HAvg')
CityGMLs['LOD0_1_Fd_HMed'] = createCityGML('LOD0_1_Fd_HMed')
#-- LOD0.2
if VARIANTS:
CityGMLs['LOD0_2_F0_H0'] = createCityGML('LOD0_2_F0_H0')
CityGMLs['LOD0_2_F0_H1'] = createCityGML('LOD0_2_F0_H1')
CityGMLs['LOD0_2_F0_H2'] = createCityGML('LOD0_2_F0_H2')
CityGMLs['LOD0_2_F0_H3'] = createCityGML('LOD0_2_F0_H3')
if VARIANTS:
CityGMLs['LOD0_2_F0_H4'] = createCityGML('LOD0_2_F0_H4')
CityGMLs['LOD0_2_F0_H5'] = createCityGML('LOD0_2_F0_H5')
CityGMLs['LOD0_2_F0_H6'] = createCityGML('LOD0_2_F0_H6')
CityGMLs['LOD0_2_F0_HAvg'] = createCityGML('LOD0_2_F0_HAvg')
CityGMLs['LOD0_2_F0_HMed'] = createCityGML('LOD0_2_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_2_F1_H0'] = createCityGML('LOD0_2_F1_H0')
CityGMLs['LOD0_2_F1_H1'] = createCityGML('LOD0_2_F1_H1')
CityGMLs['LOD0_2_F1_H2'] = createCityGML('LOD0_2_F1_H2')
CityGMLs['LOD0_2_F1_H3'] = createCityGML('LOD0_2_F1_H3')
CityGMLs['LOD0_2_F1_H4'] = createCityGML('LOD0_2_F1_H4')
CityGMLs['LOD0_2_F1_H5'] = createCityGML('LOD0_2_F1_H5')
CityGMLs['LOD0_2_F1_H6'] = createCityGML('LOD0_2_F1_H6')
CityGMLs['LOD0_2_F1_HAvg'] = createCityGML('LOD0_2_F1_HAvg')
CityGMLs['LOD0_2_F1_HMed'] = createCityGML('LOD0_2_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_2_Fd_H0'] = createCityGML('LOD0_2_Fd_H0')
CityGMLs['LOD0_2_Fd_H1'] = createCityGML('LOD0_2_Fd_H1')
CityGMLs['LOD0_2_Fd_H2'] = createCityGML('LOD0_2_Fd_H2')
CityGMLs['LOD0_2_Fd_H3'] = createCityGML('LOD0_2_Fd_H3')
CityGMLs['LOD0_2_Fd_H4'] = createCityGML('LOD0_2_Fd_H4')
CityGMLs['LOD0_2_Fd_H5'] = createCityGML('LOD0_2_Fd_H5')
CityGMLs['LOD0_2_Fd_H6'] = createCityGML('LOD0_2_Fd_H6')
CityGMLs['LOD0_2_Fd_HAvg'] = createCityGML('LOD0_2_Fd_HAvg')
CityGMLs['LOD0_2_Fd_HMed'] = createCityGML('LOD0_2_Fd_HMed')
#-- LOD0.3
if VARIANTS:
CityGMLs['LOD0_3_F0_H0'] = createCityGML('LOD0_3_F0_H0')
CityGMLs['LOD0_3_F0_H1'] = createCityGML('LOD0_3_F0_H1')
CityGMLs['LOD0_3_F0_H2'] = createCityGML('LOD0_3_F0_H2')
CityGMLs['LOD0_3_F0_H3'] = createCityGML('LOD0_3_F0_H3')
if VARIANTS:
CityGMLs['LOD0_3_F0_H4'] = createCityGML('LOD0_3_F0_H4')
CityGMLs['LOD0_3_F0_H5'] = createCityGML('LOD0_3_F0_H5')
CityGMLs['LOD0_3_F0_H6'] = createCityGML('LOD0_3_F0_H6')
CityGMLs['LOD0_3_F0_HAvg'] = createCityGML('LOD0_3_F0_HAvg')
CityGMLs['LOD0_3_F0_HMed'] = createCityGML('LOD0_3_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_3_F1_H0'] = createCityGML('LOD0_3_F1_H0')
CityGMLs['LOD0_3_F1_H1'] = createCityGML('LOD0_3_F1_H1')
CityGMLs['LOD0_3_F1_H2'] = createCityGML('LOD0_3_F1_H2')
CityGMLs['LOD0_3_F1_H3'] = createCityGML('LOD0_3_F1_H3')
CityGMLs['LOD0_3_F1_H4'] = createCityGML('LOD0_3_F1_H4')
CityGMLs['LOD0_3_F1_H5'] = createCityGML('LOD0_3_F1_H5')
CityGMLs['LOD0_3_F1_H6'] = createCityGML('LOD0_3_F1_H6')
CityGMLs['LOD0_3_F1_HAvg'] = createCityGML('LOD0_3_F1_HAvg')
CityGMLs['LOD0_3_F1_HMed'] = createCityGML('LOD0_3_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_3_Fd_H0'] = createCityGML('LOD0_3_Fd_H0')
CityGMLs['LOD0_3_Fd_H1'] = createCityGML('LOD0_3_Fd_H1')
CityGMLs['LOD0_3_Fd_H2'] = createCityGML('LOD0_3_Fd_H2')
CityGMLs['LOD0_3_Fd_H3'] = createCityGML('LOD0_3_Fd_H3')
CityGMLs['LOD0_3_Fd_H4'] = createCityGML('LOD0_3_Fd_H4')
CityGMLs['LOD0_3_Fd_H5'] = createCityGML('LOD0_3_Fd_H5')
CityGMLs['LOD0_3_Fd_H6'] = createCityGML('LOD0_3_Fd_H6')
CityGMLs['LOD0_3_Fd_HAvg'] = createCityGML('LOD0_3_Fd_HAvg')
CityGMLs['LOD0_3_Fd_HMed'] = createCityGML('LOD0_3_Fd_HMed')
## LOD1
#-- LOD1.0
CityGMLs['LOD1_0_HMin'] = createCityGML('LOD1_0_HMin')
if SOLIDS:
CityGMLs['LOD1_0_HMin_solid'] = createCityGML('LOD1_0_HMin_solid')
CityGMLs['LOD1_0_HMin_semantics'] = createCityGML('LOD1_0_HMin_semantics')
if VARIANTS:
CityGMLs['LOD1_0_HAvg'] = createCityGML('LOD1_0_HAvg')
if SOLIDS:
CityGMLs['LOD1_0_HAvg_solid'] = createCityGML('LOD1_0_HAvg_solid')
CityGMLs['LOD1_0_HAvg_semantics'] = createCityGML('LOD1_0_HAvg_semantics')
CityGMLs['LOD1_0_HMax'] = createCityGML('LOD1_0_HMax')
if SOLIDS:
CityGMLs['LOD1_0_HMax_solid'] = createCityGML('LOD1_0_HMax_solid')
CityGMLs['LOD1_0_HMax_semantics'] = createCityGML('LOD1_0_HMax_semantics')
CityGMLs['LOD1_0_HMedian'] = createCityGML('LOD1_0_HMedian')
if SOLIDS:
CityGMLs['LOD1_0_HMedian_solid'] = createCityGML('LOD1_0_HMedian_solid')
CityGMLs['LOD1_0_HMedian_semantics'] = createCityGML('LOD1_0_HMedian_semantics')
#-- LOD1.1
if VARIANTS:
CityGMLs['LOD1_1_F0_H0'] = createCityGML('LOD1_1_F0_H0')
CityGMLs['LOD1_1_F0_H1'] = createCityGML('LOD1_1_F0_H1')
CityGMLs['LOD1_1_F0_H2'] = createCityGML('LOD1_1_F0_H2')
CityGMLs['LOD1_1_F0_H3'] = createCityGML('LOD1_1_F0_H3')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4'] = createCityGML('LOD1_1_F0_H4')
CityGMLs['LOD1_1_F0_H5'] = createCityGML('LOD1_1_F0_H5')
CityGMLs['LOD1_1_F0_H6'] = createCityGML('LOD1_1_F0_H6')
CityGMLs['LOD1_1_F0_HAvg'] = createCityGML('LOD1_1_F0_HAvg')
CityGMLs['LOD1_1_F0_HMed'] = createCityGML('LOD1_1_F0_HMed')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0'] = createCityGML('LOD1_1_F1_H0')
CityGMLs['LOD1_1_F1_H1'] = createCityGML('LOD1_1_F1_H1')
CityGMLs['LOD1_1_F1_H2'] = createCityGML('LOD1_1_F1_H2')
CityGMLs['LOD1_1_F1_H3'] = createCityGML('LOD1_1_F1_H3')
CityGMLs['LOD1_1_F1_H4'] = createCityGML('LOD1_1_F1_H4')
CityGMLs['LOD1_1_F1_H5'] = createCityGML('LOD1_1_F1_H5')
CityGMLs['LOD1_1_F1_H6'] = createCityGML('LOD1_1_F1_H6')
CityGMLs['LOD1_1_F1_HAvg'] = createCityGML('LOD1_1_F1_HAvg')
CityGMLs['LOD1_1_F1_HMed'] = createCityGML('LOD1_1_F1_HMed')
if VARIANTS:
CityGMLs['LOD1_1_Fd_H0'] = createCityGML('LOD1_1_Fd_H0')
CityGMLs['LOD1_1_Fd_H1'] = createCityGML('LOD1_1_Fd_H1')
CityGMLs['LOD1_1_Fd_H2'] = createCityGML('LOD1_1_Fd_H2')
CityGMLs['LOD1_1_Fd_H3'] = createCityGML('LOD1_1_Fd_H3')
CityGMLs['LOD1_1_Fd_H4'] = createCityGML('LOD1_1_Fd_H4')
CityGMLs['LOD1_1_Fd_H5'] = createCityGML('LOD1_1_Fd_H5')
CityGMLs['LOD1_1_Fd_H6'] = createCityGML('LOD1_1_Fd_H6')
CityGMLs['LOD1_1_Fd_HAvg'] = createCityGML('LOD1_1_Fd_HAvg')
CityGMLs['LOD1_1_Fd_HMed'] = createCityGML('LOD1_1_Fd_HMed')
if SOLIDS:
if VARIANTS:
CityGMLs['LOD1_1_F0_H0_solid'] = createCityGML('LOD1_1_F0_H0_solid')
CityGMLs['LOD1_1_F0_H1_solid'] = createCityGML('LOD1_1_F0_H1_solid')
CityGMLs['LOD1_1_F0_H2_solid'] = createCityGML('LOD1_1_F0_H2_solid')
CityGMLs['LOD1_1_F0_H3_solid'] = createCityGML('LOD1_1_F0_H3_solid')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4_solid'] = createCityGML('LOD1_1_F0_H4_solid')
CityGMLs['LOD1_1_F0_H5_solid'] = createCityGML('LOD1_1_F0_H5_solid')
CityGMLs['LOD1_1_F0_H6_solid'] = createCityGML('LOD1_1_F0_H6_solid')
CityGMLs['LOD1_1_F0_HAvg_solid'] = createCityGML('LOD1_1_F0_HAvg_solid')
CityGMLs['LOD1_1_F0_HMed_solid'] = createCityGML('LOD1_1_F0_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0_solid'] = createCityGML('LOD1_1_F1_H0_solid')
CityGMLs['LOD1_1_F1_H1_solid'] = createCityGML('LOD1_1_F1_H1_solid')
CityGMLs['LOD1_1_F1_H2_solid'] = createCityGML('LOD1_1_F1_H2_solid')
CityGMLs['LOD1_1_F1_H3_solid'] = createCityGML('LOD1_1_F1_H3_solid')
CityGMLs['LOD1_1_F1_H4_solid'] = createCityGML('LOD1_1_F1_H4_solid')
CityGMLs['LOD1_1_F1_H5_solid'] = createCityGML('LOD1_1_F1_H5_solid')
CityGMLs['LOD1_1_F1_H6_solid'] = createCityGML('LOD1_1_F1_H6_solid')
CityGMLs['LOD1_1_F1_HAvg_solid'] = createCityGML('LOD1_1_F1_HAvg_solid')
CityGMLs['LOD1_1_F1_HMed_solid'] = createCityGML('LOD1_1_F1_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_Fd_H0_solid'] = createCityGML('LOD1_1_Fd_H0_solid')
CityGMLs['LOD1_1_Fd_H1_solid'] = createCityGML('LOD1_1_Fd_H1_solid')
CityGMLs['LOD1_1_Fd_H2_solid'] = createCityGML('LOD1_1_Fd_H2_solid')
CityGMLs['LOD1_1_Fd_H3_solid'] = createCityGML('LOD1_1_Fd_H3_solid')
CityGMLs['LOD1_1_Fd_H4_solid'] = createCityGML('LOD1_1_Fd_H4_solid')
CityGMLs['LOD1_1_Fd_H5_solid'] = createCityGML('LOD1_1_Fd_H5_solid')
CityGMLs['LOD1_1_Fd_H6_solid'] = createCityGML('LOD1_1_Fd_H6_solid')
CityGMLs['LOD1_1_Fd_HAvg_solid'] = createCityGML('LOD1_1_Fd_HAvg_solid')
CityGMLs['LOD1_1_Fd_HMed_solid'] = createCityGML('LOD1_1_Fd_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_F0_H0_semantics'] = createCityGML('LOD1_1_F0_H0_semantics')
CityGMLs['LOD1_1_F0_H1_semantics'] = createCityGML('LOD1_1_F0_H1_semantics')
CityGMLs['LOD1_1_F0_H2_semantics'] = createCityGML('LOD1_1_F0_H2_semantics')
CityGMLs['LOD1_1_F0_H3_semantics'] = createCityGML('LOD1_1_F0_H3_semantics')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4_semantics'] = createCityGML('LOD1_1_F0_H4_semantics')
CityGMLs['LOD1_1_F0_H5_semantics'] = createCityGML('LOD1_1_F0_H5_semantics')
CityGMLs['LOD1_1_F0_H6_semantics'] = createCityGML('LOD1_1_F0_H6_semantics')
CityGMLs['LOD1_1_F0_HAvg_semantics'] = createCityGML('LOD1_1_F0_HAvg_semantics')
CityGMLs['LOD1_1_F0_HMed_semantics'] = createCityGML('LOD1_1_F0_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0_semantics'] = createCityGML('LOD1_1_F1_H0_semantics')
CityGMLs['LOD1_1_F1_H1_semantics'] = createCityGML('LOD1_1_F1_H1_semantics')
CityGMLs['LOD1_1_F1_H2_semantics'] = createCityGML('LOD1_1_F1_H2_semantics')
CityGMLs['LOD1_1_F1_H3_semantics'] = createCityGML('LOD1_1_F1_H3_semantics')
CityGMLs['LOD1_1_F1_H4_semantics'] = createCityGML('LOD1_1_F1_H4_semantics')
CityGMLs['LOD1_1_F1_H5_semantics'] = createCityGML('LOD1_1_F1_H5_semantics')
CityGMLs['LOD1_1_F1_H6_semantics'] = createCityGML('LOD1_1_F1_H6_semantics')
CityGMLs['LOD1_1_F1_HAvg_semantics'] | |
-3.09699342998118032, 6.33537134263541457],
[-1.47214250606392016, -2.97011207363773355, 6.07581622020240708],
[-1.4762301901536552, -2.85321801458269464, 5.83669163417894676],
[-1.48000830776230541, -2.74517669988169555, 5.61567668388842467],
[-1.48351074240884162, -2.6450191531320435, 5.41078918064620229],
[-1.48676660672164096, -2.55191283243266742, 5.22032603330247813],
[-1.48980105357868231, -2.46513843397911403, 5.04281579646597056],
[-1.49263592719925375, -2.38407127260991869, 4.87698058157811598],
[-1.49529028985712831, -2.30816621924254717, 4.72170524414616644],
[-1.49778085107991976, -2.23694542693754261, 4.57601227553962531],
[-1.50012231976652277, -2.1699882612974859, 4.43904120408898706],
[-1.50232769490392948, -2.1069229867555106, 4.31003158812377229],
[-1.50440850702196993, -2.04741986162753342, 4.18830889085110591],
[-1.50637501985706135, -1.99118537107542615, 4.07327268300429601],
[-1.50823639967096113, -1.93795738505207304, 3.96438673768260985],
[-1.51000085811930762, -1.88750107264913436, 3.86117067252790314],
[-1.51167577336826819, -1.83960543849048497, 3.76319286439031808],
[-1.51326779322732641, -1.79408037341384063, 3.67006441604874789],
[-1.51478292333844222, -1.7507541325017173, 3.58143399713938271],
[-1.51622660288808153, -1.7094711699228371, 3.49698341499432486],
[-1.51760376985418888, -1.67009027304527935, 3.41642379768610382],
[-1.51891891743776686, -1.6324829486480672, 3.33949229277789428],
[-1.52017614303786619, -1.59653202236853531, 3.26594920227998209],
[-1.52137919089481732, -1.56213041922235729, 3.19557548801790858],
[-1.52253148933653115, -1.52918009846027148, 3.12817059271981623],
[-1.5236361834083274, -1.4975911204440826, 3.06355053116930787],
[-1.52469616354029025, -1.46728082683793626, 3.00154621316195191],
[-1.52571409080242892, -1.43817311837933848, 2.94200196607599462],
[-1.52669241921227195, -1.41019781694306245, 2.88477422987700383],
[-1.52763341548853537, -1.38329010063922508, 2.82973040152502353],
[-1.27213489944898095, -8.23146111408164138, 17.6041020644336932],
[-1.30437902272597639, -7.35803551281320001, 15.7361623126300483],
[-1.33043684159576836, -6.65218420797884136, 14.226603044791112],
[-1.35193286255940559, -6.06990264822590309, 12.9813145272285784],
[-1.36996858431331447, -5.58135349164683614, 11.9364855355447119],
[-1.38531729610211496, -5.16558991766890241, 11.0473184375600848],
[-1.39853784704409678, -4.80747384921408294, 10.2814383911601173],
[-1.41004415968819008, -4.49579297291886615, 9.61486633526512158],
[-1.42014934946801441, -4.22206563355355691, 9.02946309357703747],
[-1.42909465180157769, -3.97975725818160875, 8.51125359079293986],
[-1.43706892890169424, -3.76375195664729345, 8.04929679819341715],
[-1.4442221511115958, -3.56998733982641658, 7.63490474267422314],
[-1.45067491786679104, -3.39519661628397884, 7.2610909452800545],
[-1.45652531226088566, -3.23672291613530172, 6.92217332746208758],
[-1.46185392155978233, -3.09238329560723191, 6.6134833662872845],
[-1.46672757174205448, -2.96036757599313738, 6.33114974774896044],
[-1.47120214462486798, -2.83916203348658724, 6.07193516707017888],
[-1.47532473016408905, -2.72749109778234899, 5.83311164321179643],
[-1.47913529004584743, -2.62427228879101948, 5.61236414489170343],
[-1.48266795731126644, -2.52858101246882638, 5.40771530166628622],
[-1.48595206164768401, -2.43962278777978003, 5.21746600750175649],
[-1.48901294560986019, -2.35671113693646905, 5.04014813603913137],
[-1.49187261987374375, -2.27924983591821917, 4.87448658090691911],
[-1.4945502933795034, -2.2067185539831593, 4.71936854385943771],
[-1.49706280537361547, -2.13866115053373607, 4.57381850602783135],
[-1.49942497989510026, -2.07467607281083044, 4.43697769207952675],
[-1.50164991847650486, -2.01440842722457925, 4.30808711367802122],
[-1.50374924326860326, -1.9575433936040989, 4.18647348495785643],
[-1.50573330011639195, -1.90380072428509317, 4.07153745807334744],
[-1.50761132907723594, -1.85293012512286004, 3.96274374486502534],
[-1.50939160831245034, -1.80470735776488622, 3.85961278103808603],
[-1.51108157607991811, -1.75893093511948129, 3.76171365897143639],
[-1.51268793461999618, -1.71541930730102643, 3.66865810947787674],
[-1.51421673899426201, -1.674008455169536, 3.58009535526030298],
[-1.51567347335998215, -1.63454982421174511, 3.4957076922346646],
[-1.51706311670563898, -1.59690854390067249, 3.41520668138757433],
[-1.51839019970813593, -1.56096188755042009, 3.33832985496578516],
[-1.51965885407985768, -1.52659793560533652, 3.26483785773777635],
[-1.52087285553800688, -1.4937144116891139, 3.19451195772603658],
[-1.52203566133754231, -1.46221766591379598, 3.12715187187473154],
[-1.52315044315365289, -1.43202178416155412, 3.06257386112732366],
[-1.52422011597233964, -1.40304780549755126, 3.00060905675726319],
[-1.52524736354334922, -1.37522303270301949, 2.94110198584895821],
[-1.52623466086329551, -1.348480423252711, 2.88390926882000231],
[-1.52718429408560086, -1.32275804999540236, 2.82889846601289463],
[-1.26986656208462168, -7.84329347169913405, 17.572712278259246],
[-1.30230041418723919, -7.01238684562359627, 15.7110857660281056],
[-1.32852055598775176, -6.34066628994154335, 14.206111854369027],
[-1.35015653685924009, -5.78638515796817199, 12.9642581753524979],
[-1.36831391167836314, -5.32122076763807961, 11.9220684341605541],
[-1.38376918739381716, -4.9252801585203656, 11.0349729265896759],
[-1.39708376371746046, -4.58418109969791754, 10.270748606704144],
[-1.40867359473545539, -4.28726746223834176, 9.60552067134828391],
[-1.41885342570588402, -4.02647593781399582, 9.02122347019754756],
[-1.42786581028488757, -3.79559266025096731, 8.50393498403847659],
[-1.43590068585176578, -3.589751714629414, 8.04275324634828159],
[-1.44310890689263394, -3.40508843199385502, 7.62901955834813705],
[-1.44961180565754044, -3.23849445944769698, 7.25576972938162612],
[-1.45550807719047182, -3.08744137585857681, 6.91733889210219122],
[-1.46087882339626129, -2.94985147031937389, 6.60907198469173895],
[-1.46579130593313622, -2.82400159858677613, 6.32710834350037299],
[-1.47030177775206972, -2.70845064322749662, 6.06821917923112952],
[-1.47445764680554769, -2.60198408259802516, 5.82968338505890848],
[-1.47829914873782342, -2.50357113899147565, 5.60919152807375809],
[-1.48186065381913168, -2.41233129691486559, 5.40477083427830607],
[-1.48517169814993411, -2.32750788516034479, 5.2147259998473432],
[-1.48825780469723679, -2.24844704306776766, 5.03759206553992644],
[-1.49114114249425, -2.17458083280411474, 4.87209658016350389],
[-1.49384106003630945, -2.10541357454781908, 4.71712898487975618],
[-1.49637452001988769, -2.04051070911802857, 4.57171566019063036],
[-1.49875645607686625, -1.97948965896944618, 4.43499945021487818],
[-1.5010000673590056, -1.92201228137433611, 4.30622275422202438],
[-1.5031170632483386, -1.86777859930266965, 4.1847134808186004],
[-1.50511786777441947, -1.81652156455533387, 4.06987331487278414],
[-1.50701179127186591, -1.76800266015135121, 3.96116786476767002],
[-1.50880717524392738, -1.72200818913709131, 3.85811834756667338],
[-1.5105115151876809, -1.67834612798631189, 3.76029453913123657],
[-1.51213156519565906, -1.63684344686128047, 3.66730877022967849],
[-1.51367342741224631, -1.59734381787221968, 3.57881079194704466],
[-1.51514262884287643, -1.55970564733825534, 3.49448336701261475],
[-1.51654418755417231, -1.52380037983912064, 3.41403847006812455],
[-1.51788266993597665, -1.48951103124558482, 3.33721400095736964],
[-1.51916224040230508, -1.45673091545426558, 3.26377093200586588],
[-1.52038670467094961, -1.42536253562901538, 3.19349082387357974],
[-1.52155954756916634, -1.39531661567478116, 3.12617365559516491],
[-1.52268396615651391, -1.36651125167892284, 3.0616359234044439],
[-1.52376289882792393, -1.33887116633415904, 2.99970897028677053],
[-1.52479905095480328, -1.3123270520512651, 2.94023751423866786],
[-1.52579491753530672, -1.28681499069242733, 2.88307834819422437],
[-1.52675280325302443, -1.26227593969738261, 2.82809918870294474],
[-1.2677100630409246, -7.45711801788779294, 17.5428701370810316],
[-1.3003232998998322, -6.66832461487093475, 15.6872336564838726],
[-1.32669705654630632, -6.03044116611957381, 14.186612843296297],
[-1.34846558481389311, -5.50394116250568644, 12.9480215847946276],
[-1.36673823712450959, -5.06199347083151796, 11.9083396401311443],
[-1.38229454085172887, -4.68574420627704669, 11.0232132452667528],
[-1.39569829440434523, -4.36155717001357868, 10.2605632424569446],
[-1.40736736512875615, -4.07932569602538031, 9.59661369989970581],
[-1.41761803493378191, -3.83140009441562679, 9.0133687221127623],
[-1.42669410148087539, -3.61188380121740593, 8.49695664236394776],
[-1.43478651753614161, -3.41615837508605136, 8.03651257738993507],
[-1.4420469731027532, -3.24055499573652428, 7.6234056274701727],
[-1.4485974933215604, -3.08212232621608573, 7.25069277242334209],
[-1.45453735212338531, -2.93845929721896049, 6.91272549670760394],
[-1.45994813844965932, -2.80759257394165251, 6.60486153019773692],
[-1.46489752642351534, -2.68788536958443203, 6.32325033194737696],
[-1.46944212047039136, -2.57796863240419549, 6.06467120773086954],
[-1.47362962978415313, -2.47668845341874455, 5.82640958666759712],
[-1.47750054960199617, -2.38306540258386468, 5.60616135957854222],
[-1.48108947503880217, -2.29626275199814289, 5.40195812407563025],
[-1.48442613787594291, -2.21556139981484046, 5.2121081930644122],
[-1.48753623214728004, -2.14033990237018701, 5.03514962032586499],
[-1.49044207706849874, -2.07005844037291498, 4.86981248097728248],
[-1.49316315350890472, -2.00424584363611391, 4.71498834715395798],
[-1.49571654128131093, -1.94248901465105295, 4.56970540696660255],
[-1.49811727800328121, -1.88442424906073125, 4.43310804591537],
[-1.50037865546410543, -1.8297300676391528, 4.30443998412110673],
[-1.50251246583629428, -1.77812126134472703, 4.1830302673134705],
[-1.5045292073627512, -1.72934391650890928, 4.06828156358720872],
[-1.50643825709337653, -1.68317123697583937, 3.95966033498566317],
[-1.50824801666940811, -1.63940001811892211, 3.8566885426247639],
[-1.50996603593732526, -1.59784765707653453, 3.75893661327254724],
[-1.51159911822875581, -1.55834960642139797, 3.66601744910633887],
[-1.51315341040218398, -1.52075719638410445, 3.57758130449360578],
[-1.51463448015907387, -1.48493576486183709, 3.49331138683747211],
[-1.516047382684375, -1.4507630456309808, 3.41292006484688271],
[-1.51739671829246925, -1.41812777410511148, 3.33614558858227461],
[-1.51868668246372729, -1.3869284771358239, 3.26274924246202591],
[-1.51992110941828384, -1.35707241912346777, 3.19251286598795758],
[-1.52110351018045686, -1.32847468138031144, 3.12523668794718246],
[-1.52223710592964512, -1.30105735549542323, 3.06073742880298294],
[-1.52332485730492473, -1.27474883456479038, 2.99884663331366985],
[-1.52436949022473023, -1.24948318870879516, 2.93940920143744044],
[-1.52537351869569715, -1.22519961341019878, 2.88228209054749218],
[-1.52633926501255002, -1.20184194095476382, 2.82733316609608165],
[-1.26566665629569663, -7.07284307929948142, 17.5145929937389759],
[-1.29844892954344648, -6.32577683623730369, 15.6646210602613216],
[-1.32496756760636214, -5.72145086011838266, 14.1681191036089409],
[-1.34686119442111951, -5.22252299877577109, 12.9326161627578937],
[-1.36524270700396211, -4.80363174686579253, 11.8953091415808174],
[-1.38089445913901976, -4.44694825824430051, 11.0120481868628595],
[-1.39438249792004298, -4.1395730407001281, 10.2508900823653164],
[-1.40612648675465368, -3.87194249975919158, 9.58815234808789185],
[-1.41644415162652382, -3.6368160649870207, 9.00590504513891155],
[-1.42558046027823959, -3.42861123358057629, 8.49032413368242622],
[-1.4337273212165571, -3.2429546551326891, 8.03057981705226354],
[-1.4410372113635519, -3.07637157482106627, 7.61806750501742957],
[-1.44763280879186507, -2.92606631564313213, 7.24586421847417572],
[-1.4536139331839204, -2.78976411419136294, 6.90833692487913709],
[-1.45906263282406012, -2.66559519458241745, 6.60085546868961792],
[-1.4640469709849937, -2.55200848153347559, 6.31957889769210635],
[-1.46862388380388942, -2.44770647300648259, 6.06129418710289425],
[-1.47284136485866601, -2.35159545649703006, 5.82329295937817015],
[-1.47674015451575324, -2.2627470109515575, 5.60327615080372521],
[-1.48035506025658825, -2.18036791820737852, 5.3992795027187972],
[-1.48371599872328619, -2.10377641610018218, 5.20961475671124052],
[-1.48684882558894405, -2.03238328677625457, 5.03282282330646069],
[-1.48977600200062188, -1.96567666930637608, 4.86763617320342057],
[-1.49251713394963792, -1.90320976812369991, 4.71294839948526612],
[-1.49508941196610046, -1.844590832945189, 4.56778940474058626],
[-1.49750797198571695, -1.78947493507727207, 4.43130503659924457],
[-1.49978619339790953, -1.73755717527806564, 4.30274026825436806],
[-1.50193594667352803, -1.68856704063887775, 4.18142522431890562],
[-1.50396780025020216, -1.64226368992838201, 4.06676350584896706],
[-1.50589119428747109, -1.59843199393642199, 3.95822238498466605],
[-1.50771458732019359, -1.55687919342846071, 3.85532452898995093],
[-1.5094455806172169, -1.51743206517074691, 3.75764098243598177],
[-1.51109102410167107, -1.47993450814081218, 3.6647851898960635],
[-1.51265710694555078, -1.44424547899323952, 3.57640788350694283],
[-1.5141494353646352, -1.41023721921216016, 3.4921926928385334],
[-1.51557309967505738, -1.37779372697732483, 3.4118523607517437],
[-1.51693273230194081, -1.34680943522134955, 3.33512546985076241],
[-1.51823255813306734, -1.31718806413371126, 3.2617736009205851],
[-1.51947643837074797, -1.28884162183233086, 3.1915788582742719],
[-1.52066790884074976, -1.26168953135258732, 3.12434170790206434],
[-1.52181021355873058, -1.23565786571008052, 3.0598790832452254],
[-1.52290633422539501, -1.21067867574403776, 2.99802272072405174],
[-1.52395901621500962, -1.18668939787234362, 2.93861769315230292],
[-1.52497079153420723, -1.16363233088955598, 2.88152111412387368],
[-1.52594399915528056, -1.14145417259646997, 2.82660099056125791],
[-1.26373760188410533, -6.69037553938643992, 17.4878982848962217],
[-1.29667855429063428, -5.98467025057215807, 15.6432630716344434],
[-1.32333331204305882, -5.41363627653978607, 14.1506437117331654],
[-1.34534455028057232, -4.94208202143883746, 12.9180532838165139],
[-1.36382846299116833, -4.54609487663722778, 11.8829868858767522],
[-1.37957003940021927, -4.20885774732270246, 11.0014865006407323],
[-1.39313742701876597, -3.91819901349027955, 10.2417368658176482],
[-1.40495196910250963, -3.66509209331089458, 9.5801434994598651],
[-1.41533274367012551, -3.44270126838679191, 8.99883859319992041],
[-1.42452581488744356, -3.2457550212625299, 8.48404298613344565],
[-1.4327239874596458, -3.07012283027066912, 8.02495995356860981],
[-1.44008047663640282, -2.91252231229834235, 7.6130097107576109],
[-1.44671857341285914, -2.77031216185441087, 7.24128817862500185],
[-1.45273861000531923, -2.64134292728239872, 6.90417692936871408],
[-1.45822306624809439, -2.52384761466016316, 6.59705723720892667],
[-1.46324037114170635, -2.41636024592208409, 6.31609719836855898],
[-1.4678477726646888, -2.31765437789161366, 6.05809102665557919],
[-1.47209353181637237, -2.22669609854577333, 5.8203361909210356],
[-1.47601861952780178, -2.14260767350809944, 5.60053839103089324],
[-1.47965804307357462, -2.06463912987010412, 5.39673728113268858],
[-1.48304189304702372, -1.9921458264810763, 5.20724784088525716],
[-1.48619617724979625, -1.9245705892443401, 5.0306136791081224],
[-1.48914349043212346, -1.86142936304015394, 4.86556952950217969],
[-1.49190355638415562, -1.80229959831642961, 4.71101089448822208],
[-1.49449366988853316, -1.74681078298659731, 4.56596929663996676],
[-1.49692905947092636, -1.69463667109916205, 4.4295919652897533],
[-1.49922318702772661, -1.6454888638109193, 4.3011250579057636],
[-1.50138799678443302, -1.59911147586507685, 4.17989971885843659],
[-1.50343412330745441, -1.55527667928357349, 4.06532043112734076],
[-1.50537106621654382, -1.51378096044121713, 3.95685523270884865],
[-1.50720733765554082, -1.47444196074998568, 3.85402745851593398],
[-1.50895058735134047, -1.43709579747746696, 3.75640873729526747],
[-1.51060770913663456, -1.4015947816731662, 3.66361302654013743],
[-1.51218493206451643, -1.36780546618901, 3.5752915102329399],
[-1.51368789865392039, -1.33560696940051793, 3.4911282172385758],
[-1.51512173233771019, -1.30488953024300391, 3.41083624432962962],
[-1.51649109581264652, -1.27555325816017007, 3.33415448869088893],
[-1.5178002416915175, -1.24750704796563094, 3.26084481148794048],
[-1.51905305661681256, -1.22066763478136719, 3.19068956758129563],
[-1.52025309979976964, -1.19495876840156567, 3.12348944740520329],
[-1.52140363678984492, -1.17031048983834207, 3.05906158593855526],
[-1.52250766914922564, -1.14665849559355904, 2.99723790098205312],
[-1.52356796060034538, -1.12394357749205787, 2.93786362894451836],
[-1.52458706012594547, -1.10211112780188847, 2.88079603128215833],
[-1.52556732242803039, -1.08111070093325012, 2.82590324882830091],
[-1.26192416103064686, -6.30962080515323276, 17.4628034636740956],
[-1.29501342218144333, -5.6449303018165482, 15.6231747470863898],
[-1.32179550688237435, -5.10693718568190125, 14.1341996815490294],
[-1.3439168294310011, -4.66256859190347228, 12.9043442499446126],
[-1.3624966381353294, -4.28934126820381501, 11.8713827452346763],
[-1.3783223695168525, -3.97143733589601533, 10.991536861994561],
[-1.39196412483993015, -3.69740470660606491, 10.23311126151855],
[-1.40384481188925503, -3.45874808726338179, 9.5725939709260075],
[-1.41428476915332335, -3.24903257778466203, 8.99217545792755146],
[-1.4235310837878723, -3.06329473726504098, 8.47811866990118723],
[-1.43177739722945141, -2.89764473248817467, 8.01965792138639166],
[-1.43917761445066339, -2.74899094895070517, 7.60823671459592266],
[-1.44585559929538698, -2.61484523276825254, 7.23696871774978145],
[-1.45191216320344041, -2.49318250247055451, 6.90024922007291686],
[-1.457430189659632, -2.38233781002055212, 6.59347023302747015],
[-1.46247844985890652, -2.28092969244049604, 6.31280835466619727],
[-1.46711448363939412, -2.18780230016400834, 6.05506460133627122],
[-1.47138680235120534, -2.10198114621600762, 5.81754193702724454],
[-1.47533659282261231, -2.02263887725680735, 5.59795053970192935],
[-1.47899904945125438, -1.94906851478072274, 5.39433374237841257],
[-1.4824044257819784, -1.88066233121594251, 5.20500956963000583],
[-1.48557887215051609, -1.81689502349343668, 5.02852416796271839],
[-1.48854510850467459, -1.75731019754024054, 4.86361439966284248],
[-1.49132296903881567, -1.70150942776240721, 4.70917756330713289],
[-1.49392984625748038, -1.64914333677773817, 4.56424670561132828],
[-1.49638105548759381, -1.59990427316283612, 4.42797035601832079],
[-1.49869013598350564, -1.55352026290973133, 4.29959578646487461],
[-1.50086910113046135, -1.50974998338567112, 4.1784551010762252],
[-1.50292864751033672, -1.46837856365952435, 4.06395361295180102],
[-1.50487833051022246, -1.4292140569091496, 3.9555600810338527],
[-1.50672671255812429, -1.3920844626895712, 3.85279846879672494],
[-1.5084814888376652, -1.35683520160001603, 3.75524095501651534],
[-1.51014959437493657, -1.32332696414298545, 3.66250198017220452],
[-1.51173729564013626, -1.29143387064735271, 3.57423315376223183],
[-1.51325026921496164, -1.26104189101246789, 3.49011888071391851],
[-1.51469366960746976, -1.23204748245583673, 3.40987259114983043],
[-1.51607218792184839, -1.20435641096595414, 3.33323347858518559],
[-1.51739010279019881, -1.17788272819330486, 3.25996366832323492],
[-1.51865132473150566, -1.15254788037658895, 3.18984575127755976],
[-1.51985943490642228, -1.12827992984319558, 3.12268062936013813],
[-1.52101771907693917, -1.10501287283367366, 3.0582856274602559],
[-1.522129197449132, -1.08268604002657898, 2.99649283430885571],
[-1.52319665096966861, -1.06124356829853927, 2.93714764049684041],
[-1.52422264455825651, -1.04063393403575732, 2.88010744684602304],
[-1.52520954768452821, -1.02080953978885747, 2.8252405204156319],
[-1.26022759105903748, -5.93048278145429464, 17.4393259292140321],
[-1.29345477328647251, -5.30648112117527226, 15.6043710469560359],
[-1.32035535871161436, -4.80129221349678037, 14.1187999153139678],
[-1.34257919699879058, -4.38393207183278566, 12.8915002487332853],
[-1.3612483527352488, -4.03332845254888639, 11.8605064807765679],
[-1.37715252419575518, -3.7346509130732346, 10.9822078412559794],
[-1.39086362119682816, -3.47715905299207062, 10.2250208402048077],
[-1.40280600153546042, -3.25288348182135767, 9.56551048873092924],
[-1.41330117301867664, -3.05578632004038164, 8.98592164736874821],
[-1.42259717254231277, -2.8812094633768357, 8.47255657824250896],
[-1.43088841885431761, -2.72550175019869956, 8.01467858417805168],
[-1.43832945801288647, -2.58576082339395352, 7.60375292129284475],
[-1.44504468655884977, -2.45965053031293568, 7.23290984070147669],
[-1.45113536174313928, -2.34526927150406372, 6.89655745151663702],
[-1.45668474268846926, -2.2410534502899524, 6.59009780225889141],
[-1.46176191913461251, -2.14570556937190871, 6.30971543993426831],
[-1.46642470268296599, -2.05813993359012759, 6.05221774221346909],
[-1.47072183785365196, -1.97744112652591864, 5.81491281269027915],
[-1.47469471260055474, -1.90283188722652219, 5.59551501837549203],
[-1.47837869567667579, -1.83364799463773753, 5.39207113423159701],
[-1.48180419248791839, -1.76931843879154438, 5.20290203407138474],
[-1.48499748622569827, -1.70934962443245841, 5.02655623934669737],
[-1.48798141355227931, -1.65331268172475432, 4.86177260469685546],
[-1.49077591160028744, -1.60083319366473842, 4.70745011012037118],
[-1.49339846399996423, -1.55158281974022261, 4.56262322929859199],
[-1.49586446703037823, -1.50527241965321079, 4.42644170904272372],
[-1.49818753209942335, -1.46164637277992515, 4.29815386495596741],
[-1.50037973710615069, -1.42047785761529077, 4.17709270004996469],
[-1.50245183648776548, -1.38156490711518676, 4.06266430498559572],
[-1.50441343766255797, -1.34472709511737043, 3.9543381140795173],
[-1.5062731499781532, -1.30980273911143774, 3.85163867969957119],
[-1.50803871103693798, -1.27664652786195809, 3.75413869599407057],
[-1.50971709430837642, -1.24512750046051668, 3.66145305604170668],
[-1.51131460118451022, -1.21512731753528458, 3.57323376812719617],
[-1.51283694004050551, -1.1865387765023574, 3.48916558965224466],
[-1.51428929439131665, -1.15926453159143872, 3.4089622632110741],
[-1.51567638185951892, -1.13321598643702348, 3.33236326011637241],
[-1.5170025053678311, -1.10831233268882645, 3.25913095331308034],
[-1.51827159772668341, -1.08447971266191678, 3.18904815504644867],
[-1.51948726059019168, -1.06165048774860549, 3.12191596553574335],
[-1.52065279859316815, -1.03976259732866194, 3.05755188776959663],
[-1.52177124935071184, -1.01875899538122949, 2.99578817079292836],
[-1.52284540989370121, -0.998587154028656276, 2.9364703498155178],
[-1.52387786002470249, -0.979198624915471494, 2.87945595639205809],
[-1.52487098300483614, -0.96054865071170803, 2.8246133759991161],
[-1.25864914010852136, -5.55286385341994571, 17.4174829535605653],
[-1.29200383468543212, -4.96924551802089187, 15.5868667748588656],
[-1.31901405891540935, -4.49663883721162261, 14.104457152720455],
[-1.34133280168199942, -4.10612082147550783, 12.8795323100281784],
[-1.36008471005932963, -3.77801308349813825, 11.8503677052391598],
[-1.37606156091161003, -3.49846159553799119, 10.9735078713374996],
[-1.38983692872794862, -3.25743030170612924, 10.2174730463515591],
[-1.40183650751192146, -3.04747066850417658, 9.55889966354143361],
[-1.41238288359142805, -2.86293827755019192, 8.98008306391576916],
[-1.42172497049584812, -2.69947779208072403, 8.46736200782653725],
[-1.4300579048844364, -2.55367483015077923, 8.01002671723960979],
[-1.43753682521235082, -2.42281487395340012, 7.59956265463383218],
[-1.44428662047496803, -2.30471269224728914, 7.22911547801566456],
[-1.45040896021122756, -2.19758933365337539, 6.8931052098927541],
[-1.45598745105018668, -2.09998190055315392, 6.58694322806839239],
[-1.46109147750669943, -2.01067634519270522, 6.30682146942111999],
[-1.46577910273199219, -1.928656714121042, 6.04955322662633588],
[-1.47009928712369908, -1.85306632830718354, 5.81245138312353227],
[-1.47409360488458985, -1.78317774784426186, 5.59323420240483493],
[-1.47779758625732582, -1.71836928634572761, 5.3899516615044325],
[-1.48124177732759921, -1.65810646715776011, 5.20092728531817361],
[-1.4844525843802594, -1.60192724933121511, 5.02471180540224438],
[-1.48745295223178498, -1.54943015857477562, 4.86004593072954716],
[-1.49026291341617023, -1.50026467793574181, 4.70583020645844385],
[-1.49290003602799048, -1.45412341171557502, 4.56110043474785432],
[-1.49537979138959432, -1.41073565225433417, 4.42500749590442766],
[-1.49771585780306182, -1.36986206506377606, 4.29680067741671134],
[-1.4999203729849202, -1.33129027188010673, 4.17581381946393471],
[-1.50200414502115387, -1.2948311595009947, 4.06145373696812051],
[-1.50397682958257106, -1.26031577897980807, 3.95319049339999884],
[-1.5058470795326091, -1.22759272787984419, 3.85054918978311189],
[-1.50762267181824083, -1.19652593001447682, 3.75310300047874223],
[-1.50931061556899238, -1.16699274399664388, 3.66046724033613913],
[-1.51091724457267307, -1.13888234515528142, 3.57229428930373194],
[-1.51244829670014291, -1.1120943358089288, 3.48826923332067285],
[-1.51390898237862781, -1.08653754716169426, 3.40810610626384802],
[-1.51530404383569528, -1.06212900268857124, 3.33154463843315263],
[-1.51663780653329661, -1.03879301817349079, 3.25834743367084956],
[-1.51791422396729248, -1.01646041783524033, 3.18829751060987077],
[-1.51913691680974861, -0.995067849438699747, 3.1211961544060558],
[-1.52030920721042628, -0.974557184109247632, 3.05686103415600652],
[-1.52143414894165763, -0.954874988875517361, 2.99512454843953924],
[-1.52251455396271917, -0.935972061862327287, 2.93583236737483322],
[-1.52355301589021574, -0.917803021620612003, 2.87884214448331965],
[-1.52455193078684004, -0.90032594337805516, 2.82402237572916626],
[-1.25719004169050197, -5.17666487754912463, 17.3972916063347398],
[-1.2906618152933258, -4.63314497797604208, 15.5706765152694828],
[-1.31777277876756105, -4.19291338698769511, 14.0911839184122147],
[-1.34017877109853822, -3.82908220313868064, 12.8684512612624928],
[-1.35900679193765295, -3.52335094206058264, 11.8409758445678843],
[-1.37505051572908399, -3.2628317322385052, 10.9654452144158299],
[-1.38888503893487525, -3.03818602267004012, 10.2104751690446687],
[-1.40093727857930239, -2.84248143479858406, 9.55276796480524304],
[-1.41153080900743411, -2.67046369271676776, 8.97466548159456501],
[-1.42091534737937164, -2.51807783079888647, 8.46254013850625775],
[-1.42928668885904742, -2.38214448143174584, 8.00570698938311587],
[-1.43680051554102528, -2.2601356424240846, 7.5956701411466554],
[-1.44358216853034271, -2.15001599568348922, 7.22558947120772466],
[-1.4497336960122631, -2.05012845900724061, 6.88989599973504774],
[-1.45533902386655312, -1.95911022443574501, 6.58400971855012695],
[-1.46046780749002836, -1.87583021145508222, 6.30412938921154442],
[-1.46517834125126534, -1.79934182258927344, 6.04707376805894992],
[-1.46951978402016481, -1.7288468047296055, 5.81016015446628131],
[-1.47353388126615048, -1.66366728530049279, 5.59111041238486983],
[-1.47725631175789673, -1.60322390423337646, 5.38797747815574102],
[-1.48071775098912828, -1.5470185458095369, 5.19908732716705124],
[-1.48394471849260912, -1.49462057977672869, 5.02299273417822079],
[-1.48696025860227721, -1.44565580697443585, 4.8584361227248003],
[-1.48978449164641913, -1.39979750892952159, 4.70431948536671296],
[-1.49243506345824484, -1.35675914859840474, 4.55967985296820988],
[-1.49492751443550365, -1.31628837749038086, 4.42366915435160113],
[-1.49727558446105413, -1.27816208429602196, 4.29553757615198784],
[-1.49949146632284624, -1.24218227979407403, 4.17461973316508406],
[-1.50158601750358245, -1.20817265776150307, 4.06032311054848005],
[-1.50356893810598002, -1.17597570578121347, 3.9521183540718634],
[-1.50544892106693529, -1.14545026602919009, 3.84953107261952754],
[-1.5072337795684021, -1.11646946634696453, 3.75213488511604876],
[-1.50893055558390898, -1.08891895763787261, 3.65954549691870668],
[-1.51054561274077526, -1.06269540594828427, 3.57141563213334035],
[-1.51208471607992845, -1.03770519730975486, 3.48743068095885445],
[-1.51355310082042882, -1.01386332112373223, 3.40730494706225695],
[-1.51495553185751453, -0.991092404018934725, 3.33077840064934838],
| |
/ N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
init : estimator, optional
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if ``n_iter_no_change`` is set to an integer.
.. versionadded:: 0.20
n_iter_no_change : int, default None
``n_iter_no_change`` is used to decide if early stopping will be used
to terminate training when validation score is not improving. By
default it is set to None to disable early stopping. If set to a
number, it will set aside ``validation_fraction`` size of the training
data as validation and terminate training when validation score is not
improving in all of the previous ``n_iter_no_change`` numbers of
iterations.
.. versionadded:: 0.20
tol : float, optional, default 1e-4
Tolerance for the early stopping. When the loss is not improving
by at least tol for ``n_iter_no_change`` iterations (if set to a
number), the training stops.
.. versionadded:: 0.20
Attributes
----------
n_estimators_ : int
The number of estimators as selected by early stopping (if
``n_iter_no_change`` is specified). Otherwise it is set to
``n_estimators``.
.. versionadded:: 0.20
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init_ : estimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
<NAME>, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
<NAME>, Stochastic Gradient Boosting, 1999
<NAME>, <NAME> and <NAME>.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto', validation_fraction=0.1,
n_iter_no_change=None, tol=1e-4):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
warm_start=warm_start, presort=presort,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, tol=tol)
def _validate_y(self, y, sample_weight):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight))
if n_trim_classes < 2:
raise ValueError("y contains %d class after sample_weight "
"trimmed classes with zero weights, while a "
"minimum of 2 classes are required."
% n_trim_classes)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
| |
range(9)]
for batch in tqdm(train_data.get_batches(
config.im_batch_size, num_batches=num_steps),
total=num_steps, ascii=True, smoothing=1):
# start from 0 or the previous step
global_step = sess.run(models[0].global_step) + 1
validation_performance = None
if (global_step % config.save_period == 0) or \
(config.load and isStart and ((config.ignore_vars is None) or \
config.force_first_eval)): # time to save model
tqdm.write("step:%s/%s (epoch:%.3f)" % (
global_step, num_steps,
(config.num_epochs*global_step/float(num_steps))))
tqdm.write("\tsaving model %s..." % global_step)
saver.save(sess, os.path.join(config.save_dir, "model"),
global_step=global_step)
tqdm.write("\tdone")
if config.skip_first_eval and isStart:
tqdm.write("skipped first eval...")
validation_performance = config.best_first
else:
# cat_id -> imgid -> {"dm","dscores"}
e = {one:{} for one in eval_target.keys()}
if config.add_act:
e_act = {one:{} for one in act_eval_target.keys()}
if config.use_small_object_head:
e_so = {one:{} for one in config.so_eval_target.keys()}
for val_batch_ in tqdm(val_data.get_batches(
config.im_batch_size, num_batches=num_val_steps, shuffle=False),
total=num_val_steps, ascii=True, smoothing=1):
batch_idx, val_batches = val_batch_
this_batch_num = len(val_batches)
# multiple image at a time for parallel inferencing with
# multiple gpu
scales = []
imgids = []
for val_batch in val_batches:
# load the image here and resize
image = cv2.imread(val_batch.data["imgs"][0], cv2.IMREAD_COLOR)
imgid = os.path.splitext(
os.path.basename(val_batch.data["imgs"][0]))[0]
imgids.append(imgid)
assert image is not None, image
image = image.astype("float32")
val_batch.data["imgdata"] = [image]
resized_image = resizeImage(image, config.short_edge_size,
config.max_size)
# rememember the scale and original image
ori_shape = image.shape[:2]
#print(image.shape, resized_image.shape
# average H/h and W/w ?
scale = (resized_image.shape[0]*1.0/image.shape[0] + \
resized_image.shape[1]*1.0/image.shape[1])/2.0
val_batch.data["resized_image"] = [resized_image]
scales.append(scale)
outputs = tester.step(sess, val_batch_)
# post process this batch, also remember the ground truth
for i in range(this_batch_num): # num gpu
imgid = imgids[i]
scale = scales[i]
if config.add_act:
if config.act_v2:
boxes, labels, probs, actsingleboxes, actsinglelabels = \
outputs[i]
actsingleboxes = actsingleboxes / scale
else:
boxes, labels, probs, actboxes, actlabels, actprobs = \
outputs[i]
actboxes = actboxes / scale
else:
if config.add_mask:
boxes, labels, probs, masks = outputs[i]
else:
if config.use_small_object_head:
boxes, labels, probs, so_boxes, so_labels, so_probs = \
outputs[i]
so_boxes = so_boxes / scale
else:
boxes, labels, probs = outputs[i]
if config.use_cpu_nms:
boxes, labels, probs = nms_wrapper(boxes, probs, config)
val_batch = val_batches[i]
boxes = boxes / scale
# each class"s detection box and prob
target_dt_boxes = gather_dt(boxes, probs, labels, eval_target,
targetid2class,
tococo=config.tococo,
coco_class_names=config.class_names)
# gt
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_boxes = gather_gt(anno["boxes"], anno["labels"], eval_target,
targetid2class)
# gt_boxes and target_dt_boxes for this image
# eval on one single image
match_dt_gt(e, imgid, target_dt_boxes, gt_boxes, eval_target)
if config.use_small_object_head:
target_so_dt_boxes = gather_dt(
so_boxes, so_probs, so_labels, config.so_eval_target,
config.small_objects_targetid2class)
anno = val_batch.data["gt"][0] # one val_batch is single image
small_object_classids = [targetClass2id[one]
for one in config.small_objects]
idxs = [i for i in range(len(anno["labels"]))
if anno["labels"][i] in small_object_classids]
gt_so_boxes = [anno["boxes"][i] for i in idxs]
# convert the original classid to the small object class id
gt_so_labels = [
small_object_classids.index(anno["labels"][i])+1
for i in idxs]
gt_so_boxes = gather_gt(gt_so_boxes, gt_so_labels,
config.so_eval_target,
config.small_objects_targetid2class)
match_dt_gt(e_so, imgid, target_so_dt_boxes, gt_so_boxes,
config.so_eval_target)
# eval the act box as well, put stuff in e_act
if config.add_act and config.act_v2:
# for v2, we have the single and pair boxes
# actsingleboxes [K,4]
# actsinglelabels [K,num_act_class]
# first we filter the BG boxes
# we select topk act class for each box
topk = config.act_single_topk
single_act_boxes, single_act_labels, single_act_probs = \
gather_act_singles(actsingleboxes, actsinglelabels, topk)
target_act_dt_boxes = gather_dt(
single_act_boxes, single_act_probs, single_act_labels,
act_eval_target, targetsingleactid2class)
# to collect the ground truth, each label will be a stand
# alone boxes
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_single_act_boxes = []
gt_single_act_labels = []
gt_obj_boxes = anno["boxes"]
for bid, label in zip(
anno["actSingleIdxs"], anno["actSingleLabels"]):
if label in act_eval_target:
gt_single_act_boxes.append(gt_obj_boxes[bid])
gt_single_act_labels.append(targetSingleAct2id[label])
gt_act_boxes = gather_gt(
gt_single_act_boxes, gt_single_act_labels,
act_eval_target, targetsingleactid2class)
match_dt_gt(e_act, imgid, target_act_dt_boxes,
gt_act_boxes, act_eval_target)
if config.add_act and not config.act_v2:
target_act_dt_boxes = gather_dt(actboxes, actprobs, actlabels,
act_eval_target,
targetactid2class)
#gt
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_act_boxes = gather_gt(
anno["actboxes"], anno["actlabels"],
act_eval_target, targetactid2class)
# gt_boxes and target_dt_boxes for this image
match_dt_gt(e_act, imgid, target_act_dt_boxes,
gt_act_boxes, act_eval_target)
# we have the dm and g matching for each image in e & e_act
# max detection per image per category
aps, ars = aggregate_eval(e, maxDet=100)
aps_str = "|".join(["%s:%.5f" % (class_, aps[class_])
for class_ in aps])
ars_str = "|".join(["%s:%.5f" % (class_, ars[class_])
for class_ in ars])
#validation_performance = ars[eval_best]
# now we use average AR and average AP or weighted
average_ap, average_ar = weighted_average(
aps, ars, eval_target_weight)
ap_weight = 1.0
ar_weight = 0.0
validation_performance = average_ap*ap_weight + average_ar*ar_weight
if config.add_act:
obj_validation_performance = validation_performance
aps, ars = aggregate_eval(e_act, maxDet=100)
act_aps_str = "|".join(["%s:%.5f"%(class_, aps[class_])
for class_ in aps])
act_ars_str = "|".join(["%s:%.5f"%(class_, ars[class_])
for class_ in ars])
average_ap, average_ar = weighted_average(
aps, ars, act_eval_target_weight)
ap_weight = 0.9
ar_weight = 0.1
act_validation_performance = average_ap*ap_weight + \
average_ar*ar_weight
act_perf_weight = 0.5
obj_perf_weight = 0.5
validation_performance = obj_perf_weight \
* obj_validation_performance + \
act_perf_weight*act_validation_performance
tqdm.write("\tval in %s at step %s, Obj AP:%s, AR:%s, obj "
"performance %s" % (
num_val_steps, global_step, aps_str, ars_str,
obj_validation_performance))
tqdm.write("\tAct AP:%s, AR:%s, this step val:%.5f, previous"
" best val at %s is %.5f" % (
act_aps_str, act_ars_str, validation_performance,
best[1], best[0]))
else:
if config.use_small_object_head:
so_aps, so_ars = aggregate_eval(e_so, maxDet=100)
so_average_ap, so_average_ar = weighted_average(so_aps, so_ars)
so_val = so_average_ap*0.5 + so_average_ar*0.5
so_weight = 0.5
validation_performance = (1 - so_weight)*validation_performance \
+ so_weight*so_val
so_aps_str = "|".join(["%s:%.5f"%(class_, so_aps[class_])
for class_ in so_aps])
so_ars_str = "|".join(["%s:%.5f"%(class_, so_ars[class_])
for class_ in so_ars])
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, so_AP:%s, "
"so_AR:%s, this step val:%.5f, previous best val "
"at %s is %.5f" % (
num_val_steps, global_step, aps_str, ars_str, so_aps_str,
so_ars_str, validation_performance, best[1], best[0]))
else:
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, this step "
"val:%.5f, previous best val at %s is %.5f" % (
num_val_steps, global_step, aps_str, ars_str,
validation_performance, best[1], best[0]))
if validation_performance > best[0]:
tqdm.write("\tsaving best model %s..." % global_step)
bestsaver.save(sess, os.path.join(config.save_dir_best, "model"),
global_step=global_step)
tqdm.write("\tdone")
best = (validation_performance, global_step)
isStart = False
if config.exit_after_val:
print("exit after eval.")
break
# skip if the batch is not complete, usually the last few ones
if len(batch[1]) != config.gpu:
continue
try:
#loss, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, fastrcnn_box_loss, train_op,act_losses = trainer.step(sess,batch)
loss, wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses, \
fastrcnn_box_losses, so_label_losses, act_losses, lr = \
trainer.step(sess, batch)
except Exception as e:
print(e)
bs = batch[1]
print("trainer error, batch files:%s"%([b.data["imgs"] for b in bs]))
sys.exit()
if math.isnan(loss):
tqdm.write("warning, nan loss: loss:%s,rpn_label_loss:%s, "
"rpn_box_loss:%s, fastrcnn_label_loss:%s, "
"fastrcnn_box_loss:%s" % (
loss, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses,
fastrcnn_box_losses))
if config.add_act:
tqdm.write("\tact_losses:%s" % (act_losses))
print("batch:%s" % (batch[1][0].data["imgs"]))
sys.exit()
# use moving average to compute loss
loss_me.put(loss)
lr_me.put(lr)
for wd, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, \
fastrcnn_box_loss, so_label_loss, act_loss in zip(
wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses,
fastrcnn_box_losses, so_label_losses, act_losses):
wd_me.put(wd)
rpn_label_loss_me.put(rpn_label_loss)
rpn_box_loss_me.put(rpn_box_loss)
fastrcnn_label_loss_me.put(fastrcnn_label_loss)
fastrcnn_box_loss_me.put(fastrcnn_box_loss)
so_label_loss_me.put(so_label_loss)
act_loss_me.put(act_loss)
if global_step % config.show_loss_period == 0:
tqdm.write("step %s, moving average: learning_rate %.6f, loss %.6f,"
" weight decay loss %.6f, rpn_label_loss %.6f, rpn_box_loss"
" %.6f, fastrcnn_label_loss %.6f, fastrcnn_box_loss %.6f, "
"so_label_loss %.6f, act_loss %.6f" % (
global_step, lr_me.me(), loss_me.me(), wd_me.me(),
rpn_label_loss_me.me(), rpn_box_loss_me.me(),
fastrcnn_label_loss_me.me(), fastrcnn_box_loss_me.me(),
so_label_loss_me.me(), act_loss_me.me()))
# save these for ploting later
stats.append({
"s":float(global_step),
"l":float(loss),
"val":validation_performance
})
isStart = False
# save the last model
if global_step % config.save_period != 0: # time to save model
print("saved last model without evaluation.")
saver.save(sess, os.path.join(config.save_dir, "model"),
global_step=global_step)
if config.write_self_sum:
self_summary_strs.writeTo(config.self_summary_path)
with open(config.stats_path, "w") as f:
json.dump(stats, f)
# given a list of images, do the forward, save each image result separately
def forward(config):
imagelist = config.imgpath
if config.extract_feat:
assert config.feat_path is not None
assert config.is_fpn
if not os.path.exists(config.feat_path):
os.makedirs(config.feat_path)
print("also extracting fpn features")
all_images = [line.strip() for line in open(config.imgpath, "r").readlines()]
if config.forward_skip > 1:
all_images.sort()
ori_num = len(all_images)
all_images = all_images[::config.forward_skip]
print("skiiping %s, got %s/%s" % (
config.forward_skip, len(all_images), ori_num))
if config.check_img_exist:
exist_imgs = []
for image in all_images:
if os.path.exists(image):
exist_imgs.append(image)
print("%s/%s image exists" % (len(exist_imgs), len(all_images)))
all_images = exist_imgs
print("total images to test:%s"%len(all_images))
if config.use_small_object_head:
if not os.path.exists(config.so_outpath):
os.makedirs(config.so_outpath)
models = []
for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config, i, controller=config.controller))
model_final_boxes = [model.final_boxes for model in models]
# [R]
model_final_labels = [model.final_labels for model in models]
model_final_probs = [model.final_probs for model in models]
if config.extract_feat:
model_feats = [model.fpn_feature for model in models]
if config.add_mask:
# [R,14,14]
model_final_masks = [model.final_masks for model | |
5 / 100.
HIGH_LUMINOSITY = 255 * 98 / 100.
hsl = self.get_value()
if hsl[1] <= LOW_SATURATION:
# If we're given a color with a very low saturation, the user is
# searching for a black/white/grey and we need to take saturation
# and lightness into consideration, but ignore hue.
clauses = [
Q('range', **{'colors.s': {
'lte': LOW_SATURATION,
}}),
Q('range', **{'colors.l': {
'gte': max(min(hsl[2] - 64, 255), 0),
'lte': max(min(hsl[2] + 64, 255), 0),
}})
]
elif hsl[2] <= LOW_LUMINOSITY:
# If we're given a color with a very low luminosity, we're
# essentially looking for pure black. We can ignore hue and
# saturation, they don't have enough impact to matter here.
clauses = [
Q('range', **{'colors.l': {'lte': LOW_LUMINOSITY}})
]
elif hsl[2] >= HIGH_LUMINOSITY:
# Same deal for very high luminosity, this is essentially white.
clauses = [
Q('range', **{'colors.l': {'gte': HIGH_LUMINOSITY}})
]
else:
# Otherwise, we want to do the opposite and just try to match the
# hue with +/- 10%. The idea is to keep the UI simple, presenting
# the user with a limited set of colors that still allows them to
# find all themes.
# Start by excluding low saturation and low/high luminosity that
# are handled above.
clauses = [
Q('range', **{'colors.s': {'gt': LOW_SATURATION}}),
Q('range', **{'colors.l': {
'gt': LOW_LUMINOSITY,
'lt': HIGH_LUMINOSITY
}}),
]
if hsl[0] - 26 < 0 or hsl[0] + 26 > 255:
# If the hue minus 10% is below 0 or above 255, we need to wrap
# the value to match the other end of the spectrum (since hue
# is an angular dimension on a cylinder). However we can't do a
# single range query with both lte & gte with a modulo, we'd
# end up with a range that's impossible to match. Instead we
# need to split into 2 queries and match either with a |.
clauses.append(
Q('range', **{'colors.h': {'gte': (hsl[0] - 26) % 255}}) |
Q('range', **{'colors.h': {'lte': (hsl[0] + 26) % 255}})
)
else:
# If we don't have to wrap around then it's simpler, just need
# a single range query between 2 values.
clauses.append(
Q('range', **{'colors.h': {
'gte': hsl[0] - 26,
'lte': hsl[0] + 26,
}}),
)
# In any case, the color we're looking for needs to be present in at
# least 25% of the image.
clauses.append(Q('range', **{'colors.ratio': {'gte': 0.25}}))
return [Q('nested', path='colors', query=query.Bool(filter=clauses))]
class SearchQueryFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that performs an ES query according
to what's in the `q` GET parameter.
"""
MAX_QUERY_LENGTH = 100
MAX_QUERY_LENGTH_FOR_FUZZY_SEARCH = 20
def generate_exact_name_match_query(self, search_query, analyzer):
"""
Return the query used for exact name matching.
If the name of the add-on is an exact match for the search query, it's
likely to be what the user wanted to find. To support that, we need to
do a term query against a non-analyzed field and boost it super high.
Since we need to support translations, this function has 2 modes:
- In the first one, used when we are dealing with a language for which
we know we didn't store a translation in ES (because we don't have an
analyzer for it), it only executes a term query against `name.raw`.
- In the second one, we did store a translation in that language...
potentially. We don't know in advance if there is a translation for
each add-on! We need to do a query against both `name.raw` and
`name_l10n_<analyzer>.raw`, applying the boost only once if both
match. This is where the DisMax comes in, it's what MultiMatch
would do, except that it works with Term queries.
"""
if analyzer is None:
clause = query.Term(**{
'name.raw': {
'_name': 'Term(name.raw)',
'value': search_query, 'boost': 100.0
}
})
else:
query_name = 'DisMax(Term(name.raw), Term(name_l10n_%s.raw))' % (
analyzer)
clause = query.DisMax(
# We only care if one of these matches, so we leave tie_breaker
# to the default value of 0.0.
_name=query_name,
boost=100.0,
queries=[
{'term': {'name.raw': search_query}},
{'term': {'name_l10n_%s.raw' % analyzer: search_query}},
]
)
return clause
    def primary_should_rules(self, search_query, analyzer):
        """Return "primary" should rules for the query.

        These are the ones using the strongest boosts and are only applied to
        the add-on name.

        Applied rules:
        * Exact match on the name, using the right translation if possible
          (boost=100.0)
        * Then text matches, using a language specific analyzer if possible
          (boost=5.0)
        * Phrase matches that allows swapped terms (boost=8.0)
        * Then text matches, using the standard text analyzer (boost=6.0)
        * Then look for the query as a prefix of a name (boost=3.0)

        `analyzer` may be None when the request language has no
        language-specific analyzer; in that case only the default-locale
        `name` rules are produced.  Returns a list of elasticsearch-dsl
        query objects meant to be used as `should` clauses.
        """
        # Exact matching always comes first, it carries the largest boost.
        should = [
            self.generate_exact_name_match_query(search_query, analyzer)
        ]
        # If we are searching with a language that we support, we also try to
        # do a match against the translated field. If not, we'll do a match
        # against the name in default locale below.
        if analyzer:
            should.append(
                query.Match(**{
                    'name_l10n_%s' % analyzer: {
                        '_name': 'Match(name_l10n_%s)' % analyzer,
                        'query': search_query,
                        'boost': 5.0,
                        'analyzer': analyzer,
                        'operator': 'and'
                    }
                })
            )
        # The rest of the rules are applied to 'name', the field containing the
        # default locale translation only. That field has word delimiter rules
        # to help find matches, lowercase filter, etc, at the expense of any
        # language-specific features.
        should.extend([
            query.MatchPhrase(**{
                'name': {
                    '_name': 'MatchPhrase(name)',
                    'query': search_query, 'boost': 8.0, 'slop': 1,
                },
            }),
            query.Match(**{
                'name': {
                    '_name': 'Match(name)',
                    'analyzer': 'standard',
                    'query': search_query, 'boost': 6.0, 'operator': 'and',
                },
            }),
            query.Prefix(**{
                'name': {
                    '_name': 'Prefix(name)',
                    'value': search_query, 'boost': 3.0
                },
            }),
        ])
        # Add two queries inside a single DisMax rule (avoiding overboosting
        # when an add-on name matches both queries) to support partial & fuzzy
        # matches (both allowing some words in the query to be absent).
        # For short query strings only (long strings, depending on what
        # characters they contain and how many words are present, can be too
        # costly).
        # Again applied to 'name' in the default locale, without the
        # language-specific analysis.
        if len(search_query) < self.MAX_QUERY_LENGTH_FOR_FUZZY_SEARCH:
            should.append(query.DisMax(
                # We only care if one of these matches, so we leave tie_breaker
                # to the default value of 0.0.
                _name='DisMax(FuzzyMatch(name), Match(name.trigrams))',
                boost=4.0,
                queries=[
                    # For the fuzzy query, only slight mispellings should be
                    # corrected, but we allow some of the words to be absent
                    # as well:
                    # 1 or 2 terms: should all be present
                    # 3 terms: 2 should be present
                    # 4 terms or more: 25% can be absent
                    {
                        'match': {
                            'name': {
                                'query': search_query,
                                'prefix_length': 2,
                                'fuzziness': 'AUTO',
                                'minimum_should_match': '2<2 3<-25%'
                            }
                        }
                    },
                    # For the trigrams query, we require at least 66% of the
                    # trigrams to be present.
                    {
                        'match': {
                            'name.trigrams': {
                                'query': search_query,
                                'minimum_should_match': '66%'
                            }
                        }
                    },
                ]
            ))
        return should
def secondary_should_rules(
self, search_query, analyzer, rescore_mode=False):
"""Return "secondary" should rules for the query.
These are the ones using the weakest boosts, they are applied to fields
containing more text: description & summary.
Applied rules:
* Look for matches inside the summary (boost=3.0)
* Look for matches inside the description (boost=2.0).
If we're using a supported language, both rules are done through a
multi_match that considers both the default locale translation
(using snowball analyzer) and the translation in the current language
(using language-specific analyzer). If we're not using a supported
language then only the first part is applied.
If rescore_mode is True, the match applied are match_phrase queries
with a slop of 5 instead of a regular match. As those are more
expensive they are only done in the 'rescore' part of the query.
"""
if rescore_mode is False:
query_class = query.Match
query_kwargs = {
'operator': 'and',
}
query_class_name = 'Match'
multi_match_kwargs = {
'operator': 'and',
}
else:
query_class = query.MatchPhrase
query_kwargs = {
'slop': 10,
}
query_class_name = 'MatchPhrase'
multi_match_kwargs = {
'slop': 10,
'type': 'phrase',
}
if analyzer:
summary_query_name = (
| |
#!/usr/bin/env python
# coding: utf-8
# <img src="imagenes/rn3.png" width="200">
# <img src="http://www.identidadbuho.uson.mx/assets/letragrama-rgb-150.jpg" width="200">
# # [Curso de Redes Neuronales](https://curso-redes-neuronales-unison.github.io/Temario/)
#
# # Redes neuronales multicapa y el algoritmo de *b-prop*
#
# [**<NAME>**](http://mat.uson.mx/~juliowaissman/), 22 de febrero de 2018.
#
# En esta libreta vamos a practicar con el algoritmo básico para realizar reconocimiento en redes neuronales hacia adelante y establecer una estructura básica para simular con fines de comprensión. Para aplicaciones reales vamos a utilizar herramientas poderosas como [Tensorflow](https://www.tensorflow.org), pero es importante hacer una primera red neuronal simple a pie con el objetivo de entender mejor los mecanismos básicos.
#
# Como dijo Jack el destripador, vamos por partes, y empecemos con asumir que tenemos la especificación completa de la red neuronal y lo que queremos es poder generar una red neuronal inicial, o poder recuperar una red existente previamente guardada.
#
# Empecemos por inicializar los modulos que vamos a requerir.
# In[ ]:
import numpy as np
import _pickle as cPickle
# ## 1. Especificando una red neuronal
#
# Primero, para poder hacer una red neuronal, tenemos que determinar cierta información. La información importante que debemos de especificar cuando hacemos una redes neuronales es:
#
# - Cuantas capas de neuronas tiene la red neuronal, $L$.
# - Cuantas neuronas va a tener cada capa $[n_0, n_1, \ldots, n_L]$, donde $n_0$ es el número de entradas y $n_L$ el número de salidas.
# - Cual es la función de activación de las neuronas ocultas (logística, lineal rectificada, ...).
# - Cual es el tipo de salida de mi red neuronal (lineal, logística, unidad softmax, ... ).
# - Los valores con los que se normalizan los datos de entrada a la red neuronal (para el aprendizaje en una red neuronal es muy importante que los valores de entrada estén normalizados).
#
# Una vez que se establecen estos valores, es necesario generar una lista de matrices $[W^{(1)}, \ldots, W^{(L)}]$ donde $W^{(l)}$ es una matriz de dimensiones $(n_l, n_{l-1})$ de pesos. Igualmente es necesario generar una lista de vectores $[b^{(1)}, \ldots, b^{(L)}]$ donde $b^{(l)}$ es un vector de $n_l$ elementos llamados sesgos.
#
# Si se inicializan los valores de las entradas de $W^{(l)}$ y $b^{(l)}$ iguales, es equivalente a tener una sola neurona en esa capa, por lo que es necesario que estos valores sean diferentes. Para este ejemplo y con el fin de simplificar las operaciones de aprendizaje más adelante, vamos a asumir que la función de activación siempre será la función logística.
#
# Para efectos de un mejor aprendizaje, y asumiendo que la función de activación es la logística, es importante que los valores iniciales de los pesos se encuentren en la zona donde causan más variación en la función logística. Si asumimos que las entradas a cada neurona están normalizadas (esto es, entre 0 y 1), entonces los pesos deberían ser valores entre $(-\sqrt{n_{l-1}}, \sqrt{n_{l-1}})$ con el fin que la suma se encuentre en la región donde más cambios ocurren en la función logística.
#
# Vamos a generar y guardar esta información en un diccionario (junto con el resto de la información que requeriramos para tener una red neuronal completamente definida. Al principio los valores de normalización no cuentan ya que estos se deben inicializar al comienzo del aprendizaje. Fijate bien que naturalmente la red neuronal se debería programar como una clase, pero para evitar complejidad que no podamos mantener en TensoFlow vamos a dejarlo todo el código en forma estructurada (solo para dejar constancia).
#
# **Completa el código para inicializar la red neuronal**
# In[ ]:
def inicializa_red_neuronal(capas, tipo):
    """
    Initialize a neural network as a dictionary of data.

    The activation function of the hidden units is assumed to be the
    logistic function.

    Parameters
    ----------
    capas: list of ints where the first element is the number of inputs,
           the last one the number of outputs, and the intermediate ones
           the number of neurons of each hidden layer.
    tipo: a string in {'lineal', 'logistica', 'softmax'} with the type of
          output function of the network.

    Returns
    -------
    A dictionary `rn` such that
        - rn['capas'] = number of layers, i.e. len(capas)
          NOTE(review): the original docstring claimed this key holds the
          [n0, n1, ..., nL] list itself, but the code stores len(capas) —
          confirm which one downstream code expects.
        - rn['tipo'] = tipo
        - rn['W'] = [None, W1, ..., WL] list of weight matrices
        - rn['b'] = [None, b1, ..., bL] list of bias vectors
        - rn['mu'] = per-input means (initialized to zeros)
        - rn['std'] = per-input standard deviations (initialized to ones)
    """
    rn = {'capas': len(capas), 'tipo': tipo}
    # Normalization statistics start neutral; they are filled in when
    # training data becomes available.
    rn['mu'] = np.zeros(capas[0])
    rn['std'] = np.ones(capas[0])
    rn['W'], rn['b'] = inicializa_Wb(capas)
    return rn
def inicializa_Wb(capas):
    """
    Initialize random weight matrices W and bias vectors b for each layer.

    Parameters
    ----------
    capas: [n0, n1, ..., nL] number of neurons per layer

    Returns
    -------
    W, b where W = [None, W1, ..., WL] and b = [None, b1, ..., bL];
    W[l] has shape (capas[l], capas[l-1]) and b[l] has capas[l] elements.
    Weights are drawn uniformly from (-sqrt(n_{l-1}), sqrt(n_{l-1})) so
    that, with normalized inputs, the pre-activations fall in the steep
    region of the logistic function; biases are uniform in [0, 1).
    """
    # BUG FIX: np.random.rand alone yields values in [0, 1) only, so all
    # weights were positive instead of spanning the documented interval
    # (-sqrt(n_{l-1}), sqrt(n_{l-1})).  2*rand - 1 is uniform in [-1, 1);
    # scaling by sqrt(n_{l-1}) gives the intended range.
    W = [None] + [np.sqrt(capas[l - 1]) *
                  (2 * np.random.rand(capas[l], capas[l - 1]) - 1)
                  for l in range(1, len(capas))]
    b = [None] + [np.random.rand(capas[l]) for l in range(1, len(capas))]
    return W, b
def test_inicializaWb():
    # Run 1000 randomized trials to check that the specification holds.
    for _ in range(1000):
        capas = [np.random.randint(1, 20) for _ in range(5)]
        W, b = inicializa_Wb(capas)
        assert len(W) == len(b) == len(capas)
        for capa in range(1, 5):
            n_prev = capas[capa - 1]
            cota = np.sqrt(n_prev)
            assert W[capa].shape == (capas[capa], n_prev)  # correct dimensions
            assert W[capa].max() < cota                    # upper bound respected
            assert W[capa].min() > -cota                   # lower bound respected
            assert np.abs(W[capa]).sum() > 0               # not initialized to zero
    return "Paso la prueba"
print(test_inicializaWb())
# Como entrenar una red es algo lento y tedioso, y normalmente cuando hacemos un método de aprendizaje, lo que queremos es poder utilizarlo después para predecir un conjunto de datos no etiquetados previamente, es normal que guardemos en un archivo la información específica a la red neuronal, y despues la recuperemos en otra sesión, otro día, o en otra computadora para hacer la predicción.
#
# Una manera de guardar datos, funciones y objectos de Python en disco es utilizando el módulo ``pickle`` (o su versión compilada para mayor velocidad ``cPickle``). Este modulo permite guardar una serie de objetos de python en forma serializada en un archivo binario, y luego recuperarlos. Notese que este método es diferente a ``np.load`` y ``np.savez``, ya que estos solo permiten guardar (y recuperar) una serie de ndarrays únicamente.
#
# Vamos entonces a hacer dos funciones muy simples ``guarda_objeto`` y ``carga_objeto``, que utilizaremos más adelante.
# In[ ]:
def guarda_objeto(archivo, objeto):
    """
    Save a Python object to the file "archivo".  If the file exists it is
    replaced without questions, pure mafia style.

    Parameters
    ----------
    archivo: string with the name of a file (it does not need to exist)
    objeto: any picklable Python object to be saved
    """
    # The with-statement closes the file even if dump() raises, so the
    # explicit close() the original called inside the block was redundant.
    # Protocol -1 selects the highest pickle protocol available.
    with open(archivo, 'wb') as arch:
        cPickle.dump(objeto, arch, -1)
def carga_objeto(archivo):
    """
    Load the first (and assumed only) object contained in the pickle file
    'archivo'.

    Parameters
    ----------
    archivo: string with the name of a pickle file

    Returns
    -------
    The first object inside the pickle.
    """
    # The with-statement guarantees the file is closed; the original's
    # explicit close() inside the block was redundant.
    with open(archivo, 'rb') as arch:
        return cPickle.load(arch)
def test_archivo():
    """
    Round-trip test: write an object to a file, then read it back.
    Please delete the file between runs so both the write and the read
    paths get exercised.
    """
    original = [range(100), 'prueba', True]
    guarda_objeto('prueba.pkl', original)
    # Rebind the local so the assertions can only succeed if the data
    # really came back from disk.
    original = [10, 'no prueba', False]
    recuperado = carga_objeto('prueba.pkl')
    assert len(recuperado[0]) == 100
    assert recuperado[1] == 'prueba'
    assert recuperado[-1]
    return "Pasa la prueba"
print(test_archivo())
# ## 2. Calculando la salida de una red neuronal con *feedforward*
#
#
# Asumamos que tenemos una red neuronal ya inicializada, y que la vamos a utilizar | |
<reponame>acorg/ssm-report<filename>py/ssm_report/obsolete/settings_report.py
error OBSOLETE
import sys, os, copy, datetime
import logging; module_logger = logging.getLogger(__name__)
from pathlib import Path
from acmacs_base.json import write_json
# ----------------------------------------------------------------------
def make_report_settings():
    """Create "report.json" in the current directory when it does not yet
    exist, pre-filled from the sReport template with hemisphere, season
    year, meeting date and time-series range derived from today's date.

    Returns the Path of the settings file (whether freshly written or
    already present).
    """
    report_settings_file = Path("report.json")
    if not report_settings_file.exists():
        report = copy.deepcopy(sReport)
        report["previous"] = find_previous_dir()
        today = datetime.date.today()
        # March..September prepares the Southern hemisphere report;
        # otherwise Northern, whose season string spans two years and
        # depends on whether we are already past October.
        if today.month > 2 and today.month < 10:
            report["cover"]["hemisphere"] = "Southern"
            report["cover"]["year"] = str(today.year + 1)
        else:
            report["cover"]["hemisphere"] = "Northern"
            if today.month >= 10:
                report["cover"]["year"] = "{}/{}".format(today.year + 1, today.year + 2)
            else:
                report["cover"]["year"] = "{}/{}".format(today.year, today.year + 1)
        report["cover"]["meeting_date"] = (today + datetime.timedelta(days=7)).strftime("%d %B %Y")
        report["time_series"]["date"] = {"start": (today - datetime.timedelta(days=180)).strftime("%Y-%m-01"), "end": today.strftime("%Y-%m-01")}
        subst = {
            "twelve_month_ago": (today - datetime.timedelta(days=365)).strftime("%B %Y"),
            "six_month_ago": (today - datetime.timedelta(days=183)).strftime("%B %Y"),
            }
        def make_subst(entry):
            # Mutates dict entries in place, which is safe only because we
            # iterate over the deep copy below, never over the template.
            if isinstance(entry, dict) and isinstance(entry.get("title"), str):
                entry["title"] = entry["title"].format(**subst)
            return entry
        # BUG FIX: iterate the deep-copied pages. The original iterated
        # sReport["pages"], and because make_subst mutates entries in
        # place it corrupted the module-level sReport template.
        report["pages"] = [make_subst(entry) for entry in report["pages"]]
        write_json(report_settings_file, report, compact=False)
    return report_settings_file
# ----------------------------------------------------------------------
def find_previous_dir():
    """Return (as a string) the lexicographically greatest sibling
    directory of the current working directory, or None if there is
    no such directory."""
    here = Path(".").resolve()
    siblings = sorted(Path("..").resolve().glob("*"), reverse=True)
    for candidate in siblings:
        if candidate.is_dir() and candidate != here:
            return str(candidate)
    return None
# ----------------------------------------------------------------------
sReport = {
"previous": None,
"cover": {
"hemisphere": "Northern",
"meeting_date": "27 February - 2 March 2017",
"teleconference": "",
"year": "2017/2018",
},
"time_series": {
"date": {"end": "2017-02-01", "start": "2016-08-01"},
"period": "month",
},
"page_numbering": True,
"pages": [
{"type": "cover"},
{"type": "toc"},
{"type": "?************************* H1 *****************************"},
{"type": "section_begin", "title": "H1N1pdm09", "subtype": "h1"},
{"type": "subsection_begin", "subtype": "H1", "title": "H1N1pdm09 geographic data"},
{"type": "geographic_data_description", "coloring": "continents"},
"new_page",
{"type": "geographic_ts", "subtype": "H1"},
"new_page",
{"type": "subsection_begin", "subtype": "H1", "title": "H1N1pdm09 antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "H1", "lab": "all"},
"new_page",
{"type": "antigenic_ts", "subtype": "H1", "assay": "HI", "lab": "all"},
"new_page",
{"type": "subsection_begin", "subtype": "H1", "title": "H1N1pdm09 phylogenetic tree"},
{"type": "phylogenetic_description"},
"new_page",
{"type": "phylogenetic_tree", "subtype": "H1"},
"new_page",
{"type": "subsection_begin", "subtype": "H1", "title": "H1N1pdm09 antigenic map colored by phylogenetic clade"},
{"type": "description", "text": "CDC+Crick+NIID+VIDRL antigenic map, antigens color-coded by phylogenetic clade."},
{"type": "map", "subtype": "H1", "assay": "HI", "lab": "all", "map_type": "clade"},
"new_page",
{"type": "subsection_begin", "subtype": "H1", "title": "H1N1pdm09 antigenic map with serology antigens."},
{"type": "description", "text": "CDC+Crick+NIID+VIDRL antigenic map with serology antigens in orange, other antigens color-coded by phylogenetic clade."},
{"type": "map", "subtype": "H1", "assay": "HI", "lab": "all", "map_type": "serology"},
"new_page",
{"type": "?************************* H3 *****************************"},
{"type": "section_begin", "title": "H3N2", "subtype": "h3"},
{"type": "subsection_begin", "subtype": "H1", "title": "H3N2 geographic data"},
{"type": "geographic_data_description", "coloring": "h3_clade"},
"new_page",
{"type": "geographic_ts", "subtype": "H3"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "CDC H3N2 HI antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "H3", "lab": "CDC"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "HI", "lab": "CDC"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "CDC H3N2 Neut antigenic data"},
{"type": "neut_ts_description", "coloring": "continents"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "NEUT", "lab": "CDC"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "Crick H3N2 HI antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "H3", "lab": "NIMR"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "HI", "lab": "NIMR"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "Crick H3N2 Neut antigenic data"},
{"type": "neut_ts_description", "coloring": "continents"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "NEUT", "lab": "NIMR"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "NIID H3N2 Neut antigenic data"},
{"type": "neut_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "H3", "lab": "NIID"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "NEUT", "lab": "NIID"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "VIDRL H3N2 HI antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "H3", "lab": "MELB"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "HI", "lab": "MELB"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "VIDRL H3N2 Neut antigenic data"},
{"type": "neut_ts_description", "coloring": "continents"},
"new_page",
{"type": "antigenic_ts", "subtype": "H3", "assay": "NEUT", "lab": "MELB"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 phylogenetic tree"},
{"type": "phylogenetic_description"},
"new_page",
{"type": "phylogenetic_tree", "subtype": "H3"},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps colored by geography"},
{"type": "maps", "images": [
"h3-hi/geography-cdc.pdf", "h3-neut/geography-cdc.pdf",
"h3-hi/geography-nimr.pdf", "h3-neut/geography-nimr.pdf",
"", "h3-neut/geography-niid.pdf",
"h3-hi/geography-melb.pdf", "h3-neut/geography-melb.pdf",
]},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps colored by phylogenetic clade"},
{"type": "maps", "images": [
"h3-hi/clade-cdc.pdf", "h3-neut/clade-cdc.pdf",
"h3-hi/clade-nimr.pdf", "h3-neut/clade-nimr.pdf",
"", "h3-neut/clade-niid.pdf",
"h3-hi/clade-melb.pdf", "h3-neut/clade-melb.pdf",
]},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps colored by phylogenetic clade (since {twelve_month_ago})"},
{"type": "maps", "images": [
"h3-hi/clade-12m-cdc.pdf", "h3-neut/clade-12m-cdc.pdf",
"h3-hi/clade-12m-nimr.pdf", "h3-neut/clade-12m-nimr.pdf",
"", "h3-neut/clade-12m-niid.pdf",
"h3-hi/clade-12m-melb.pdf", "h3-neut/clade-12m-melb.pdf",
]},
"new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps colored by phylogenetic clade (since {six_month_ago})"},
{"type": "maps", "images": [
"h3-hi/clade-6m-cdc.pdf", "h3-neut/clade-6m-cdc.pdf",
"h3-hi/clade-6m-nimr.pdf", "h3-neut/clade-6m-nimr.pdf",
"", "h3-neut/clade-6m-niid.pdf",
"h3-hi/clade-6m-melb.pdf", "h3-neut/clade-6m-melb.pdf",
]},
"new_page",
{"?type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps colored by amino-acids at 142"},
{"?type": "maps", "images": [
"h3-hi/aa-at-142-cdc.pdf", "h3-neut/aa-at-142-cdc.pdf",
"h3-hi/aa-at-142-nimr.pdf", "h3-neut/aa-at-142-nimr.pdf",
"", "h3-neut/aa-at-142-niid.pdf",
"h3-hi/aa-at-142-melb.pdf", "h3-neut/aa-at-142-melb.pdf"
]},
"?new_page",
{"type": "subsection_begin", "subtype": "H3", "title": "H3N2 antigenic maps with serology antigens"},
{"type": "maps", "images": [
"h3-hi/serology-cdc.pdf", "h3-neut/serology-cdc.pdf",
"h3-hi/serology-nimr.pdf", "h3-neut/serology-nimr.pdf",
"", "h3-neut/serology-niid.pdf",
"h3-hi/serology-melb.pdf", "h3-neut/serology-melb.pdf"
]},
"new_page",
{"type": "?************************* B *****************************"},
{"type": "section_begin", "title": "B", "subtype": "b"},
{"type": "subsection_begin", "subtype": "b", "title": "B Victoria and Yamagata geographic data"},
{"type": "geographic_data_description", "coloring": "b_lineage_vic_deletion_mutants"},
"new_page",
{"type": "geographic_ts", "subtype": "b"},
"new_page",
{"type": "?************************* B/Vic *****************************"},
{"type": "section_begin", "title": "B/Vic"},
{"type": "subsection_begin", "subtype": "bv", "title": "CDC B/Vic antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "bv", "lab": "CDC"},
"new_page",
{"type": "antigenic_ts", "subtype": "bv", "assay": "HI", "lab": "CDC"},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "Crick B/Vic antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "bv", "lab": "NIMR"},
"new_page",
{"type": "antigenic_ts", "subtype": "bv", "assay": "HI", "lab": "NIMR"},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "NIID B/Vic antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "bv", "lab": "NIID"},
"new_page",
{"type": "antigenic_ts", "subtype": "bv", "assay": "HI", "lab": "NIID"},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "VIDRL B/Vic antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "bv", "lab": "MELB"},
"new_page",
{"type": "antigenic_ts", "subtype": "bv", "assay": "HI", "lab": "MELB"},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "B/Vic phylogenetic tree"},
{"type": "phylogenetic_description"},
{"type": "phylogenetic_description_bvic_del"},
"new_page",
{"type": "phylogenetic_tree", "subtype": "bv"},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "B/Vic antigenic maps colored by phylogenetic clade"},
{"type": "description", "text": "CDC, Crick, NIID, VIDRL antigenic maps, antigens color-coded by phylogenetic clade."},
{"type": "maps", "images": [
"bv-hi/clade-cdc.pdf", "bv-hi/clade-nimr.pdf",
"bv-hi/clade-niid.pdf", "bv-hi/clade-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "B/Vic antigenic maps colored by phylogenetic clade (since {twelve_month_ago})"},
{"type": "maps", "images": [
"bv-hi/clade-12m-cdc.pdf", "bv-hi/clade-12m-nimr.pdf",
"bv-hi/clade-12m-niid.pdf", "bv-hi/clade-12m-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "B/Vic antigenic maps colored by phylogenetic clade (since {six_month_ago})"},
{"type": "maps", "images": [
"bv-hi/clade-6m-cdc.pdf", "bv-hi/clade-6m-nimr.pdf",
"bv-hi/clade-6m-niid.pdf", "bv-hi/clade-6m-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "bv", "title": "B/Vic antigenic maps with serology antigens"},
{"type": "description", "text": "CDC, Crick, NIID, VIDRL antigenic maps with serology antigens in orange, other antigens color-coded by phylogenetic clade."},
{"type": "maps", "images": [
"bv-hi/serology-cdc.pdf", "bv-hi/serology-nimr.pdf",
"bv-hi/serology-niid.pdf", "bv-hi/serology-melb.pdf"
]},
"new_page",
{"type": "?************************* B/Yam *****************************"},
{"type": "section_begin", "title": "B/Yam"},
{"type": "subsection_begin", "subtype": "by", "title": "CDC B/Yam antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "by", "lab": "CDC"},
"new_page",
{"type": "antigenic_ts", "subtype": "by", "assay": "HI", "lab": "CDC"},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "Crick B/Yam antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "by", "lab": "NIMR"},
"new_page",
{"type": "antigenic_ts", "subtype": "by", "assay": "HI", "lab": "NIMR"},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "NIID B/Yam antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "by", "lab": "NIID"},
"new_page",
{"type": "antigenic_ts", "subtype": "by", "assay": "HI", "lab": "NIID"},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "VIDRL B/Yam antigenic data"},
{"type": "antigenic_ts_description", "coloring": "continents"},
{"type": "statistics_table", "subtype": "by", "lab": "MELB"},
"new_page",
{"type": "antigenic_ts", "subtype": "by", "assay": "HI", "lab": "MELB"},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "B/Yam phylogenetic tree"},
{"type": "phylogenetic_description"},
"new_page",
{"type": "phylogenetic_tree", "subtype": "by"},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "B/Yam antigenic maps colored by phylogenetic clade"},
{"type": "description", "text": "CDC, Crick, NIID, VIDRL antigenic maps, antigens color-coded by phylogenetic clade."},
{"type": "maps", "images": [
"by-hi/clade-cdc.pdf", "by-hi/clade-nimr.pdf",
"by-hi/clade-niid.pdf", "by-hi/clade-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "B/Yam antigenic maps colored by phylogenetic clade (since {twelve_month_ago})"},
{"type": "maps", "images": [
"by-hi/clade-12m-cdc.pdf", "by-hi/clade-12m-nimr.pdf",
"by-hi/clade-12m-niid.pdf", "by-hi/clade-12m-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "B/Yam antigenic maps colored by phylogenetic clade (since {six_month_ago})"},
{"type": "maps", "images": [
"by-hi/clade-6m-cdc.pdf", "by-hi/clade-6m-nimr.pdf",
"by-hi/clade-6m-niid.pdf", "by-hi/clade-6m-melb.pdf"
]},
"new_page",
{"type": "subsection_begin", "subtype": "by", "title": "B/Yam antigenic maps with serology antigens"},
{"type": "description", "text": "Top row left to right CDC, Crick, bottom row left to right NIID, VIDRL antigenic maps with serology antigens in orange, other | |
composite instance
inline_composite = inst.create_inline_composite(already_captured)
self.valid_instances.add(inline_composite)
else:
self.valid_instances.add(inst)
# Always add any reductions to our parent
for reduction in state.reduction_instances:
assert reduction not in self.owner.reductions
self.owner.reductions[reduction] = self.node
# Mark that we've been captured
already_captured.add(self.node)
return inst_capture
    def capture_inline(self, target, already_captured):
        """Recursively copy this subtree's state into *target* for an
        inline mapping, bottom-up.

        For each node not yet in *already_captured*, the corresponding node
        in *target* receives the dirty flag and, when dirty, the valid
        instances — with composite instances replaced by freshly created
        inline composites.  Returns True when anything changed: either a
        composite was replaced by a distinct inline composite, or this node
        had already been captured on a previous pass.
        """
        changed = False
        # Capture down the tree first, then do ourselves
        for child in self.children:
            if child.capture_inline(target, already_captured):
                changed = True
        # Now see if we need to capture ourself
        if self.node not in already_captured:
            # NOTE: 'copy' shadows the builtin/module name here; it is the
            # counterpart node in the target state tree.
            copy = target.get_node(self.node)
            copy.dirty = self.dirty
            if self.dirty:
                for inst in self.valid_instances:
                    # Lazily create the target's instance set on first use.
                    if copy.valid_instances is None:
                        copy.valid_instances = set()
                    if inst.is_composite():
                        inline_composite = inst.create_inline_composite(already_captured)
                        copy.valid_instances.add(inline_composite)
                        # A distinct object means the composite was rebuilt,
                        # so the capture changed something.
                        if inline_composite is not inst:
                            changed = True
                    else:
                        copy.valid_instances.add(inst)
            # Add ourselves to the already captured
            already_captured.add(self.node)
        else:
            changed = True
        return changed
def find_valid_instances(self, valid):
# Go up the tree if necessary
if not self.dirty and self.parent is not None:
self.parent.find_valid_instances(valid)
if self.valid_instances:
for inst in self.valid_instances:
valid.add(inst)
    def need_temporary_instance(self, dst, region, need_check = True):
        """Return True when updating *dst* for *region* from this subtree
        would require a temporary instance.

        With need_check=True (the outermost call), the walk first descends
        through a unique dominating child when one exists, so the analysis
        happens at the most precise node.  A temporary is needed when this
        node is dirty and either some child holds a dirty copy of *dst*
        while *dst* is not valid here, or a valid composite instance
        itself reports needing one; otherwise all children are checked
        recursively (with need_check=False, since domination was already
        resolved at this level).
        """
        if need_check:
            # See if we can keep going down
            dominating_children = list()
            for child in self.children:
                if child.are_domination_tests_sound() and child.node.dominates(region):
                    dominating_children.append(child)
            # Only descend when the dominating child is unique; otherwise
            # the analysis must happen at this level.
            if len(dominating_children) == 1:
                return dominating_children[0].need_temporary_instance(dst, region)
        if self.dirty:
            # Check to see if the target instance is already valid
            if dst not in self.valid_instances:
                # Check all the children to see if they have any dirty copies
                # of the destination instance
                for child in self.children:
                    if child.has_dirty_destination(dst):
                        return True
            # Also need to check any composite instances to see if they
            # need a temporary instance
            for inst in self.valid_instances:
                if isinstance(inst, CompositeInstance) and \
                    inst.need_temporary_instance(dst, region):
                    return True
        # Now check all the children
        for child in self.children:
            if child.need_temporary_instance(dst, region, False):
                return True
        return False
def has_dirty_destination(self, dst):
if self.dirty and dst in self.valid_instances:
return True
for child in self.children:
if child.has_dirty_destination(dst):
return True
return False
def are_domination_tests_sound(self):
if isinstance(self.node, LogicalRegion):
return True
# Partition nodes are only sound if they have all the fields
if len(self.children) != self.node.get_num_children():
return False
return True
    def issue_update_copies(self, dst, dst_depth, dst_field, region, op, index,
                            perform_checks, error_str, actually_across, need_check = True):
        """Issue (or, when 'perform_checks', verify) the copies that bring
        'dst' up to date for 'region' from this composite node downwards.

        Returns True on success, False when a required copy or one of its
        preconditions is missing.  'actually_across' only selects the
        copy-across wording of the error messages.
        """
        children_dominate = False
        if need_check:
            # Figure out how many children dominate the target region
            # Keep going down if we there is exactly one
            dominating_children = list()
            for child in self.children:
                if child.are_domination_tests_sound() and child.node.dominates(region):
                    dominating_children.append(child)
            if len(dominating_children) == 1:
                return dominating_children[0].issue_update_copies(dst, dst_depth, dst_field,
                    region, op, index, perform_checks, error_str, actually_across)
            # See if our open children dominate the target region,
            # if they do then we can skip doing any copies from this level
            target_points = region.get_point_set().copy()
            for child in self.children:
                if not child.are_domination_tests_sound():
                    continue
                target_points -= child.node.get_point_set()
                if target_points.empty():
                    break
            if target_points.empty():
                children_dominate = True
        # If we need check (e.g. haven't initialized data) or we are dirty
        # then we have to issue copies from this level
        if (need_check or self.dirty) and not children_dominate:
            if need_check:
                local_valid = set()
                self.find_valid_instances(local_valid)
            else:
                local_valid = self.valid_instances
            # Issue update copies from our valid instances if there are
            # valid instances and the destination is not already one of them
            if local_valid and dst not in local_valid:
                # Handle the virtual case
                if len(local_valid) == 1 and \
                        next(iter(local_valid)).is_virtual():
                    virtual_inst = next(iter(local_valid))
                    if not virtual_inst.issue_copies_across(dst, dst_depth, dst_field,
                            region, op, index, perform_checks, error_str, actually_across):
                        return False
                else:
                    # Destination preconditions are needed in both the
                    # checking and the issuing paths below.
                    if self.node is not region:
                        dst_preconditions = dst.find_copy_dependences(depth=dst_depth,
                            field=dst_field, op=op, index=index, region=region,
                            reading=False, redop=0, precise=True,
                            intersect=self.node)
                    else:
                        dst_preconditions = dst.find_copy_dependences(depth=dst_depth,
                            field=dst_field, op=op, index=index, region=region,
                            reading=False, redop=0, precise=True)
                    if perform_checks:
                        # Find the copy
                        if self.node is not region:
                            copy = op.find_generated_copy(self.owner.field,
                                region, dst, 0, self.node)
                        else:
                            copy = op.find_generated_copy(self.owner.field,
                                region, dst)
                        if copy is None:
                            if actually_across:
                                print("ERROR: Missing intersection copy across from "+
                                    "composite instance to update "+str(dst)+
                                    " for field "+str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            else:
                                print("ERROR: Missing intersection copy from "+
                                    "composite instance to update "+str(dst)+
                                    " for field "+str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            return False
                        src = copy.find_src_inst(self.owner.field)
                        if self.node is not region:
                            src_preconditions = src.find_copy_dependences(
                                depth=self.owner.depth, field=self.owner.field,
                                op=op, index=index, region=region, reading=True,
                                redop=0, precise=True, intersect=self.node)
                        else:
                            src_preconditions = src.find_copy_dependences(
                                depth=self.owner.depth, field=self.owner.field,
                                op=op, index=index, region=region, reading=True,
                                redop=0, precise=True)
                        # Lazily build the reachability cache used by
                        # check_preconditions.
                        if copy.reachable_cache is None:
                            copy.reachable_cache = set()
                            copy.get_physical_reachable(copy.reachable_cache, False)
                        bad = check_preconditions(src_preconditions, copy)
                        if bad is not None:
                            if actually_across:
                                print("ERROR: Missing source precondition on "+
                                    str(bad)+" for "+str(copy)+" across from "+
                                    "composite instance to update "+str(dst)+
                                    " for field "+str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            else:
                                print("ERROR: Missing source precondition on "+
                                    str(bad)+" for "+str(copy)+" from composite "+
                                    "instance to update "+str(dst)+" for field "+
                                    str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            return False
                        bad = check_preconditions(dst_preconditions, copy)
                        if bad is not None:
                            if actually_across:
                                print("ERROR: Missing destination precondition on "+
                                    str(bad)+" for "+str(copy)+" across from "+
                                    "composite instance to update "+str(dst)+
                                    " for field "+str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            else:
                                print("ERROR: Missing destination precondition on "+
                                    str(bad)+" for "+str(copy)+" from composite "+
                                    "instance to update "+str(dst)+" for field "+
                                    str(dst_field)+" by "+error_str)
                                if self.owner.state.assert_on_error:
                                    assert False
                            return False
                    else:
                        # Figure out which instance to copy from
                        if len(local_valid) > 1:
                            print("INFO: Multiple valid instances to choose from in "+
                                "composite instance... picking one")
                        src = next(iter(local_valid))
                        if self.node is not region:
                            src_preconditions = src.find_copy_dependences(
                                depth=self.owner.depth, field=self.owner.field,
                                op=op, index=index, region=region, reading=True,
                                redop=0, precise=True, intersect=self.node)
                        else:
                            src_preconditions = src.find_copy_dependences(
                                depth=self.owner.depth, field=self.owner.field,
                                op=op, index=index, region=region, reading=True,
                                redop=0, precise=True)
                        # Make a realm copy from the source to the destination
                        copy = self.owner.state.create_copy(op)
                        copy.set_region(region)
                        copy.set_intersect(self.node)
                        copy.add_field(self.owner.field.fid, src,
                            self.owner.field.fid, dst, 0)
                        # Wire the copy into the physical event graph after
                        # all of its source and destination preconditions.
                        for src_op in src_preconditions:
                            src_op.physical_outgoing.add(copy)
                            copy.physical_incoming.add(src_op)
                        for dst_op in dst_preconditions:
                            dst_op.physical_outgoing.add(copy)
                            copy.physical_incoming.add(dst_op)
                        # Record the copy user
                        if self.node is not region:
                            src.add_copy_user(depth=self.owner.depth, field=self.owner.field,
                                region=region, op=copy, index=index,
                                reading=True, redop=0, intersect=self.node)
                            dst.add_copy_user(depth=dst_depth, field=dst_field,
                                region=region, op=copy, index=index,
                                reading=False, redop=0, intersect=self.node)
                        else:
                            src.add_copy_user(depth=self.owner.depth, field=self.owner.field,
                                region=region, op=copy, index=index,
                                reading=True, redop=0)
                            dst.add_copy_user(depth=dst_depth, field=dst_field, region=region,
                                op=copy, index=index, reading=False, redop=0)
        # Now we can recurse down the tree from this point, no need for the check
        for child in self.children:
            # Check for intersection before going down
            if not region.intersects(child.node):
                continue
            if not child.issue_update_copies(dst, dst_depth, dst_field, region, op,
                    index, perform_checks, error_str, actually_across, False):
                return False
        return True
class CompositeInstance(object):
__slots__ = ['state', 'root', 'depth', 'field', 'nodes',
'reductions', 'captured', 'complete']
def __init__(self, state, root, depth, field):
self.state = state
self.root = root
self.depth = depth
self.field = field
self.nodes = dict()
self.reductions = dict()
self.complete = set() # complete nodes
self.captured = set()
def __str__(self):
return "Composite Instance of "+str(self.field)+" at "+str(self.root)
__repr__ = __str__
def get_node(self, node):
if node in self.nodes:
return self.nodes[node]
if node is self.root:
result = CompositeNode(self, node, None)
else:
parent_node = self.get_node(node.parent)
result = CompositeNode(self, node, parent_node)
self.nodes[node] = result
return result
    def capture(self, state, already_captured):
        """Capture the physical state of 'state.node' into this composite view.

        Only nodes that are dirty, hold pending reductions, or are the root
        contribute.  Completeness of (captured) children can make capturing
        the actual instances at this node unnecessary.
        """
        # Do the capture if we are dirty or have reductions or we are the root
        if state.dirty or state.redop != 0 or state.node is self.root:
            new_state = self.get_node(state.node)
            # See if we can avoid capturing the instances
            already_complete = False
            # If we are complete and we captured all our children
            # then we can skip capturing instances here
            if state.node.is_complete() and state.node.has_all_children():
                already_complete = True
                for child in state.node.children.itervalues():
                    if not child in self.captured:
                        already_complete = False
                        break
            # We can also skip if one of our children is complete
            if not already_complete:
                for child in state.node.children.itervalues():
                    if child in self.complete:
                        already_complete = True
                        break
            inst_capture = new_state.capture(state, state.node is self.root,
                                             already_captured, already_complete)
            # Record what we managed to capture, and track completeness
            # so ancestors can skip redundant captures later.
            if inst_capture or already_complete:
                self.captured.add(state.node)
                if state.node.is_complete():
                    self.complete.add(state.node)
def create_inline_composite(self, already_captured):
result = CompositeInstance(self.state, self.root, self.depth, self.field)
# Keep track of whether anything changed when doing the
# transformation, if not, we can just return this instance
if self.nodes[self.root].capture_inline(result, already_captured):
# Did change so use the | |
; //...................................... RECOMMENDED - A place to acknowledge various types of support for the project that produced this data. (ACDD)
:license = "" ; //............................................. RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:standard_name_vocabulary = "CF Standard Name Table vNN" ; //.. RECOMMENDED - If using CF standard name attribute for variables. Replace NN with the CF standard name table number (CF)
:date_created = "" ; //........................................ RECOMMENDED - Creation date of this version of the data(netCDF). Use ISO 8601:2004 for date and time. (ACDD)
:creator_name = "" ; //........................................ RECOMMENDED - The name of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_email = "" ; //....................................... RECOMMENDED - The email address of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_url = "" ; //......................................... RECOMMENDED - The URL of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
        :institution = "" ; //......................................... RECOMMENDED - The name of the institution principally responsible for originating this data. An institution attribute can be used for each variable if variables come from more than one institution. (CF/ACDD)
:project = "" ; //............................................. RECOMMENDED - The name of the project(s) principally responsible for originating this data. Multiple projects can be separated by commas. (ACDD)
:publisher_name = "" ; //...................................... RECOMMENDED - The name of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_email = "" ; //..................................... RECOMMENDED - The email address of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_url = "" ; //....................................... RECOMMENDED - The URL of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:geospatial_bounds = "" ; //................................... RECOMMENDED - Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format. (ACDD)
:geospatial_bounds_crs = "" ; //............................... RECOMMENDED - The coordinate reference system (CRS) of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_bounds_vertical_crs = "" ; //...................... RECOMMENDED - The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_lat_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower latitude limit. (ACDD)
:geospatial_lat_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper latitude limit. (ACDD)
:geospatial_lon_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower longitude limit. (ACDD)
:geospatial_lon_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper longitude limit. (ACDD)
:geospatial_vertical_min = 0.0d ; //........................... RECOMMENDED - Describes the numerically smaller vertical limit. (ACDD)
:geospatial_vertical_max = 0.0d ; //........................... RECOMMENDED - Describes the numerically larger vertical limit. (ACDD)
:geospatial_vertical_positive = "" ; //........................ RECOMMENDED - Use "up" or "down". (ACDD)
:time_coverage_start = "" ; //................................. RECOMMENDED - Describes the time of the first data point in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_end = "" ; //................................... RECOMMENDED - Describes the time of the last data point in the data set. Use ISO 8601:2004 for date and time.(ACDD)
:time_coverage_duration = "" ; //.............................. RECOMMENDED - Describes the duration of the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_resolution = "" ; //............................ RECOMMENDED - Describes the targeted time period between each value in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:uuid = "" ; //................................................ RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NCEI)
:sea_name = "" ; //............................................ RECOMMENDED - The names of the sea in which the data were collected. Use NCEI sea names table. (NCEI)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
recommended_ctx.assert_true(
sea.lower() in sea_names,
'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
)
# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))
# I hate english.
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
recommended_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
if hasattr(dataset, 'comment'):
recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')
return recommended_ctx.to_result()
def check_base_suggested_attributes(self, dataset):
'''
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, | |
tensor3D
# c0 = tensor.alloc(numpy_floatX(0.), beam_width, options['dim_proj'])
u0 = tensor.alloc(numpy_floatX(0.), hidi.shape[0], beam_width)
hiddeni, celli, probi = _ptr_probs(xi_mask, xi, hidi, celi, u0, hids, hiddens_mask)
f_probi = theano.function(inputs=[xi_mask, xi, hidi, celi, hids, p_mask, p], outputs=[hiddeni, celli, probi])
return preds, f_encode, f_decode, f_probi
def gen_model(p, p_mask, f_encode, f_probi, options):
    """Decode routes for a batch of problems with beam search.

    p : array, n_sizes * n_samples * data_dim -- batch of problems.
    p_mask : array, n_sizes * n_samples -- validity mask per point.
    f_encode, f_probi : compiled functions -- encoder and one decode step.
    options : dict -- model options; 'dim_proj' is the hidden size.

    Returns (route, costs): route is int64 of shape
    beam_width * n_samples * n_sizes, costs is the length-normalized
    negative log-probability per beam entry (beam_width * n_samples).
    The beam width equals n_sizes.
    """
    # p: n_sizes * n_samples * data_dim
    n_sizes = p.shape[0]
    n_samples = p.shape[1] if p.ndim == 3 else 1
    beam_width = n_sizes  # for beam search
    hprev = f_encode(p_mask, p)  # n_sizes * n_samples * data_dim
    c0 = numpy.zeros((n_samples, options['dim_proj']), dtype=config.floatX)
    xi = numpy.zeros((n_samples,), dtype='int64')
    # xi_mask = numpy.zeros((n_samples,), dtype=config.floatX)
    # First decode step seeds the beam.
    h, c, probi = f_probi(p_mask[0], xi, hprev[-1], c0, hprev, p_mask, p)  # probi n_sizes * n_samples
    route = -numpy.ones((beam_width, n_samples, n_sizes), dtype='int64')
    costi = -numpy.log(probi)
    idx = costi.argsort(axis=0)[:beam_width]  # beam_width * n_samples
    route[:, :, 0] = idx
    costs = costi[idx, numpy.arange(n_samples)]
    # tile to beam numbers
    hprev = numpy.tile(hprev[:, None, :, :], (1, beam_width, 1, 1))  # n_sizes * beam_width * n_samples * dim_proj
    h = numpy.tile(h[None, :, :], (beam_width, 1, 1))
    c = numpy.tile(c[None, :, :], (beam_width, 1, 1))
    probi = numpy.tile(probi[:, None, :], (1, beam_width, 1))
    # costs = numpy.tile(costs[:, None, :], (1, beam_width, 1))
    # Flattened index helpers mapping (point, beam) pairs back to
    # point index (idr) and beam index (idc).
    idr = numpy.tile(numpy.arange(n_sizes), (beam_width, 1)).T.flatten()
    idc = numpy.tile(numpy.arange(beam_width), (n_sizes, 1)).flatten()
    ids = numpy.tile(numpy.arange(n_samples)[None, :], (beam_width, 1))
    for i in range(1, n_sizes):
        for b in range(beam_width):
            # h: beam_width * n_sampels * dim_proj
            # c: beam_width * n_sampels * dim_proj
            # probi: n_sizes * beam_width * n_samples
            h[b], c[b], probi[:, b, :] = f_probi(p_mask[i], idx[b], h[b], c[b], hprev[:, b, :, :], p_mask, p)
            probi[:, b, :] *= p_mask[i]  # set unmasked to 0
            probi[:, b, :] += (1 - p_mask[i])  # then set to 1, since log(1) = 0 for calculating cost
        costi = -numpy.log(probi)  # costi: n_sizes * beam_width * n_samples
        costs = numpy.tile(costs[None, :, :], (n_sizes, 1, 1))  # duplicate costs x n_sizes
        costu = costi + costs
        # idb = numpy.outer(numpy.arange(beam_width),numpy.ones((i,))).astype('int64')
        # idbn = numpy.tile(idb[:,None,:], (1, n_samples, 1))
        idbn = numpy.tile(numpy.arange(beam_width)[:, None, None], (1, n_samples, i))
        idsn = numpy.tile(numpy.arange(n_samples)[None, :, None], (beam_width, 1, i))
        # Forbid revisiting points already on each partial route.
        costu[route[:, :, :i], idbn, idsn] = numpy.inf
        idx = costu.reshape(n_sizes * beam_width, n_samples).argsort(axis=0)[:beam_width]  # duplication can be selected
        # Re-gather beam state for the surviving hypotheses.
        h = h[idc[idx], ids, :]
        c = c[idc[idx], ids, :]
        route = route[idc[idx], ids, :]
        route[:, :, i] = idr[idx]
        costi += costs
        costs = costi[idr[idx], idc[idx], ids]
        idx = idr[idx]
    # Normalize by (masked) route length so samples of different sizes compare.
    costs /= numpy.tile((p_mask.sum(axis=0) + numpy.ones(p_mask[0].shape)), (beam_width, 1))
    # route: beam_width * n_samples * route
    # costs: beam_width * n_samples
    return route, costs
def tour_length(problem, route):
    """Return the closed-tour Euclidean length of *route* over *problem* points.

    Only route[0] .. route[n-2] are visited before closing back to route[0];
    the final entry of *route* is skipped (presumably it duplicates the
    start -- NOTE(review): confirm against the route generator).
    """
    last = route.shape[0] - 1
    total = 0.
    prev = problem[route[0]]
    for point_idx in route[1:last]:
        cur = problem[point_idx]
        total += numpy.linalg.norm(cur - prev)
        prev = cur
    # Close the loop back to the starting point.
    total += numpy.linalg.norm(problem[route[0]] - prev)
    return total
def tsp_eva(f_encode, f_probi, prepare_data, data, iterator, options):
    """Average tour length of the best beam-search route over *data*."""
    total_length = 0
    for _, batch_idx in iterator:
        batch = [data[t] for t in batch_idx]
        v, vm, vx, vxm, vy, vym = prepare_data(batch)
        routes, costs = gen_model(v, vm, f_encode, f_probi, options)
        # Only the top-ranked beam entry is scored.
        best = routes[0]
        for s in range(best.shape[0]):
            total_length += tour_length(v[:, s, :], best[s])
    return total_length / len(data)
def gen_hull(p, p_mask, f_encode, f_probi, options):
    """Greedily decode convex-hull point indices for each sample.

    p : n_sizes * n_samples * data_dim problem batch.
    Returns an int64 array of shape (n_samples, n_sizes).  Index 0 acts as
    a terminator: once a sample emits 0 it stays at 0, and decoding stops
    early when every sample has terminated.
    """
    # p: n_sizes * n_samples * data_dim
    n_sizes = p.shape[0]
    n_samples = p.shape[1] if p.ndim == 3 else 1
    hprev = f_encode(p_mask, p)  # n_sizes * n_samples * data_dim
    points = numpy.zeros((n_samples, n_sizes), dtype='int64')
    h = hprev[-1]
    c = numpy.zeros((n_samples, options['dim_proj']), dtype=config.floatX)
    xi = numpy.zeros((n_samples,), dtype='int64')
    xi_mask = numpy.ones((n_samples,), dtype=config.floatX)
    for i in range(n_sizes):
        h, c, probi = f_probi(p_mask[i], xi, h, c, hprev, p_mask, p)
        xi = probi.argmax(axis=0)
        # Samples that already emitted 0 stay pinned to 0 from then on.
        xi *= xi_mask.astype(numpy.int64)  # Avoid compatibility problem in numpy 1.10
        xi_mask = (numpy.not_equal(xi, 0)).astype(config.floatX)
        if numpy.equal(xi_mask, 0).all():
            break
        points[:, i] = xi
    return points
def hull_accuracy(problem, result, target):
    """Score a predicted hull index sequence against the target.

    Returns (accuracy, overlap): accuracy is 1.0 for an exact vertex-set
    match, 0.0 for a valid but different polygon, -1.0 for an invalid
    prediction; overlap is the intersection area as a fraction of the
    target polygon's area (0.0 when invalid).
    """
    # Drop the zero-padding / terminator entries before comparing.
    result = result[numpy.nonzero(result)[0]]
    target = target[numpy.nonzero(target)[0]]
    # A hull needs at least 3 vertices and must not repeat any of them.
    if len(result) < 3 or len(set(result)) != len(result):
        return -1.0, 0.0
    predicted = Polygon(problem[result])
    if not predicted.is_valid:
        return -1.0, 0.0
    truth = Polygon(problem[target])
    overlap = truth.intersection(predicted).area / truth.area
    if set(result) == set(target):
        return 1.0, overlap
    return 0.0, overlap
def ch_eva(f_encode, f_probi, prepare_data, data, iterator, options):
    """Evaluate hull predictions: (error rate, valid fraction, mean overlap)."""
    accuracy = 0.0
    counter = 0.0
    area = 0.0
    for _, batch_idx in iterator:
        batch = [data[t] for t in batch_idx]
        v, vm, vx, vxm, vy, vym = prepare_data(batch)
        hull_idx = gen_hull(v, vm, f_encode, f_probi, options)
        for s in range(hull_idx.shape[0]):
            acc, overlap = hull_accuracy(v[:, s, :], hull_idx[s, :], vy[:, s])
            # Invalid predictions (acc < 0) are excluded from the averages.
            if acc >= 0:
                accuracy += acc
                counter += 1
                area += overlap
    if counter > 0:
        return 1 - accuracy / len(data), counter / len(data), area / counter
    return 1.0, 0.0, 0.0
def build_model(tparams, options):
    """Build the symbolic computation graph for training and generation.

    Returns the symbolic training inputs (p, p_mask, x, x_mask, y, y_mask),
    the per-step predictions, the scalar training cost (masked mean negative
    log-likelihood), and the compiled f_encode / f_decode / f_probi functions
    used at generation time.
    """
    # for training
    p = tensor.tensor3('p', dtype=config.floatX)  # Problems, n_sizes * n_samples * data_dim
    p_mask = tensor.matrix('p_mask', dtype=config.floatX)
    x = tensor.matrix('x', dtype='int64')  # n_steps * n_samples
    x_mask = tensor.matrix('x_mask', dtype=config.floatX)
    y = tensor.matrix('y', dtype='int64')  # n_steps * n_samples
    y_mask = tensor.matrix('y_mask', dtype=config.floatX)
    # for generation
    hidi = tensor.matrix('hidi', dtype=config.floatX)
    celi = tensor.matrix('celi', dtype=config.floatX)
    hids = tensor.tensor3('hids', dtype=config.floatX)
    xi = tensor.vector('xi', dtype='int64')
    xi_mask = tensor.vector('xi_mask', dtype=config.floatX)
    n_steps = x.shape[0]
    n_samples = x.shape[1]
    preds, f_encode, f_decode, f_probi = ptr_network(tparams, p, p_mask, x, x_mask, xi, xi_mask, hidi, celi, hids,
                                                     options)
    # Gather the predicted probability of the reference output y at each step.
    idx_steps = tensor.outer(tensor.arange(n_steps, dtype='int64'), tensor.ones((n_samples,), dtype='int64'))
    idx_samples = tensor.outer(tensor.ones((n_steps,), dtype='int64'), tensor.arange(n_samples, dtype='int64'))
    probs = preds[idx_steps, y, idx_samples]
    # probs *= y_mask
    # Numerical floor so log() never sees zero; looser for float16.
    off = 1e-8
    if probs.dtype == 'float16':
        off = 1e-6
    # probs += (1 - y_mask) # change unmasked position to 1, since log(1) = 0
    probs += off
    # probs_printed = theano.printing.Print('this is probs')(probs)
    cost = -tensor.log(probs)
    cost *= y_mask
    # Per-sample average step cost, then mean over the batch.
    cost = cost.sum(axis=0) / y_mask.sum(axis=0)
    cost = cost.mean()
    return p, p_mask, x, x_mask, y, y_mask, preds, cost, f_encode, f_decode, f_probi
def train_lstm(
dim_proj=128, # LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.01, # Learning rate for sgd (not used for adadelta and rmsprop)
optimizer=rmsprop,
# sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
depth=2,
saveto='ptr_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='tsp',
# Parameter for extra option
noise_std=0.,
use_dropout=False, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
datapath='data.pkl.gz',
):
model_options = locals().copy()
load_data, prepare_data = get_dataset(dataset)
print 'Loading data'
train, valid, test = load_data(path=datapath)
model_options['data_dim'] = train[0][0][0].shape[0] # data_dim = 2, i.e (x,y)
print 'Building model'
params = init_params(model_options)
if reload_model:
load_params(reload_model, params)
tparams = init_tparams(params)
(p, p_mask, x, x_mask, y, y_mask, preds, cost, f_encode, f_decode, f_probi) = build_model(tparams, model_options)
f_cost = theano.function([p, p_mask, x, x_mask, y, y_mask], cost, name='f_cost')
grads = tensor.grad(theano.gradient.grad_clip(cost, -2.0, 2.0), wrt=tparams.values())
# grads = tensor.grad(cost, wrt=tparams.values())
f_grad = theano.function([p, p_mask, x, x_mask, y, y_mask], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads, p, p_mask, x, x_mask, y, y_mask, cost)
# generation
print 'Optimization'
kf_valid = get_minibatches_idx(len(valid), valid_batch_size)
kf_test = get_minibatches_idx(len(test), valid_batch_size)
print "%d train examples" % len(train)
print "%d valid examples" % len(valid)
print "%d test examples" % len(test)
history_err = []
best_p = None
bad_counter = 0
if validFreq == -1:
validFreq = len(train) / batch_size
if saveFreq == -1:
saveFreq = len(train) / batch_size
uidx = 0 # the number of update done
eidx = 0
estop = False
start_time = time.time()
train_err = 0.0
valid_err = | |
file
extcat = np.genfromtxt(xmatchexternal,
usecols=externalcolnums,
delimiter=externalcolsep,
names=externalcolnames,
dtype=externalcoldtypes)
ext_cosdecl = np.cos(np.radians(extcat['decl']))
ext_sindecl = np.sin(np.radians(extcat['decl']))
ext_cosra = np.cos(np.radians(extcat['ra']))
ext_sinra = np.sin(np.radians(extcat['ra']))
ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
ext_sinra*ext_cosdecl,
ext_sindecl))
ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0)
# get our kdtree
our_kdt = lclist['kdtree']
# get the external kdtree
ext_kdt = sps.cKDTree(ext_xyz)
# do a query_ball_tree
extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)
for extind, mind in enumerate(extkd_matchinds):
if len(mind) > 0:
ext_matches.append(mind[0])
ext_matching_objects.append(extcat['objectid'][extind])
ext_matches = np.array(ext_matches)
if ext_matches.size > 0:
# update the xmatch_matching_index
xmatch_matching_index[ext_matches] = True
LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' %
(extfile, extmatchdist, ext_matches.size))
else:
LOGERROR("xmatch: no objects were cross-matched to external "
"catalog spec: %s, can't continue" % xmatchexternal)
return None, None, None
except Exception as e:
LOGEXCEPTION('could not match to external catalog spec: %s' %
repr(xmatchexternal))
raise
# do the cone search next
if (conesearch and isinstance(conesearch, list) and len(conesearch) == 3):
try:
racenter, declcenter, searchradius = conesearch
cosdecl = np.cos(np.radians(declcenter))
sindecl = np.sin(np.radians(declcenter))
cosra = np.cos(np.radians(racenter))
sinra = np.sin(np.radians(racenter))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0)
# get the kdtree
our_kdt = lclist['kdtree']
# look up the coordinates
kdtindices = our_kdt.query_ball_point([cosra*cosdecl,
sinra*cosdecl,
sindecl],
xyzdist,
n_jobs=conesearchworkers)
if kdtindices and len(kdtindices) > 0:
LOGINFO('cone search: objects within %.4f deg '
'of (%.3f, %.3f): %s' %
(searchradius, racenter, declcenter, len(kdtindices)))
# update the conesearch_matching_index
matchingind = kdtindices
conesearch_matching_index[np.array(matchingind)] = True
# we fail immediately if we found nothing. this assumes the user
# cares more about the cone-search than the regular column filters
else:
LOGERROR("cone-search: no objects were found within "
"%.4f deg of (%.3f, %.3f): %s, can't continue" %
(searchradius, racenter, declcenter, len(kdtindices)))
return None, None
except Exception as e:
LOGEXCEPTION('cone-search: could not run a cone-search, '
'is there a kdtree present in %s?' % listpickle)
raise
# now that we're done with cone-search, do the column filtering
allfilterinds = []
if columnfilters and isinstance(columnfilters, list):
# go through each filter
for cfilt in columnfilters:
try:
fcol, foperator, foperand = cfilt.split('|')
foperator = FILTEROPS[foperator]
# generate the evalstring
filterstr = (
"np.isfinite(lclist['objects']['%s']) & "
"(lclist['objects']['%s'] %s %s)"
) % (fcol, fcol, foperator, foperand)
filterind = eval(filterstr)
ngood = lclist['objects'][objectidcol][filterind].size
LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood))
allfilterinds.append(filterind)
except Exception as e:
LOGEXCEPTION('filter: could not understand filter spec: %s'
% cfilt)
LOGWARNING('filter: not applying this broken filter')
# now that we have all the filter indices good to go
# logical-AND all the things
# make sure we only do filtering if we were told to do so
if (xmatchexternal or conesearch or columnfilters):
filterstack = []
if xmatchexternal:
filterstack.append(xmatch_matching_index)
if conesearch:
filterstack.append(conesearch_matching_index)
if columnfilters:
filterstack.extend(allfilterinds)
finalfilterind = np.column_stack(filterstack)
finalfilterind = np.all(finalfilterind, axis=1)
# get the filtered object light curves and object names
filteredobjectids = lclist['objects'][objectidcol][finalfilterind]
filteredlcfnames = lclist['objects']['lcfname'][finalfilterind]
else:
filteredobjectids = lclist['objects'][objectidcol]
filteredlcfnames = lclist['objects']['lcfname']
# if copylcsto is not None, copy LCs over to it
if copylcsto is not None:
if not os.path.exists(copylcsto):
os.mkdir(copylcsto)
if TQDM:
lciter = tqdm(filteredlcfnames)
else:
lciter = filteredlcfnames
LOGINFO('copying matching light curves to %s' % copylcsto)
for lc in lciter:
shutil.copy(lc, copylcsto)
LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size)
if xmatchexternal and len(ext_matching_objects) > 0:
if outfile is not None:
with open(outfile, 'wb') as outfd:
outdict = {
'inputlist':listpickle,
'kwargs':{},
'filtered_lcfnames':filteredlcfnames,
'filtered_objectids':filteredobjectids,
'filtered_extmatches':ext_matching_objects,
}
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return filteredlcfnames, filteredobjectids, ext_matching_objects
else:
if outfile is not None:
with open(outfile, 'wb') as outfd:
outdict = {
'inputlist':listpickle,
'kwargs':{},
'filtered_lcfnames':filteredlcfnames,
'filtered_objectids':filteredobjectids,
'filtered_extmatches':None,
}
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return filteredlcfnames, filteredobjectids
##################################
## GETTING VARIABILITY FEATURES ##
##################################
def get_varfeatures(lcfile,
                    outdir,
                    timecols=None,
                    magcols=None,
                    errcols=None,
                    mindet=1000,
                    lcformat='hat-sql'):
    '''This runs varfeatures on a single LC file.

    Parameters
    ----------
    lcfile : str
        Path of the light-curve file to process.
    outdir : str
        Directory where the output 'varfeatures-<objectid>.pkl' is written.
    timecols, magcols, errcols : list of str or None
        Column keys to pull out of the lcdict (dotted keys dereference
        nested dicts). None uses the defaults registered for `lcformat`.
    mindet : int
        Minimum number of finite LC points required per magcol; magcols
        with fewer points get a None feature entry.
    lcformat : str
        Registered LC format name, resolved via get_lcformat_spec.

    Returns
    -------
    str or None
        Path of the written pickle on success, None on any failure.
    '''
    #
    # handle the lcformat
    #
    formatspec = get_lcformat_spec(lcformat)
    # first, import the reader module
    readermod = importlib.import_module(formatspec['lcreader_module'])
    # then, get the function we need to read the lightcurve
    readerfunc = getattr(readermod, formatspec['lcreader_func'])
    # get any default kwargs we've set for this LC format
    if formatspec['lcreader_kwargs'] is not None:
        readerkwargs = formatspec['lcreader_kwargs']
    else:
        readerkwargs = None
    # get the default timecols, magcols, errcols
    if timecols is None:
        timecols = formatspec['timecols']
    if magcols is None:
        magcols = formatspec['magcols']
    if errcols is None:
        errcols = formatspec['errcols']
    # get the normalization module and function if provided
    if formatspec['lcnorm_module'] is not None:
        normmod = importlib.import_module(formatspec['lcnorm_module'])
        normfunc = getattr(normmod, formatspec['lcnorm_func'])
    else:
        normfunc = None
    # get any normalization function kwargs we've set
    if formatspec['lcnorm_kwargs'] is not None:
        normkwargs = formatspec['lcnorm_kwargs']
    else:
        normkwargs = None
    # finally, get the magsarefluxes key
    magsarefluxes = formatspec['magsarefluxes']
    try:
        # get the LC into a dict
        if readerkwargs:
            lcdict = readerfunc(lcfile, **readerkwargs)
        else:
            lcdict = readerfunc(lcfile)
        # some readers return a (lcdict, ...) tuple; unwrap it
        if isinstance(lcdict, tuple) and isinstance(lcdict[0], dict):
            lcdict = lcdict[0]
        resultdict = {'objectid': lcdict['objectid'],
                      'info': lcdict['objectinfo'],
                      'lcfbasename': os.path.basename(lcfile)}
        # normalize using the special function if specified
        if normfunc is not None:
            if normkwargs is not None:
                lcdict = normfunc(lcdict, **normkwargs)
            else:
                lcdict = normfunc(lcdict)
        for tcol, mcol, ecol in zip(timecols, magcols, errcols):
            # dereference the columns and get them from the lcdict
            # ('a'.split('.') == ['a'], so the dotted and plain cases
            # collapse into one expression)
            tcolget = tcol.split('.')
            times = dict_get(lcdict, tcolget)
            mcolget = mcol.split('.')
            mags = dict_get(lcdict, mcolget)
            ecolget = ecol.split('.')
            errs = dict_get(lcdict, ecolget)
            # normalize here if not using special normalization
            if normfunc is None:
                ntimes, nmags = normalize_magseries(
                    times, mags,
                    magsarefluxes=magsarefluxes
                )
                times, mags = ntimes, nmags
            # make sure we have finite values
            finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
            # make sure we have enough finite values
            if mags[finind].size < mindet:
                LOGINFO('not enough LC points: %s in normalized %s LC: %s' %
                        (mags[finind].size, mcol, os.path.basename(lcfile)))
                resultdict[mcolget[-1]] = None
            else:
                # get the features for this magcol
                lcfeatures = features.all_nonperiodic_features(
                    times, mags, errs
                )
                resultdict[mcolget[-1]] = lcfeatures
        # now that we've collected all the magcols, we can choose which is the
        # "best" magcol. this is defined as the magcol that gives us the
        # smallest LC MAD.
        try:
            magmads = np.zeros(len(magcols))
            for mind, mcol in enumerate(magcols):
                mcolget = mcol.split('.')
                magmads[mind] = resultdict[mcolget[-1]]['mad']
            # BUGFIX: np.where(...)[0] returned an index *array*, which
            # cannot index the magcols list; argmin gives a scalar index
            # (first minimum wins on ties).
            bestmagcolind = int(np.argmin(magmads))
            resultdict['bestmagcol'] = magcols[bestmagcolind]
        except Exception:
            # BUGFIX: was a bare `except:`; a skipped magcol stores None
            # above and indexing it with ['mad'] raises TypeError, which
            # is the expected fallthrough here — but a bare except also
            # caught KeyboardInterrupt/SystemExit.
            resultdict['bestmagcol'] = None
        outfile = os.path.join(outdir,
                               'varfeatures-%s.pkl' % resultdict['objectid'])
        with open(outfile, 'wb') as outfd:
            pickle.dump(resultdict, outfd, protocol=4)
        return outfile
    except Exception as e:
        LOGEXCEPTION('failed to get LC features for %s because: %s' %
                     (os.path.basename(lcfile), e))
        return None
def variability_threshold(featuresdir,
outfile,
magbins=np.arange(8.0,16.25,0.25),
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
min_lcmad_stdev=5.0,
min_stetj_stdev=2.0,
min_iqr_stdev=2.0,
min_inveta_stdev=2.0,
verbose=True):
'''This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
outfile is a pickle file that will contain all the info.
min_lcmad_stdev, min_stetj_stdev, min_iqr_stdev, min_inveta_stdev are all
stdev multipliers to use for selecting variable objects. These are either
scalar floats to apply the same sigma cut for each magbin or np.ndarrays of
size = magbins.size - 1 to apply different sigma cuts for each magbin.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
'''
#
# handle the lcformat
#
formatspec = get_lcformat_spec(lcformat)
# first, import the reader module
readermod = importlib.import_module(formatspec['lcreader_module'])
# then, get the function we need to read the lightcurve
readerfunc = getattr(readermod, formatspec['lcreader_func'])
# get any default kwargs we've set for this LC format
if formatspec['lcreader_kwargs'] is not None:
readerkwargs = formatspec['lcreader_kwargs']
else:
readerkwargs = None
# get the default timecols, magcols, errcols
if timecols is None:
timecols = formatspec['timecols']
if magcols is None:
| |
# rosaceae/bins.py
# -*- coding: UTF-8 -*-
"""
rosaceae.bin
~~~~~~~~~~~~
This module implements data binning.
"""
from __future__ import print_function
import numpy as np
import pandas as pd
from itertools import combinations
from sklearn.tree import DecisionTreeClassifier
def bin_frequency(xarray, bins=5, na_omit=True, verbose=False):
    '''Data discretization by the same frequency.

    Data are binned by the same frequency. Frequency is controlled by bins
    number, frequency = (total length of xarray) / (bins number). Data will
    be sorted ascending. Missing values will be placed at the end.

    Parameters
    ----------
    xarray: Pandas.Series or Numpy.array type data.
    bins: int,
        Number of bins.
    na_omit: False or True
        Keep or drop missing value. Default is True (drop). When False,
        missing values are grouped in a separate 'Miss' bin.
    verbose: True or False, default is False

    Returns
    -------
    Dictionary
        Bin as key names. Corresponding row index (positional, after
        reset_index) as values.
    '''
    xarray = xarray.copy()
    # work on positional labels 0..n-1 so returned indices address rows
    xarray.reset_index(drop=True, inplace=True)
    xarray.sort_values(inplace=True)
    out = {}
    na_mask = pd.isna(xarray)
    if na_omit:
        xarray = xarray[~na_mask]
    elif na_mask.sum() > 0:
        # BUGFIX: return the row labels of the missing entries, consistent
        # with the Freq* bins below. np.where() on the *sorted* series
        # previously returned positions in sort order, not row labels.
        out['Miss'] = xarray.index[na_mask.values]
        xarray = xarray[~na_mask]
    step = int(len(xarray) / bins)
    if verbose:
        print('Step: %s' % step)
    for i in range(bins):
        group = 'Freq%s' % (i + 1)
        if i == bins - 1:
            # last bin absorbs the remainder rows
            out[group] = xarray.index[i * step:]
        else:
            out[group] = xarray.index[i * step:(i + 1) * step]
    return out
def bin_distance(xarray, bins=5, na_omit=True, verbose=False):
    '''Data discretization in the same distance.

    All bins share the same width: (max(xarray) - min(xarray)) / bins.
    The first interval is open on the left (-inf) and the last is open on
    the right (+inf), so every finite value falls into some bin.

    Parameters
    ----------
    xarray: Pandas.Series or Numpy.array
    bins: Int
        Number of bins.
    na_omit: False or True
        Keep or drop missing value. Default is True, missing value will be
        grouped in a separate bin.
    verbose: True or False, default is False

    Returns
    -------
    Dictionary
        Bin as key names. Corresponding row index as values.
    '''
    # bin width derived from the full value range
    width = (max(xarray) - min(xarray)) / bins
    result = {}
    xarray = xarray.copy()
    xarray.reset_index(drop=True, inplace=True)
    if verbose:
        print('Distance: %s' % width)
    # keep missing rows in a separate bin when requested
    if not na_omit and sum(pd.isna(xarray)) > 0:
        result['Miss'] = np.where(pd.isna(xarray))[0]
    lower_ref = min(xarray)
    for k in range(bins):
        lo = -np.inf if k == 0 else lower_ref + k * width
        hi = np.inf if k == bins - 1 else lower_ref + (k + 1) * width
        result["[%s,%s)" % (lo, hi)] = xarray.index[(xarray >= lo) & (xarray < hi)]
    return result
def bin_tree(xarray, y, min_samples_node=0.05, na_omit=True, **kwargs):
    '''Binning data according to DecisionTree split nodes.

    Fits a shallow decision tree on the single feature and uses the
    thresholds of its (sufficiently populated) split nodes as bin
    boundaries.

    Parameters
    ----------
    xarray : pandas series data
        Input feature value, array like, shape = [n_samples] or
        [n_samples, 1]
    y : array like
        The target value, array like, shape = [n_samples] or
        [n_samples, n_output]
    min_samples_node : float
        Fraction of n_samples below which a tree node is ignored when
        collecting split thresholds.
    na_omit : False or True
        Keep or drop missing value. When False, positions of missing rows
        are returned under a 'Miss' key.
    **kwargs : **kwargs
        Keyword arguments for sklearn DecisionTree.

    Returns
    -------
    Dictionary
        Bin interval as key names. Corresponding row index list as values.
    '''
    n_samples = xarray.shape[0]
    clf = DecisionTreeClassifier(random_state=0,
                                 criterion='entropy',
                                 min_samples_split=0.2,
                                 max_leaf_nodes=6,
                                 min_impurity_decrease=0.001,
                                 **kwargs)
    if len(xarray.shape) == 1:
        xarray = pd.DataFrame(xarray.values.reshape(n_samples, 1))
    na_mask = pd.isna(xarray.iloc[:, 0])
    na_where = np.where(na_mask)[0]
    if na_mask.any():
        # BUGFIX: y must be aligned with the non-NA rows of xarray before
        # fitting. Previously this was done only when na_omit=False, so
        # na_omit=True with missing data passed mismatched lengths to
        # clf.fit().
        # reset index for x and y in case of IndexingError
        xarray = xarray.reset_index(drop=True)
        if not isinstance(y, (pd.Series, pd.DataFrame)):
            y = pd.Series(y)
        y = y.reset_index(drop=True)
        y = y[~na_mask.values]
    xarray_substitute = xarray.dropna()  # remove NA value for sklearn
    clf.fit(xarray_substitute, y)
    children_left = clf.tree_.children_left
    children_right = clf.tree_.children_right
    threshold = clf.tree_.threshold
    # raw node records of the fitted tree; j[-2] is presumably the node's
    # sample count -- TODO confirm against sklearn's tree state layout
    nodes_info = clf.tree_.__getstate__()['nodes']
    small_nodes = [i for i, j in enumerate(nodes_info)
                   if j[-2] < n_samples * min_samples_node]
    # find split nodes in the tree and keep their thresholds as boundaries
    breaks = []
    for i, (l, r) in enumerate(zip(children_left, children_right)):
        if l != r and l not in small_nodes and r not in small_nodes:
            breaks.append(threshold[i])
    breaks.sort()
    breaks = [-np.inf] + breaks + [np.inf]
    out = {}
    for i, b in enumerate(breaks[1:]):
        start = breaks[i]
        end = b
        key = "[%s:%s)" % (start, end)
        out[key] = np.where((xarray >= start) & (xarray < end))[0]
    if not na_omit and len(na_where) > 0:
        out['Miss'] = na_where
    return out
# For chi-square binning
def chi2(a, bad_rate):
    """Chi-square statistic of an observed (bad, good) count pair against
    the expected split implied by *bad_rate*.

    a : sequence of two counts, (bad, good), observed in a group.
    bad_rate : overall bad proportion used to derive the expected counts
        (the expected bad count is truncated to an int, as before).
    """
    total = sum(a)
    expected_bad = int(total * bad_rate)
    expected = [expected_bad, total - expected_bad]
    return ((a[0] - expected[0]) ** 2 / expected[0]
            + (a[1] - expected[1]) ** 2 / expected[1])
def recursion(groups, counts, bins, bad_rate, numeric=False, verbose=False):
    """Recursively merge the two most similar groups until <= `bins` remain.

    Similarity is measured with chi2(): the pair whose merged (bad, good)
    counts yield the *largest* chi-square versus the expected split is
    merged first.

    Parameters
    ----------
    groups : list
        Category labels (categorical mode) or (lo, hi) boundary tuples
        (numeric mode).
    counts : list of np.ndarray
        Per-group (bad, good) count pairs, parallel to `groups`.
    bins : int
        Target number of groups; recursion stops at or below this size.
    bad_rate : float
        Overall bad proportion, forwarded to chi2().
    numeric : bool
        False merges any two categories; True merges only adjacent
        intervals so boundaries stay contiguous.
    verbose : bool
        Print each merge.

    Returns
    -------
    list
        The merged group labels / boundary tuples.

    NOTE(review): if no candidate pair produces a chi2 greater than 0,
    `max_com_idx` stays unassigned in the categorical branch (NameError)
    and an empty tuple in the numeric branch (IndexError) -- confirm the
    inputs always yield a positive chi2.
    """
    max_chi = 0
    if not numeric:
        # categorical: consider every unordered pair of groups
        for _i, i in combinations(range(len(groups)), 2):
            com = (_i, i)
            com_count = counts[i] + counts[_i]
            tmpchi = chi2(com_count, bad_rate)
            if tmpchi > max_chi:
                max_chi = tmpchi
                max_com_idx = com
        # merge similar categories into one
        if verbose:
            print('("{0}") + ("{1}") --> ("{0},{1}")'.format(groups[max_com_idx[0]], groups[max_com_idx[1]]))
        merged = '%s,%s' % (groups[max_com_idx[0]], groups[max_com_idx[1]])
        groups = [g for _, g in enumerate(groups) if _ not in max_com_idx]
        merged_counts = counts[max_com_idx[0]] + counts[max_com_idx[1]]
        counts = [c for _, c in enumerate(counts) if _ not in max_com_idx]
        groups.append(merged)
        counts.append(merged_counts)
    else:
        max_com_idx = tuple()
        # numeric: only adjacent intervals may merge, keeping boundaries
        # contiguous
        for i in range(1, len(groups)-1):
            chi_before = chi2(counts[i-1] + counts[i], bad_rate)
            chi_after = chi2(counts[i] + counts[i+1], bad_rate)
            if chi_before > max_chi:
                max_com_idx = (i-1, i)
                max_chi = chi_before
            elif chi_after > max_chi:
                max_com_idx = (i, i+1)
                max_chi = chi_after
        merged = (groups[max_com_idx[0]][0], groups[max_com_idx[1]][1]) # create a new boundary
        if verbose:
            print('*',groups[max_com_idx[0]], groups[max_com_idx[1]], '-->' ,merged)
        groups = groups[:max_com_idx[0]] + [merged] + groups[max_com_idx[1]+1:]
        merged_counts = counts[max_com_idx[0]] + counts[max_com_idx[1]]
        counts = counts[:max_com_idx[0]] + [merged_counts] + counts[max_com_idx[1]+1:]
    if len(groups) <= bins:
        return groups
    else:
        # keep merging until the target bin count is reached
        return recursion(groups, counts, bins, bad_rate, numeric=numeric, verbose=verbose)
def bin_chi2(xarray, y, bins, min_sample=0.01, na_omit=True, verbose=False):
    '''Chi-square (ChiMerge style) binning.

    Starts from a fine initial partition of `xarray` and repeatedly merges
    the most similar groups (largest chi2 of the merged (bad, good) counts)
    via recursion() until only `bins` groups remain.

    Parameters
    ----------
    xarray : pandas.Series
        Feature values; dtype 'object' selects the categorical branch.
    y : pandas.Series
        Binary target (1 = bad), same length as xarray.
    bins : int
        Desired number of final groups.
    min_sample : float
        Minimum fraction of samples an initial numeric group must hold.
    na_omit : True or False
        True drops missing rows; False records their index labels under a
        'Miss' key.
        NOTE(review): that 'Miss' entry lives in a local dict which is
        never returned (only the group labels are) -- confirm intended API.
    verbose : True or False
        Print merge progress.

    Returns
    -------
    list
        Final group labels: merged category strings for categorical data,
        or '(lo:hi]' interval strings for numeric data.
    '''
    xarray = xarray.copy()
    xarray.reset_index(drop=True, inplace=True)
    out = {}
    # remove missing values or not
    na_mask = pd.isna(xarray)
    if na_omit:
        # BUGFIX: filter y with the mask of the *unfiltered* xarray, using
        # positional values; the original filtered xarray first and then
        # indexed y with a re-computed (shorter, misaligned) mask.
        y = y[~na_mask.values]
        xarray = xarray[~na_mask]
    else:
        out['Miss'] = xarray.index[na_mask.values]
    total_bad = y.sum()
    bad_rate = total_bad / len(y)
    if verbose:
        # previously an unconditional debug print; keep it behind verbose
        print('bad_rate', total_bad, len(y))
    # numeric or categorical branch
    if xarray.dtype == 'object':
        if verbose:
            print('Categorious data detected.')
        groups = list(set(xarray[~pd.isna(xarray)]))
        counts = []
        for g in groups:
            tmp = y[(xarray == g).values]
            # BUGFIX: counts must be (bad, good) pairs; the second value was
            # previously passed to np.array() as its dtype argument.
            counts.append(np.array((sum(tmp), len(tmp) - sum(tmp))))
        # BUGFIX: recursion() requires bad_rate, which was not passed here.
        groups = recursion(groups, counts, bins, bad_rate, numeric=False,
                           verbose=verbose)
    else:
        if verbose:
            print('Numeric data detected.')
        rounds = 50
        q25 = np.percentile(xarray, 25)
        q75 = np.percentile(xarray, 75)
        iqr = q75 - q25
        # clamp the scan range with Tukey fences so outliers do not stretch
        # the initial grid
        min_value = max(min(xarray), q25 - 1.5 * iqr)
        max_value = min(max(xarray), q75 + 1.5 * iqr)
        step = (max_value - min_value) / rounds  # exclude outlier
        borders = ([-np.inf]
                   + [min_value + step * i for i in range(1, rounds)]
                   + [np.inf])
        if verbose:
            print("Range from min value: %s, max value: %s, step: %s"
                  % (min_value, max_value, step))
        groups = []
        counts = []
        # create a boundary list, skipping groups below the min_sample share
        i_start = 0
        for _i, b in enumerate(borders[1:]):
            start = borders[i_start]
            end = b
            in_group = ((xarray >= start) & (xarray < end)).values
            if in_group.sum() < len(y) * min_sample:
                # too small: extend the current group to the next border
                continue
            tmp = y[in_group]
            groups.append((start, end))
            counts.append(np.array((sum(tmp), len(tmp) - sum(tmp))))
            i_start = _i + 1
        # re-extend the last group to +inf if its upper border was skipped
        if groups and groups[-1][1] != np.inf:
            start = groups[-1][0]
            # BUGFIX: the mask must test xarray against the bound, not y
            tmp = y[(xarray >= start).values]
            groups[-1] = (start, np.inf)
            counts[-1] = np.array((sum(tmp), len(tmp) - sum(tmp)))
        if verbose:
            print('Init groups:', groups)
        groups = recursion(groups, counts, bins, bad_rate=bad_rate,
                           numeric=True, verbose=verbose)
        # reformat numeric groups into interval label strings
        for _i, g in enumerate(groups):
            if np.isinf(g[0]):
                groups[_i] = '(-inf:%s]' % g[1]
            elif np.isinf(g[1]):
                groups[_i] = '(%s:inf)' % g[0]
            else:
                groups[_i] = '(%s:%s]' % (g[0], g[1])
    return groups
# For custom binning.
def bin_custom(xarray, groups, na_omit=True, verbose=False):
'''Binning data by customized binning boundary
Parameters
----------
xarray: array like data
groups: list like
Custom binning boundary. For numeric data, ['(-inf:3]', '(3:6]', '(6:inf)'].
For categorious data, [('A', 'B'), ('C'), ('D','E', 'Miss')], `Miss` is used
for missing data(NA values). However you can not put `Miss` in numeric data.
na_omit: True or False
Default is False. Get all NA in a separate group. If `Miss` in a customized
categorious data, this Miss separate group will delete from output.
verbose: True or False,
Default is False. Print verbose message.
'''
# reset index
xarray = xarray.copy()
xarray.reset_index(drop=True, inplace=True)
if not isinstance(groups, list):
raise TypeError('groups is a list, contains customized binning boundary')
out = {}
# handle missing data.
if not na_omit:
if verbose:
print('Keep NA data')
tmp = np.where(pd.isna(xarray))[0]
if verbose:
print('Missing data: %s' % len(tmp))
if len(tmp) > 0:
out['Miss'] = np.where(pd.isna(xarray))[0]
if isinstance(groups[0], str) and ':' in groups[0]: # | |
# src/main_disent.py
'''
Disentangled multiple cause effect learning
2020-07-08
'''
import time
import numpy as np
import torch
from torch import optim
from torch import nn
from torch.nn import functional as F
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
import math
import argparse
import os
import sys
import scipy.io as scio
import matplotlib
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE as tsn
from scipy.stats import pearsonr
from data_synthetic import plot_cluster, generate_y_final, get_y_final
from model_disent import MTvae
sys.path.append('../')
# global matplotlib styling for the result plots
font_sz = 28
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'
matplotlib.rcParams.update({'font.size': font_sz})
# command-line configuration
# NOTE(review): some help strings quote defaults that do not match the
# actual `default=` values (batch-size says 10000 but is 1500; epochs says
# 10 but is 150) -- confirm which is intended.
parser = argparse.ArgumentParser(description='Disentangled multiple cause VAE')
parser.add_argument('--nocuda', type=int, default=0, help='Disables CUDA training.')
parser.add_argument('--batch-size', type=int, default=1500, metavar='N',
                    help='input batch size for training (default: 10000)')
parser.add_argument('--epochs', type=int, default=150, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--K', type=int, default=4, metavar='N',
                    help='number of clusters')
parser.add_argument('--trn_rate', type=float, default=0.6, help='training data ratio')
parser.add_argument('--tst_rate', type=float, default=0.2, help='test data ratio')
parser.add_argument('--mu_p_wt', type=float, default=1.0, help='weight for mu_p_t')
parser.add_argument('--dim_zt', type=int, default=32, metavar='N',
                    help='dimension of zt')
parser.add_argument('--dim_zi', type=int, default=32, metavar='N',
                    help='dimension of zi')
parser.add_argument('--nogb', action='store_true', default=False,
                    help='Disable Gumbel-Softmax sampling.')
parser.add_argument('--beta', type=float, default=20, help='weight for loss balance')
parser.add_argument('--dataset', default='synthetic', help='dataset to use') # synthetic, amazon, amazon-6c
parser.add_argument('--lr', type=float, default=1e-3,
                    help='learning rate for optimizer')
parser.add_argument('--weight_decay', type=float, default=1e-5,
                    help='weight decay')
args = parser.parse_args()
# select gpu if available
args.cuda = not args.nocuda and torch.cuda.is_available()
device = torch.device("cuda:0" if args.cuda else "cpu")
args.device = device
print('using device: ', device)
# seed both RNGs for reproducibility
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def loss_function(input_ins_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target, a_reconstby_zt, input_treat_trn):
    """Compute the training objective of the disentangled multi-cause VAE.

    The parameters mirror the outputs of the model's forward pass plus the
    batch inputs: `input_ins_batch` is the observed treatment-assignment
    batch, `target` the observed outcome, `input_treat_trn` the full
    treatment matrix.

    Returns
    -------
    dict
        'loss' (the optimized total), its components ('loss_reconst',
        'KLD_C', 'E_KLD_QT_PT', 'loss_reconst_zt', 'KL_ZI', 'loss_y'),
        the reporting-only outcome MSE 'loss_y_mse', and the placeholder
        'loss_balance' (always 0.0, kept for interface compatibility).
    """
    # 1. reconstruction loss of the treatment assignments
    loss_bce = nn.BCELoss(reduction='mean').to(device)
    loss_reconst = loss_bce(a_pred.view(-1), input_ins_batch.view(-1))
    # 2. KLD_C: KL between the cluster posterior q(c) and a uniform prior
    # over the K clusters (1e-10 guards the log)
    KLD_C = torch.mean(torch.sum(qc * torch.log(args.K * qc + 1e-10), dim=1), dim=0)
    # 3. E_KLD_QT_PT: expected KL between q(zt) and each cluster prior,
    # broadcast so every zt is compared against every cluster centroid
    mu_zt = mu_zt.unsqueeze(-1)
    logvar_zt = logvar_zt.unsqueeze(-1)
    mu_p_zt = mu_p_zt.T
    logvar_p_zt = logvar_p_zt.T
    mu_p_zt = mu_p_zt.unsqueeze(0)
    logvar_p_zt = logvar_p_zt.unsqueeze(0)
    KLD_QT_PT = 0.5 * (((logvar_p_zt - logvar_zt) + ((logvar_zt.exp() + (mu_zt - args.mu_p_wt * mu_p_zt).pow(2)) / logvar_p_zt.exp())) - 1)
    # treatment reconstruction from zt alone (BCELoss is stateless, so the
    # module created above is reused instead of a second identical instance)
    loss_reconst_zt = loss_bce(a_reconstby_zt.reshape(-1), input_treat_trn.reshape(-1))
    qc = qc.unsqueeze(-1)  # m x k x 1
    qc = qc.expand(-1, args.K, 1)  # m x k x 1
    E_KLD_QT_PT = torch.mean(torch.sum(torch.bmm(KLD_QT_PT, qc), dim=1), dim=0)
    # 4. KL_ZI: standard-normal KL over the concatenated instance latents
    mu_zi_all = None
    log_zi_all = None
    for k in range(args.K):
        mu_zi_k = mu_zi_list[k]
        logvar_zi_k = logvar_zi_list[k]
        mu_zi_all = mu_zi_k if mu_zi_all is None else torch.cat([mu_zi_all, mu_zi_k], dim=1)
        log_zi_all = logvar_zi_k if log_zi_all is None else torch.cat([log_zi_all, logvar_zi_k], dim=1)
    KL_ZI = -0.5 * torch.sum(1 + log_zi_all - mu_zi_all.pow(2) - log_zi_all.exp(), dim=1)  # n
    KL_ZI = torch.mean(KL_ZI, dim=0)
    # 5. loss_y: Gaussian negative log-likelihood of the outcome
    # (the dead intermediate `bb` that recomputed this expression was removed)
    temp = 0.5 * math.log(2 * math.pi)
    target = target.view(-1, 1)
    loss_y = - torch.mean(torch.sum(- 0.5 * ((target - mu_y).pow(2)) / logvar_y.exp() - 0.5 * logvar_y - temp, dim=1), dim=0)
    # MSE_Y (reported only; not part of the optimized loss)
    loss_mse = nn.MSELoss(reduction='mean')
    loss_y_mse = loss_mse(mu_y, target)
    # 6. loss balance (placeholder, kept for interface compatibility)
    loss_balance = 0.0
    loss = loss_reconst + KL_ZI + KLD_C + E_KLD_QT_PT + loss_y
    eval_result = {
        'loss': loss, 'loss_reconst': loss_reconst, 'KLD_C': KLD_C, 'E_KLD_QT_PT': E_KLD_QT_PT, 'loss_reconst_zt': loss_reconst_zt,
        'KL_ZI': KL_ZI, 'loss_y': loss_y, 'loss_y_mse': loss_y_mse, 'loss_balance': loss_balance,
    }
    return eval_result
def test(model, data_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, inx_spec_treat=None, show_cluster=False, show_disent=True, show_y=False):
    """Evaluate a trained model over `data_loader`.

    Computes, averaged over the candidate treatment assignments in
    `adj_assign`:
      * PEHE (root mean squared error of predicted vs. true ITEs),
      * MAE of the ATE,
    plus the element-wise accuracy of the treatment-assignment
    reconstruction (`a_pred`) and of the reconstruction from zt alone
    (`a_reconstby_zt`). Optionally plots predicted vs. true clusters.

    NOTE(review): true outcomes come from get_y_true_final(), but the
    top-of-file import is `get_y_final` -- confirm which name is correct.
    When Z_i_list is None, the true outcomes default to zeros.
    NOTE(review): `show_disent` and `show_y` are accepted but unused in
    this body.
    """
    model.eval()
    num_cluster = args.K
    m = input_treat_trn.shape[0]
    num_assign = len(adj_assign)
    # per-assignment accumulators, reduced after the batch loop
    pehe = torch.zeros(num_assign, dtype = torch.float)
    ite_true_sum = torch.zeros(num_assign, dtype = torch.float)
    ite_pred_sum = torch.zeros(num_assign, dtype = torch.float)
    adj_pred_correctNum = 0.0 # running count of correct assignment entries
    data_size = 0
    for batch_idx, (adj_batch, target, orin_index) in enumerate(data_loader):
        data_size += adj_batch.shape[0]
        batch_size = adj_batch.shape[0]
        if args.cuda:
            adj_batch = adj_batch.to(device)
            orin_index = orin_index.to(device)
        mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
            adj_batch, input_treat_trn)
        # accuracy of treatment assignment prediction (0.5 threshold)
        a_pred[a_pred >= 0.5] = 1.0
        a_pred[a_pred < 0.5] = 0.0
        if inx_spec_treat is None:
            adj_pred_correctNum += (a_pred == adj_batch).sum()
        else:
            # score only the selected subset of treatments
            a_pred_spec = a_pred[:, inx_spec_treat]
            a_true_spec = adj_batch[:, inx_spec_treat]
            adj_pred_correctNum += (a_pred_spec == a_true_spec).sum()
        # get true y (zeros when no ground-truth latents are available)
        if Z_i_list is None:
            y_true = torch.zeros((batch_size, len(adj_assign), 1), device=args.device)
            y_true_0 = torch.zeros((batch_size, 1), device=args.device)
        else:
            y_true, y_true_0 = get_y_true_final(orin_index, adj_assign, Z_i_list, Zt, params)
        # pehe, ate: baseline prediction under the all-zero assignment
        adj_batch_0 = torch.zeros([adj_batch.shape[0], m], dtype=torch.float) # batch size x m
        if args.cuda:
            adj_batch_0 = adj_batch_0.to(device)
        y_pred_0, _ = model.predictY(mu_zt, zi_sample_list, qc, adj_batch_0)
        for j in range(len(adj_assign)):
            # broadcast the j-th candidate assignment over the batch
            adj_assign_j = adj_assign[j] # m
            adj_assign_j = adj_assign_j.unsqueeze(0)
            adj_assign_j = adj_assign_j.expand(adj_batch.shape[0], m)
            if args.cuda:
                adj_assign_j = adj_assign_j.to(device)
            y_pred_j, _ = model.predictY(mu_zt, zi_sample_list, qc, adj_assign_j)
            y_true_j = y_true[:, j, :]
            # individual treatment effects relative to the zero assignment
            ite_pred_j = y_pred_j - y_pred_0
            ite_true_j = y_true_j - y_true_0
            pehe[j] = pehe[j] + torch.sum((ite_pred_j - ite_true_j).pow(2))
            ite_true_sum[j] = ite_true_sum[j] + torch.sum(ite_true_j)
            ite_pred_sum[j] = ite_pred_sum[j] + torch.sum(ite_pred_j)
    # reduce accumulators to the reported scalar metrics
    pehe = torch.sqrt(pehe / data_size)
    pehe_ave = torch.sum(pehe) / num_assign
    ate = torch.abs(ite_true_sum / data_size - ite_pred_sum / data_size)
    ate_ave = torch.sum(ate) / num_assign
    if inx_spec_treat is None:
        acc_apred = adj_pred_correctNum / (data_size * m)
    else:
        m_new = len(inx_spec_treat)
        acc_apred = adj_pred_correctNum / (data_size * m_new)
    # accuracy of the treatment reconstruction from zt alone
    a_reconstby_zt[a_reconstby_zt >= 0.5] = 1.0
    a_reconstby_zt[a_reconstby_zt < 0.5] = 0.0
    adj_pred_correctNum_zt = 0.0
    adj_pred_correctNum_zt += (a_reconstby_zt == input_treat_trn).sum()
    acc_apred_zt = adj_pred_correctNum_zt / (input_treat_trn.shape[0] * input_treat_trn.shape[1])
    if show_cluster:
        # qc from the last batch is used for the cluster plot
        C = torch.argmax(qc, dim=1).cpu().detach().numpy() # m
        mu_zt = mu_zt.cpu().detach().numpy()
        mu_p_zt = args.mu_p_wt * mu_p_zt.cpu().detach().numpy()
        Zt_tsn = plot_cluster(mu_zt, C, num_cluster, mu_zt_all=mu_p_zt, saving=False)
        # true clusters, drawn over the same t-SNE embedding
        plot_cluster(mu_zt, C_true, num_cluster, mu_zt_all=mu_p_zt, saving=False, Zt_tsn=Zt_tsn)
    eval_result = {
        'pehe': pehe_ave, 'ate': ate_ave, 'acc_apred': acc_apred,
        'acc_apred_zt': acc_apred_zt
    }
    return eval_result
def train(epochs, model, trn_loader, val_loader, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, optimizer, with_test=True, active_opt=[True, True, True, True]):
    """Alternating-optimizer training loop for the disentangled VAE.

    Per batch, each enabled phase (selected by `active_opt`) re-runs the
    forward pass and steps a dedicated optimizer on its own sub-loss:
      phase 1 (optimizer_1, 5 steps):  treatment reconstruction from zt
      phase 3 (optimizer_3, 3 steps):  loss_reconst + beta * KL_ZI
                                       (beta ramps from 1.0 to args.beta
                                       after epoch 100)
      phase 4 (optimizer_4, 20 steps): outcome likelihood loss_y
      phase 2 (optimizer_2, 20 steps): cluster terms 5*KLD_C + E_KLD_QT_PT
    Test metrics are printed every 100 epochs.

    NOTE(review): `active_opt` is a mutable default argument; it is only
    read here, never mutated, so this is currently harmless.
    NOTE(review): `with_test`, `val_loader`, `Z_i_list` (beyond forwarding
    to test()) are accepted but otherwise unused in the visible body.
    """
    time_begin = time.time()
    model.train()
    print("start training!")
    optimizer_1 = optimizer[0]
    optimizer_2 = optimizer[1]
    optimizer_3 = optimizer[2]
    optimizer_4 = optimizer[3]
    for epoch in range(epochs):
        for batch_idx, (adj_batch, target, orin_index) in enumerate(trn_loader):
            if args.cuda:
                adj_batch = adj_batch.to(device)
                target = target.to(device)
            optimizer_1.zero_grad()
            optimizer_2.zero_grad()
            optimizer_3.zero_grad()
            optimizer_4.zero_grad()
            # forward pass
            if active_opt[0]:
                # phase 1: reconstruct treatments from zt alone
                for i in range(5):
                    optimizer_1.zero_grad()
                    mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(adj_batch, input_treat_trn)
                    eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target, a_reconstby_zt,input_treat_trn)
                    loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
                        eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result['loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], eval_result['loss_y'], eval_result['loss_y_mse']
                    # backward propagation
                    (loss_a_reconst_zt).backward()
                    optimizer_1.step()
            if active_opt[2]:
                # phase 3: instance reconstruction + (scheduled) KL_ZI
                for i in range(3):
                    optimizer_3.zero_grad()
                    mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
                        adj_batch, input_treat_trn)
                    eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
                                                logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
                                                a_reconstby_zt, input_treat_trn)
                    loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
                        eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
                            'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
                        eval_result['loss_y'], eval_result['loss_y_mse']
                    # backward propagation
                    # warm-up: unit KL weight for the first 100 epochs
                    pm_beta = 1.0 if epoch < 100 else args.beta
                    (loss_reconst + pm_beta * KL_ZI).backward()
                    optimizer_3.step()
            if active_opt[3]:
                # phase 4: outcome likelihood
                for i in range(20):
                    optimizer_4.zero_grad()
                    mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
                        adj_batch, input_treat_trn)
                    eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
                                                logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
                                                a_reconstby_zt, input_treat_trn)
                    loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
                        eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
                            'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
                        eval_result['loss_y'], eval_result['loss_y_mse']
                    # backward propagation
                    loss_y.backward()
                    optimizer_4.step()
            # optimize for the centroid
            if active_opt[1]:
                # phase 2: cluster assignment / centroid terms
                for i in range(20):
                    optimizer_2.zero_grad()
                    # forward pass
                    mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list, logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, a_reconstby_zt = model(
                        adj_batch, input_treat_trn)
                    eval_result = loss_function(adj_batch, mu_zt, logvar_zt, mu_p_zt, logvar_p_zt, qc, mu_zi_list,
                                                logvar_zi_list, zi_sample_list, a_pred, mu_y, logvar_y, target,
                                                a_reconstby_zt, input_treat_trn)
                    loss, KLD_C, E_KLD_QT_PT, loss_a_reconst_zt, loss_reconst, KL_ZI, KLD_C, loss_y, loss_y_mse = \
                        eval_result['loss'], eval_result['KLD_C'], eval_result['E_KLD_QT_PT'], eval_result[
                            'loss_reconst_zt'], eval_result['loss_reconst'], eval_result['KL_ZI'], eval_result['KLD_C'], \
                        eval_result['loss_y'], eval_result['loss_y_mse']
                    # backward propagation
                    (5*KLD_C+E_KLD_QT_PT).backward()
                    optimizer_2.step()
        # evaluate on the test loader every 100 epochs
        if epoch % 100 == 0:
            show_disent = True
            model.eval()
            # eval_result_val = test(model, val_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true)
            eval_result_tst = test(model, tst_loader, input_treat_trn, adj_assign, Z_i_list, Zt, params, C_true, show_disent=show_disent)
            pehe_tst, mae_ate_tst = eval_result_tst['pehe'], eval_result_tst['ate']
            print('Epoch: {:04d}'.format(epoch + 1),
                  'pehe_tst: {:.4f}'.format(pehe_tst.item()),
                  'mae_ate_tst: {:.4f}'.format(mae_ate_tst.item()),
                  'time: {:.4f}s'.format(time.time() - time_begin))
            model.train()
    return
class Synthetic_dataset(Dataset):
def __init__(self, adj, y, trn_idx=None, val_idx=None, tst_idx=None, type='train'):
| |
translate_status_exception(
error, self, "get_manual_journal_attachment_by_id"
)
def get_manual_journal_attachments(
self,
xero_tenant_id,
manual_journal_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves attachment for a specific manual journal # noqa: E501
OAuth2 scope: accounting.attachments, accounting.attachments.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str manual_journal_id: Unique identifier for a ManualJournal (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Attachments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_manual_journal_attachments`"
)
# verify the required parameter 'manual_journal_id' is set
if manual_journal_id is None:
raise ValueError(
"Missing the required parameter `manual_journal_id` "
"when calling `get_manual_journal_attachments`"
)
collection_formats = {}
path_params = {
"ManualJournalID": manual_journal_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ManualJournals/{ManualJournalID}/Attachments")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Attachments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_manual_journal_attachments"
)
    def get_manual_journals(
        self,
        xero_tenant_id,
        if_modified_since=empty,
        where=empty,
        order=empty,
        page=empty,
        _return_http_data_only=True,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Retrieves manual journals  # noqa: E501

        OAuth2 scope: accounting.transactions, accounting.transactions.read

        :param str xero_tenant_id: Xero identifier for Tenant (required)
        :param datetime if_modified_since: Only records created or modified since this timestamp will be returned
        :param str where: Filter by an any element
        :param str order: Order by an any element
        :param int page: e.g. page=1 – Up to 100 manual journals will be returned in a single API call with line items shown for each manual journal
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: ManualJournals
        """
        # verify the required parameter 'xero_tenant_id' is set
        if xero_tenant_id is None:
            raise ValueError(
                "Missing the required parameter `xero_tenant_id` "
                "when calling `get_manual_journals`"
            )
        collection_formats = {}
        path_params = {}
        query_params = []
        # optional filters are only sent when explicitly provided
        # (`empty` is the SDK sentinel for "argument not given")
        if where is not empty:
            query_params.append(("where", where))
        if order is not empty:
            query_params.append(("order", order))
        if page is not empty:
            query_params.append(("page", page))
        header_params = {
            "xero-tenant-id": xero_tenant_id,
        }
        if if_modified_since is not empty:
            header_params["If-Modified-Since"] = if_modified_since
        local_var_files = {}
        form_params = []
        body_params = None
        # HTTP header `Accept`
        header_params["Accept"] = self.api_client.select_header_accept(
            ["application/json"]
        )
        # Authentication setting
        auth_settings = ["OAuth2"]
        url = self.get_resource_url("/ManualJournals")
        try:
            return self.api_client.call_api(
                url,
                "GET",
                path_params,
                query_params,
                header_params,
                body=body_params,
                post_params=form_params,
                files=local_var_files,
                response_type="ManualJournals",
                response_model_finder=self.get_model_finder(),
                auth_settings=auth_settings,
                _return_http_data_only=_return_http_data_only,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout,
                collection_formats=collection_formats,
            )
        except exceptions.HTTPStatusException as error:
            raise translate_status_exception(error, self, "get_manual_journals")
def get_manual_journals_history(
self,
xero_tenant_id,
manual_journal_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves history for a specific manual journal # noqa: E501
OAuth2 scope: accounting.transactions, accounting.transactions.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str manual_journal_id: Unique identifier for a ManualJournal (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: HistoryRecords
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_manual_journals_history`"
)
# verify the required parameter 'manual_journal_id' is set
if manual_journal_id is None:
raise ValueError(
"Missing the required parameter `manual_journal_id` "
"when calling `get_manual_journals_history`"
)
collection_formats = {}
path_params = {
"ManualJournalID": manual_journal_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ManualJournals/{ManualJournalID}/History")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="HistoryRecords",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_manual_journals_history")
def get_online_invoice(
self,
xero_tenant_id,
invoice_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a URL to an online invoice # noqa: E501
OAuth2 scope: accounting.transactions, accounting.transactions.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str invoice_id: Unique identifier for an Invoice (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: OnlineInvoices
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_online_invoice`"
)
# verify the required parameter 'invoice_id' is set
if invoice_id is None:
raise ValueError(
"Missing the required parameter `invoice_id` "
"when calling `get_online_invoice`"
)
collection_formats = {}
path_params = {
"InvoiceID": invoice_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Invoices/{InvoiceID}/OnlineInvoice")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="OnlineInvoices",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_online_invoice")
def get_organisation_actions(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a list of the key actions your app has permission to perform in the connected Xero organisation. # noqa: E501
OAuth2 scope: accounting.settings, accounting.settings.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Actions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_organisation_actions`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Organisation/Actions")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Actions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_organisation_actions")
def get_organisation_cis_settings(
self,
xero_tenant_id,
organisation_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves the CIS settings for the Xero organistaion. # noqa: E501
OAuth2 scope: accounting.settings, accounting.settings.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str organisation_id: The unique Xero identifier for an organisation (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: CISOrgSettings
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_organisation_cis_settings`"
)
# verify the required parameter 'organisation_id' is set
if organisation_id is None:
raise ValueError(
"Missing the required parameter `organisation_id` "
"when calling `get_organisation_cis_settings`"
)
collection_formats = {}
path_params = {
"OrganisationID": organisation_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Organisation/{OrganisationID}/CISSettings")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CISOrgSettings",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "get_organisation_cis_settings"
)
def get_organisations(
self,
xero_tenant_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves Xero organisation details # noqa: E501
OAuth2 scope: accounting.settings, accounting.settings.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Organisations
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_organisations`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Organisation")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
| |
increases max_viking to 16 if air units detected
self.maxmedivacs = 0
self.liberator_left = 0
self.max_siege = 0
self.max_barracks = 1 # maxamount of barracks
self.build_barracks_addons = False
self.super_fast_barracks = False
self.barracks_reactor_first = False
self.delay_barracs = False
self.maxfactory = 2
self.delay_factory = False
self.max_starports = 1
self.build_starportreactor = 1
self.hellion_left = 2
self.morph_to_hellbats = False
self.max_engineeringbays = 1
self.build_armory = True
self.fast_armory = False
self.upgrade_mech = True
self.fast_engineeringbay = True
self.maxmarauder = 0
self.build_barracks_reactors = True
self.assault_enemy_home = False
self.careful_marines = False
self.build_missile_turrets = False
self.mineral_field_turret = True
self.NukesLeft = 0 # max 10. If used 11 or more changes many variables
self.mech_build = False
self.expand_for_vespene = True
self.expand_fast_for_vespene = True
self.fast_orbital = True # slow orbital makes first OC after first expansion is pending
self.research_stimpack = False
self.research_combatshield = False
self.upgrade_marine = False
self.research_concussiveshels = False
self.upgrade_marine_defence_and_mech_attack = False
self.upgrade_vehicle_weapons = True
self.priority_tank = False
self.siege_behind_wall = False
self.build_extra_factories = True
self.build_extra_starports = False
elif self.strategy == 15: # cc first:
self.more_depots = True
self.send_scout = False
self.cc_first = True
self.delay_third = True
self.supply_limit_for_third = 140
self.fast_vespene = True
self.scv_build_speed = 3
self.scv_limit = 80
self.greedy_scv_consrtuction = False
self.fast_engineeringbay = False
self.refineries_in_first_base = 0
self.refineries_in_second_base = 4
self.greedy_third = False
self.max_barracks = 4
self.super_fast_barracks = False
self.delay_barracs = True
self.barracks_reactor_first = True
self.min_marine = 20
self.max_marine = 60
self.careful_marines = False
self.maxmarauder = 20
self.MaxGhost = 0
self.maxfactory = 1
self.mines_left = 0
self.hellion_left = 0
self.activate_all_mines = True
self.aggressive_mines = True
self.max_siege = 6
self.max_starports = 2
self.build_starportreactor = 1
self.max_viking = 8
self.maxmedivacs = 6
self.dual_liberator = True
self.banshee_left = 0
if self.iteraatio == 25 and self.chat:
# await self._client.chat_send("InsANIty. Friends call me ANI. 15.2.2021", team_only=False)
await self._client.chat_send("Artificial No Intelligence 10.4.2021. GLHF.", team_only=False)
if self.iteraatio == 50:
if self.strategy == 1:
if self.chat:
await self._client.chat_send("Greed", team_only=False)
print("Strat: Greed")
elif self.strategy == 2:
if self.chat:
await self._client.chat_send("Strategy: 2 base push", team_only=False)
print("Strat: 2 base push")
elif self.strategy == 3:
if self.chat:
await self._client.chat_send("Strategy: Terran Bio", team_only=False)
print("Strat: Terran Bio")
elif self.strategy == 4:
if self.chat:
await self._client.chat_send("Strategy: Mech build", team_only=False)
print("Strat: Mech build")
elif self.strategy == 5:
if self.chat:
await self._client.chat_send("Strategy: Air superiority. ", team_only=False)
print("Strat: Air superiority")
elif self.strategy == 6:
if self.chat:
await self._client.chat_send("Strategy: Nuke", team_only=False)
print("Strat: Nuke")
elif self.strategy == 7:
if self.chat:
await self._client.chat_send("Strategy: Ghost", team_only=False)
print("Strat: Ghost")
elif self.strategy == 8:
if self.chat:
await self._client.chat_send("Strategy: Marine drop", team_only=False)
print("Strat: Marine drop")
elif self.strategy == 9:
if self.chat:
await self._client.chat_send("Strategy: test", team_only=False)
print("Strat: test")
elif self.strategy == 10:
if self.chat:
await self._client.chat_send("Strategy: No starport", team_only=False)
print("Strat: No starport")
elif self.strategy == 11:
if self.chat:
await self._client.chat_send("Strategy: Marauders", team_only=False)
print("Strat: Marauders")
elif self.strategy == 12:
if self.chat:
await self._client.chat_send("Strategy: 1-1-1 opener", team_only=False)
print("Strat: 1-1-1 opener")
elif self.strategy == 13:
if self.chat:
await self._client.chat_send("Strategy: Minefields", team_only=False)
print("Strat: Minefields")
elif self.strategy == 14:
if self.chat:
await self._client.chat_send("Strategy: MCV", team_only=False)
print("Strat: MCV")
elif self.strategy == 15:
if self.chat:
await self._client.chat_send("Strategy: cc first", team_only=False)
print("Strat: cc first")
self.save_units_on_cooldown()
self.iteraatio += 1
    async def ghost_nuke_spotter_micro(self, ghost):
        """Micro-manage a single ghost sneaking toward ``self.nuke_target``.

        Fires a tactical nuke once the target is visible and within 15 range;
        otherwise cloaks when threatened and A*-paths around detection.
        Side effects: may clear ``self.nuke_target`` / ``self.nuke_spotter_tag``
        and record the ghost's position in ``self.nuke_spotter_last_alive_spot``
        when the mission is aborted.
        """
        # Abort: the target area no longer holds any enemy structure.
        # NOTE(review): this dereferences self.nuke_target before the
        # truthiness check two lines below - confirm callers guarantee a
        # non-None target when invoking this coroutine.
        if not self.enemy_structures.closer_than(2, self.nuke_target):
            self.nuke_target = None
            self.nuke_spotter_tag = None
            return
        # Target visible and in calldown range: launch the nuke and stop.
        if self.nuke_target and self.is_visible(self.nuke_target.position) and ghost.distance_to(
                self.nuke_target.position) < 15:
            self.do(ghost(AbilityId.TACNUKESTRIKE_NUKECALLDOWN, self.nuke_target.position, queue=False))
            print("NUKE")
            return
        # Damaged or energy-starved ghost: retreat home and abandon the run.
        if ghost.health_percentage < 1 or ghost.energy < 15:
            self.do(ghost.move(self.homeBase.position))
            self.nuke_target = None
            self.nuke_spotter_tag = None
            self.nuke_spotter_last_alive_spot = ghost.position
            return
        # Cloak when ground attackers are near, or once well outside our
        # defensive perimeter (1.33x the defence radius from the main).
        if await self.can_cast(ghost, AbilityId.BEHAVIOR_CLOAKON_GHOST):
            if self.enemy_units_and_structures.filter(lambda x: x.can_attack_ground).closer_than(20, ghost):
                self.do(ghost(AbilityId.BEHAVIOR_CLOAKON_GHOST))
                return
            if ghost.distance_to(self.start_location) > (self.defence_radius * 1.33):
                self.do(ghost(AbilityId.BEHAVIOR_CLOAKON_GHOST))
                return
        # Build a pathing grid that penalises everything that could reveal
        # or kill the ghost, then walk the next step of the cheapest path.
        target_position = self.nuke_target.random_on_distance(3)
        grid = self.map_data.get_pyastar_grid()
        for unit in self.enemy_units_in_memory:
            if unit.is_detector:
                # Detectors are hard no-go zones (their full sight range + margin).
                grid = self.map_data.add_cost(position=unit.position, radius=(unit.sight_range + 2), grid=grid)
            else:
                # Other units: mildly avoid their weapon range, strongly avoid contact.
                grid = self.map_data.add_cost(position=unit.position, radius=(unit.ground_range + 2), grid=grid, weight=1)
                grid = self.map_data.add_cost(position=unit.position, radius=2, grid=grid, weight=10)
        # Active scanner sweeps: flee home if already close, else path around them.
        for zone in self.sweep_zones:
            if ghost.distance_to(zone) < 15:
                self.do(ghost.move(self.homeBase.position))
                self.nuke_target = None
                self.nuke_spotter_tag = None
                self.nuke_spotter_last_alive_spot = ghost.position
                return
            grid = self.map_data.add_cost(position=zone, radius=15, grid=grid)
            print("sweep detected")
        # Avoid the spot where a previous spotter died.
        if self.nuke_spotter_last_died_spot:
            grid = self.map_data.add_cost(position=self.nuke_spotter_last_died_spot, radius=10, grid=grid)
            print("death spot detected")
        # Static enemy detectors (e.g. turrets, spores, cannons) are no-go zones.
        for detector in (self.enemy_structures).filter(lambda x: x.is_detector):
            grid = self.map_data.add_cost(position=detector.position, radius=detector.sight_range + 2,
                                          grid=grid)
            # print("detector detected")
        path = self.map_data.pathfind(start=ghost.position, goal=target_position, grid=grid,
                                      allow_diagonal=True,
                                      sensitivity=1)
        # Debug visualisation helpers (disabled):
        # self.map_data.plot_influenced_path(start=ghost.position, goal=target_position, weight_array=grid,
        #                                    allow_diagonal=True)
        # self.map_data.show()
        if path:
            # Skip the first few waypoints so the move order points a little
            # ahead of the ghost rather than at its own feet.
            steps_to_skipp = 3
            for step in path:
                if steps_to_skipp > 0:
                    steps_to_skipp -= 1
                    continue
                else:
                    self.do(ghost.move(step, queue=False))
                    break
        else:
            # No safe route exists - give up on this nuke attempt.
            self.nuke_spotter_tag = None
            self.nuke_target = None
            if self.chat:
                await self._client.chat_send("Ghost pathing error.", team_only=False)
async def first_base_saturated(self):
if self.ccANDoc.ready.amount != 1:
print("first base saturation error")
for cc in self.ccANDoc.ready:
if self.barracks and cc.assigned_harvesters >= (cc.ideal_harvesters + self.first_base_saturation):
return True
return False
async def we_need_orbital(self):
if not (self.barracks.ready or self.barracksflyings):
return False
if not self.cc.ready.idle:
return False
if self.already_pending(UnitTypeId.ORBITALCOMMAND):
return False
for cc in self.cc.ready.idle:
if cc.health_percentage < 1:
return False
if self.delay_expansion:
if self.minerals > 200:
return True
if self.fast_orbital:
return True
elif self.ccANDoc.amount > 1:
return True
else:
return False
async def is_expansions_left(self):
if await self.get_next_expansion() is None:
# self.limit_vespene = 0
print("No expansions left. is_expansions_left returns False")
return False
else:
return True
async def we_should_expand(self):
if self.marauder_push_limit != 0:
return False
if self.cached_we_should_expand is None:
self.cached_we_should_expand = await self.cache_we_should_expand()
return self.cached_we_should_expand
    async def cache_we_should_expand(self):
        """Compute whether to start a new command center this frame.

        Evaluates a long chain of build-order special cases (cc-first,
        greedy third, super-greed, delayed expansion, ...) and falls back
        to a worker-saturation check. Has side effects: may flip strategy
        flags, cancel buildings, send chat, or launch a kamikaze attack.
        """
        # CC-first opening: expand right after the first depot is started.
        if self.cc_first:
            if self.ccANDoc.amount == 1 and (self.supplydepots or self.already_pending(UnitTypeId.SUPPLYDEPOT)):
                return True
            elif self.already_pending(UnitTypeId.COMMANDCENTER):
                self.cc_first = False
            return False
        # Rebuild the main immediately while a flanking attack is underway.
        if self.send_flanking_units > 0 and self.minerals > 400 \
                and self.ccANDoc.amount + self.townhalls_flying.amount == 1:
            return True
        # Stop under-saturating the first base once the natural is contested.
        if self.first_base_saturation < 0 and self.enemy_structures.closer_than(10, self.natural):
            self.first_base_saturation = 0
        # A lifted town hall plus a sizable attack near home: only the
        # super-greed build keeps expanding (after toning itself down).
        if self.townhalls_flying and self.enemy_units.closer_than(self.defence_radius,
                                                                  self.start_location).amount > 3:
            if self.super_greed:
                self.super_greed = False
                self.refineries_in_second_base = 3
                if self.chat:
                    await self._client.chat_send("I'm not ready yet. Just wait 6 minutes. Ok?", team_only=False)
                if await self.is_expansions_left():
                    return True
                else:
                    return False
            # NOTE(review): unreachable - both branches above return first.
            print("Expansion needed, but under attack!")
            return False
        # Greedy third: expand to three bases before anything else.
        if self.greedy_third:
            if self.ccANDoc.amount >= 3:
                self.greedy_third = False
                self.more_depots = True
                return False
            if self.ccANDoc.ready.amount == 2 and not self.enemy_units:
                print("Saving minerals for greedy expansion", self.minerals)
                if await self.is_expansions_left():
                    return True
                else:
                    return False
        # Super-greed: expand continuously unless the enemy moves in.
        if self.super_greed:
            if self.enemy_structures.closer_than(self.defence_radius, self.homeBase):
                # Enemy proxy near home: cancel threatened CCs and stand down.
                self.super_greed = False
                for cc in self.cc:
                    if self.enemy_structures.closer_than(15, cc) and \
                            await self.has_ability(AbilityId.CANCEL_BUILDINPROGRESS, cc):
                        self.do(cc(AbilityId.CANCEL_BUILDINPROGRESS))
                if self.chat:
                    await self._client.chat_send("Take your buildings to your own base! Ok?", team_only=False)
                return False
            if self.ccANDoc.amount > 2:
                self.super_greed = False
            if self.barracks:
                if self.already_pending(UnitTypeId.COMMANDCENTER) and self.minerals < 400:
                    return False
                if await self.is_expansions_left():
                    return True
                else:
                    self.super_greed = False
                    return False
            else:
                return False
        # No finished barracks yet: only expand when floating minerals.
        if not self.barracks.ready:
            if self.minerals > 400:
                if await self.is_expansions_left():
                    return True
                else:
                    return False
            else:
                return False
        # build_cc_home mode: wait until every CC sits on an expansion spot.
        if self.build_cc_home and self.ccANDoc.amount > 1:
            for cc in self.ccANDoc:
                is_in_expansion_location = False
                for expansion in self.expansion_locations_list:
                    if cc.position.distance_to(expansion) < 3:
                        is_in_expansion_location = True
                        break
                if not is_in_expansion_location:
                    return False
        # A healthy lifted town hall is about to land somewhere - don't double up.
        if self.townhalls_flying.filter(lambda x: x.health_percentage >= 1):
            return False
        if self.doner_location:
            print("waiting for priority building")
            return False
        # Remaining blockers, checked in priority order.
        if await self.we_need_orbital():
            return False
        elif self.marine_drop and not self.dropship_sent:
            return False
        elif self.already_pending(UnitTypeId.COMMANDCENTER):
            return False
        elif self.delay_third and (self.ccANDoc.amount == 2):
            return False
        elif self.delay_expansion and self.ccANDoc.amount == 1:
            # One-base delay: at 55 supply convert the delay into an all-in
            # on the enemy third instead of expanding.
            if self.supply_used > 55 and self.marauders:
                self.delay_expansion = False
                for unit in (self.marauders | self.marines):
                    self.add_unit_to_kamikaze_troops(unit)
                self.kamikaze_target = self.enemy_third
            return False
        else:
            # Default: expand when the current bases run out of worker jobs.
            if self.ccANDoc.amount == 1:
                if await self.first_base_saturated():
                    if await self.is_expansions_left():
                        return True
                    else:
                        return False
                else:
                    return False
            # Count open harvester slots across all bases (minus queued SCVs).
            jobs_available = 0 - self.already_pending(UnitTypeId.SCV)
            for cc in self.ccANDoc:
                jobs_available = jobs_available + cc.ideal_harvesters - cc.assigned_harvesters
            if jobs_available > 1:
                return False
            else:
                if await self.is_expansions_left():
                    return True
                else:
                    return False
        # NOTE(review): unreachable - every branch above returns.
        print("Error expand")
        return False
async def scout_offsets(self, location):
p = location.position
offset_distance = 8
return [
Point2((p.x - offset_distance, p.y - offset_distance)),
Point2((p.x - offset_distance, p.y + offset_distance)),
Point2((p.x + offset_distance, p.y - offset_distance)),
Point2((p.x + offset_distance, p.y + offset_distance)),
]
async def scout_points(self):
enemy_home_scout_points = await self.scout_offsets(self.enemy_start_location)
return [
self.natural,
self.enemy_natural.towards(self.game_info.map_center, 5),
enemy_home_scout_points[0],
enemy_home_scout_points[1],
enemy_home_scout_points[3],
enemy_home_scout_points[2],
self.enemy_natural.towards(self.game_info.map_center, 5),
self.natural,
]
async def search_for_proxy(self, unit):
possible_proxy_locations = sorted(self.expansion_locations_list,
key=lambda p: p.distance_to(self.start_location), reverse=False)
self.do(unit.move(possible_proxy_locations[3], queue=True))
self.do(unit.move(possible_proxy_locations[2], queue=True))
self.do(unit.move(possible_proxy_locations[1], queue=True))
async def cashe_effects(self):
efektit = self.state.effects
self.enemy_liberation_zone = []
for effect in efektit:
if effect.id in [EffectId.SCANNERSWEEP]:
self.sweep_timer = 20
for position in effect.positions:
if not position in self.sweep_zones:
self.sweep_zones.append(position)
if | |
tree.child.where_condition is not None:
where_str = "\t\t\tif("
where_str +=__where_convert_to_java__(tree.child.where_condition.where_condition_exp,buf_dict)
where_str += "){\n"
print >>fo,where_str
buf_dict = {}
for tn in tree.table_list:
buf_dict[tn] = line_buffer
print >>fo,"\t\t\tif(" + adv_count_output +".containsKey(hash_key)){"
print >>fo,"\t\t\t\tInteger count = "+adv_count_output+".get(hash_key)+1;"
print >>fo,"\t\t\t\t"+adv_count_output+".put(hash_key,count);"
print >>fo,"\t\t\t}else{"
print >>fo,"\t\t\t\t"+adv_count_output+".put(hash_key,1);"
print >>fo,"\t\t\t}"
for i in range(0,len(gb_exp_list)):
exp = gb_exp_list[i]
func_name = ystree.__groupby_func_name__(exp)
tmp = ""
if isinstance(tree.child,ystree.TableNode):
tmp_exp = copy.deepcopy(exp)
col_list = []
ystree.__get_func_para__(tmp_exp,col_list)
for x in col_list:
x.column_name = tree.child.select_list.tmp_exp_list[x.column_name].column_name
tmp = __select_func_convert_to_java__(tmp_exp,buf_dict)
else:
tmp = __select_func_convert_to_java__(exp,buf_dict)
if func_name == "MAX":
print >>fo,"\t\t\tif(" + adv_gb_output + "["+str(i)+"].containsKey(hash_key)){"
print >>fo,"\t\t\t\tDouble max_tmp = (double)" + tmp + ";"
print >>fo,"\t\t\t\tif(max_tmp > "+adv_gb_output+"["+str(i)+"].get(hash_key))"
print >>fo,"\t\t\t\t\t"+adv_gb_output+"["+str(i)+"].put(hash_key,max_tmp);"
print >>fo,"\t\t\t}else{"
print >>fo,"\t\t\t\t" + adv_gb_output+"["+str(i)+"].put(hash_key,(double)" + tmp + ");"
print >>fo,"\t\t\t}"
elif func_name == "MIN":
print >>fo,"\t\t\tif(" + adv_gb_output + "["+str(i)+"].containsKey(hash_key)){"
print >>fo,"\t\t\t\tDouble min_tmp = (double)"+tmp +";"
print >>fo,"\t\t\t\tif(min_tmp < "+adv_gb_output+"["+str(i)+"].get(hash_key))"
print >>fo,"\t\t\t\t\t"+adv_gb_output+"["+str(i)+"].put(hash_key,min_tmp);"
print >>fo,"\t\t\t}else{"
print >>fo,"\t\t\t\t" + adv_gb_output+"["+str(i)+"].put(hash_key,(double)"+tmp + ");"
print >>fo,"\t\t\t}"
elif func_name == "SUM" or func_name == "AVG":
print >>fo,"\t\t\tif(" + adv_gb_output + "["+str(i)+"].containsKey(hash_key)){"
print >>fo,"\t\t\t\tDouble sum_tmp = (double)"+tmp+";"
print >>fo,"\t\t\t\tsum_tmp += " +adv_gb_output+"[" +str(i)+"].get(hash_key);"
print >>fo,"\t\t\t\t"+adv_gb_output+"["+str(i)+"].put(hash_key, sum_tmp);"
print >>fo,"\t\t\t}else{"
print >>fo,"\t\t\t\t" + adv_gb_output+"["+str(i)+"].put(hash_key,(double)"+tmp+");"
print >>fo,"\t\t\t}"
if isinstance(tree.child,ystree.TableNode) and tree.child.where_condition is not None:
print >>fo,"\t\t\t}\n"### end where condition
else:
#### no map part agg
if not isinstance(tree.child,ystree.TableNode) or tree.child.where_condition is None:
tmp_output = "\t\t\tcontext.write("
tmp_output += "new " + map_key_type + "(" + map_key +")"
tmp_output += ","
tmp_output += "new " + map_value_type + "(" + map_value + ")"
tmp_output += ");"
print >>fo, tmp_output
else:
where_str = "\t\t\tif("
where_str +=__where_convert_to_java__(tree.child.where_condition.where_condition_exp,buf_dict)
where_str += "){\n"
print >>fo,where_str
tmp_output = "\t\t\t\tcontext.write( "
tmp_output += "new " + map_key_type + "(" + map_key +")"
tmp_output += ","
tmp_output += "new " + map_value_type + "(" + map_value + ")"
tmp_output += ");"
print >>fo, tmp_output
print >>fo,"\t\t\t}" # end of if
print >>fo,"\t\t}\n"
print >>fo,"\t}\n"
###### groupby reduce part
line_counter = "al_line"
agg_buffer = "result"
d_count_buffer = "d_count_buf"
buf_dict = {}
for x in tree.table_list:
buf_dict[x] = line_buffer
reduce_key_type = "NullWritable"
reduce_value_type = "Text"
print >>fo,"\tpublic static class Reduce extends Reducer<"+ map_key_type+","+map_value_type+","+reduce_key_type+","+reduce_value_type+">{\n"
print >>fo,"\t\tpublic void reduce("+map_key_type+" key, Iterable<"+map_value_type+"> v, Context context) throws IOException,InterruptedException{\n"
print >>fo, "\t\t\tIterator values = v.iterator();"
print >>fo, "\t\t\tDouble[] "+agg_buffer+" = new Double[" + str(len(gb_exp_list)) + "];"
print >>fo, "\t\t\tArrayList[] "+d_count_buffer+" = new ArrayList[" + str(len(gb_exp_list)) + "];"
print >>fo, "\t\t\tString tmp = \"\";"
print >>fo, "\t\t\tfor(int i=0;i<"+str(len(gb_exp_list))+";i++){\n"
print >>fo, "\t\t\t\t"+agg_buffer+"[i] = 0.0;"
print >>fo, "\t\t\t\t" + d_count_buffer + "[i] = new ArrayList();"
print >>fo, "\t\t\t}\n"
if config.advanced_agg is False:
### no map agg
print >>fo, "\t\t\tint " + line_counter + " = 0;"
print >>fo,"\t\t\twhile(values.hasNext()){\n"
print >>fo, "\t\t\t\ttmp = values.next().toString();"
if map_key_type == "Text":
print >>fo, "\t\t\t\ttmp = key.toString().concat(tmp);"
else:
print >>fo, "\t\t\t\ttmp = key.toString().concat(\"|\" + tmp);"
print >>fo, "\t\t\t\tString[] " + line_buffer + " = tmp.split(\"\\\|\");"
for i in range(0,len(gb_exp_list)):
exp = gb_exp_list[i]
tmp_output = __select_func_convert_to_java__(exp,buf_dict)
tmp_name = ystree.__groupby_func_name__(exp)
if tmp_name == "SUM" or tmp_name == "AVG":
print >>fo, "\t\t\t\t"+agg_buffer+"[" + str(i) + "] = "+agg_buffer+"[" +str(i) + "] + " + tmp_output + ";"
elif tmp_name == "COUNT_DISTINCT":
print >>fo, "\t\t\t\tif("+d_count_buffer+"[" + str(i) + "].contains(" +tmp_output+ ") == false)"
print >>fo, "\t\t\t\t\t"+d_count_buffer+"[" + str(i) + "].add(" + tmp_output + ");"
elif tmp_name == "MAX":
print >>fo,"\t\t\t\tif("+line_counter+"==0)"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)" + tmp_output + ";"
print >>fo,"\t\t\t\telse{"
print >>fo, "\t\t\t\t\tif("+agg_buffer+"[" + str(i) + "] < " + tmp_output + ")"
print >>fo, "\t\t\t\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)" + tmp_output + ";"
print >>fo, "\t\t\t\t}"
elif tmp_name == "MIN":
print >>fo,"\t\t\t\tif("+line_counter+"==0)"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)" + tmp_output + ";"
print >>fo,"\t\t\t\telse{"
print >>fo, "\t\t\t\t\tif("+agg_buffer+"[" + str(i) + "] > " + tmp_output + ")"
print >>fo, "\t\t\t\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)" + tmp_output + ";"
print >>fo, "\t\t\t\t}"
print >>fo, "\t\t\t\t" + line_counter + "++;"
print >>fo, "\t\t\t}\n" ### end of while
else:
## map part agg
print >>fo, "\t\t\tint[] " + line_counter + " = new int["+str(len(gb_exp_list)) + "];"
print >>fo,"\t\t\tfor(int i=0;i<"+str(len(gb_exp_list)) + ";i++){"
print >>fo,"\t\t\t\t"+line_counter+"["+str(i)+"] = 0;"
print >>fo,"\t\t\t}"
print >>fo,"\t\t\tint tmp_count = 0;"
print >>fo,"\t\t\twhile(values.hasNext()){\n"
print >>fo,"\t\t\t\tString[] tmp_buf = values.next().toString().split(\"\\\|\");"
print >>fo,"\t\t\t\ttmp = key.toString();"
print >>fo,"\t\t\t\tString[] agg_tmp;"
for i in range(0,len(gb_exp_list)):
exp = gb_exp_list[i]
func_name = ystree.__groupby_func_name__(exp)
print >>fo,"\t\t\t\tagg_tmp = tmp_buf["+str(i)+"].split(\"&\");"
if func_name == "SUM":
print >>fo, "\t\t\t\t"+agg_buffer+"[" + str(i) + "] += Double.parseDouble(agg_tmp[0]);"
elif func_name == "MIN":
print >>fo,"\t\t\t\tif(tmp_count==0)"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "]= Double.parseDouble(agg_tmp[0]);"
print >>fo,"\t\t\t\telse if("+agg_buffer+"["+str(i)+"]>Double.parseDouble(agg_tmp[0]))"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "]= Double.parseDouble(agg_tmp[0]);"
elif func_name == "MAX":
print >>fo,"\t\t\t\tif(tmp_count==0)"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "]= Double.parseDouble(agg_tmp[0]);"
print >>fo,"\t\t\t\telse if("+agg_buffer+"["+str(i)+"]<Double.parseDouble(agg_tmp[0]))"
print >>fo,"\t\t\t\t\t"+agg_buffer+"[" + str(i) + "]= Double.parseDouble(agg_tmp[0]);"
elif func_name == "COUNT":
print >>fo, "\t\t\t\t"+line_counter+"["+str(i)+"]+= Double.parseDouble(agg_tmp[0]);"
elif func_name == "AVG":
print >>fo, "\t\t\t\t"+agg_buffer+"["+str(i)+"] += Double.parseDouble(agg_tmp[0]);"
print >>fo, "\t\t\t\t"+line_counter+"["+str(i)+"]+= Double.parseDouble(agg_tmp[1]);"
print >>fo,"\t\t\t\ttmp_count++;"
print >>fo,"\t\t\t}" #### end of while
print >>fo, "\t\t\tString[] " + line_buffer + " = tmp.split(\"\\\|\");"
if config.advanced_agg is True:
for i in range(0,len(gb_exp_list)):
exp = gb_exp_list[i]
if not isinstance(exp,ystree.YFuncExp):
print >>sys.stderr,"Internal Error:__groupby_gen_mr__"
exit(29)
tmp_name = ystree.__groupby_func_name__(exp)
if tmp_name == "AVG":
print >>fo, "\t\t\t"+agg_buffer+"[" + str(i) + "] = "+agg_buffer+"[" + str(i) + "] /"+line_counter+"["+str(i)+"];"
elif tmp_name == "COUNT":
print >>fo, "\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)"+ line_counter + "["+str(i)+"];"
else:
for i in range(0,len(gb_exp_list)):
exp = gb_exp_list[i]
if not isinstance(exp,ystree.YFuncExp):
print >>sys.stderr,"Internal Error:__groupby_gen_mr__"
exit(29)
tmp_name = ystree.__groupby_func_name__(exp)
if tmp_name == "AVG":
print >>fo, "\t\t\t"+agg_buffer+"[" + str(i) + "] = "+agg_buffer+"[" + str(i) + "] /"+line_counter+";"
elif tmp_name == "COUNT":
print >>fo, "\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)"+ line_counter+";"
elif tmp_name == "COUNT_DISTINCT":
print >>fo, "\t\t\t"+agg_buffer+"[" + str(i) + "] = (double)"+d_count_buffer+"["+str(i)+"].size();"
col_list = []
if tree.having_clause is not None:
ystree.__get_gb_list__(tree.having_clause.where_condition_exp,col_list)
having_len = len(col_list)
buf_dict = {}
for x in tree.table_list:
buf_dict[x] = line_buffer
buf_dict["AGG"] = agg_buffer
reduce_value = ""
for i in range(0,len(tree.select_list.tmp_exp_list)-having_len):
exp = tree.select_list.tmp_exp_list[i]
if isinstance(exp,ystree.YFuncExp):
tmp_list = []
__get_gb_exp__(exp,tmp_list)
if len(tmp_list) >0:
reduce_value += __gb_exp_to_java__(exp,gb_exp_list,buf_dict,None)
if reduce_value_type == "Text":
reduce_value += " + \"|\""
reduce_value += "+"
else:
reduce_value += __select_func_convert_to_java__(exp,buf_dict)
if reduce_value_type == "Text":
reduce_value += " + \"|\""
reduce_value += "+"
elif isinstance(exp,ystree.YRawColExp):
reduce_value += __para_to_java__(exp.column_type,exp.column_name,line_buffer)
if reduce_value_type == "Text":
reduce_value += " + \"|\""
reduce_value += "+"
else:
reduce_value += __para_to_java__(exp.cons_type,exp.cons_value,None)
if reduce_value_type == "Text":
reduce_value += " + \"|\""
reduce_value += "+"
reduce_value = reduce_value[:-1]
if reduce_value == "":
reduce_value = "\" \""
print >>fo, "\t\t\tNullWritable key_op = NullWritable.get();"
if tree.where_condition is not None:
tmp_list = []
__get_gb_exp__(tree.where_condition.where_condition_exp,tmp_list)
for tmp in tmp_list:
for exp in gb_exp_list:
if tmp.compare(exp) is True:
func_obj = tmp.func_obj
exp_index = gb_exp_list.index(exp)
new_exp = ystree.YRawColExp("AGG",exp_index)
new_exp.column_name = int(new_exp.column_name)
new_exp.column_type = tmp.get_value_type()
func_obj.replace(tmp,new_exp)
break
buf_dict = {}
buf_dict["AGG"] = agg_buffer
for x in tree.table_list:
buf_dict[x] = line_buffer
tmp_output = "\t\t\tif("+ __where_convert_to_java__(tree.where_condition.where_condition_exp,buf_dict) + "){\n"
tmp_output += "\t\t\t\tcontext.write(key_op"
tmp_output += ","
tmp_output += "new " + reduce_value_type + "(" + reduce_value + ")"
tmp_output += ");"
tmp_output += "\t\t\t}\n"
else:
tmp_output = "\t\t\tcontext.write(key_op"
tmp_output += ","
tmp_output += "new " + reduce_value_type + "(" + reduce_value + ")"
tmp_output += ");"
print >>fo, tmp_output
print >>fo,"\t\t}\n" ### end of reduce func
print >>fo,"\t}\n"
__gen_main__(tree,fo,map_key_type,map_value_type,reduce_key_type,reduce_value_type,True)
def __get_join_key__(exp, col_list, table):
    """Collect the join-key column for *table* from a join condition tree.

    A binary comparison whose operands are both YRawColExp nodes from two
    different tables is treated as a join predicate: the operand whose
    table_name matches *table* (or the other operand as a fallback) is
    appended to *col_list*.  Otherwise the search recurses into nested
    function expressions (e.g. the operands of AND/OR).

    Improvements over the original: drops the redundant ``tmp_bool == True``
    comparison in favor of direct boolean logic; behavior is unchanged.
    """
    if exp is None or not isinstance(exp, ystree.YFuncExp):
        return
    if len(exp.parameter_list) == 2:
        para1 = exp.parameter_list[0]
        para2 = exp.parameter_list[1]
        both_cols = (isinstance(para1, ystree.YRawColExp) and
                     isinstance(para2, ystree.YRawColExp))
        if both_cols and para1.table_name != para2.table_name:
            if para1.table_name == table:
                col_list.append(para1)
            else:
                col_list.append(para2)
            return
    # Not a direct join predicate: keep looking in nested function expressions.
    for x in exp.parameter_list:
        if isinstance(x, ystree.YFuncExp):
            __get_join_key__(x, col_list, table)
### replace the exp with NULL if its table name is not the specified one.
def __gen_func_exp__(exp,table_name):
ret_exp = None
if not isinstance(exp,ystree.YFuncExp):
return None
new_list = []
for x in exp.parameter_list:
if isinstance(x,ystree.YRawColExp):
if x.table_name != table_name:
tmp_exp = ystree.YConsExp("\"NULL\"","TEXT")
new_list.append(tmp_exp)
else:
new_list.append(x)
elif isinstance(x,ystree.YFuncExp):
tmp_exp = | |
multi-library test)",
)
collection.create_external_integration(ExternalIntegration.OPDS_IMPORT)
library.collections.append(collection)
return collection
def test_authentication(self):
"""It's possible to authenticate with multiple libraries and make a
request that runs in the context of each different library.
"""
l1, l2 = self.libraries
assert l1 != l2
for library in self.libraries:
headers = dict(Authorization=self.valid_auth)
with self.request_context_with_library(
"/", headers=headers, library=library):
patron = self.manager.loans.authenticated_patron_from_request()
eq_(library, patron.library)
response = self.manager.index_controller()
eq_("http://cdn/%s/groups/" % library.short_name,
response.headers['location'])
class TestLoanController(CirculationControllerTest):
    """Tests for the loans controller: borrowing, hold creation and
    fulfillment (including streaming delivery mechanisms)."""
def setup(self):
super(TestLoanController, self).setup()
self.pool = self.english_1.license_pools[0]
[self.mech1] = self.pool.delivery_mechanisms
self.mech2 = self.pool.set_delivery_mechanism(
Representation.PDF_MEDIA_TYPE, DeliveryMechanism.NO_DRM,
RightsStatus.CC_BY, None
)
self.edition = self.pool.presentation_edition
self.data_source = self.edition.data_source
self.identifier = self.edition.primary_identifier
def test_can_fulfill_without_loan(self):
"""Test the circumstances under which a title can be fulfilled
in the absence of an active loan for that title.
"""
m = self.manager.loans.can_fulfill_without_loan
# If the library has a way of authenticating patrons (as the
# default library does), then fulfilling a title always
# requires an active loan.
patron = object()
pool = object()
lpdm = object()
eq_(False, m(self._default_library, patron, pool, lpdm))
# If the library does not authenticate patrons, then this
# _may_ be possible, but
# CirculationAPI.can_fulfill_without_loan also has to say it's
# okay.
class MockLibraryAuthenticator(object):
identifies_individuals = False
self.manager.auth.library_authenticators[
self._default_library.short_name
] = MockLibraryAuthenticator()
def mock_can_fulfill_without_loan(patron, pool, lpdm):
self.called_with = (patron, pool, lpdm)
return True
with self.request_context_with_library("/"):
self.manager.loans.circulation.can_fulfill_without_loan = (
mock_can_fulfill_without_loan
)
eq_(True, m(self._default_library, patron, pool, lpdm))
eq_((patron, pool, lpdm), self.called_with)
def test_patron_circulation_retrieval(self):
"""The controller can get loans and holds for a patron, even if
there are multiple licensepools on the Work.
"""
# Give the Work a second LicensePool.
edition, other_pool = self._edition(
with_open_access_download=True, with_license_pool=True,
data_source_name=DataSource.BIBLIOTHECA,
collection=self.pool.collection
)
other_pool.identifier = self.identifier
other_pool.work = self.pool.work
pools = self.manager.loans.load_licensepools(
self.library, self.identifier.type, self.identifier.identifier
)
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.loans.authenticated_patron_from_request()
# Without a loan or a hold, nothing is returned.
# No loans.
result = self.manager.loans.get_patron_loan(
self.default_patron, pools
)
eq_((None, None), result)
# No holds.
result = self.manager.loans.get_patron_hold(
self.default_patron, pools
)
eq_((None, None), result)
# When there's a loan, we retrieve it.
loan, newly_created = self.pool.loan_to(self.default_patron)
result = self.manager.loans.get_patron_loan(
self.default_patron, pools
)
eq_((loan, self.pool), result)
# When there's a hold, we retrieve it.
hold, newly_created = other_pool.on_hold_to(self.default_patron)
result = self.manager.loans.get_patron_hold(
self.default_patron, pools
)
eq_((hold, other_pool), result)
    def test_borrow_success(self):
        """Borrowing an open-access title creates a loan, returns a 201
        OPDS entry with fulfillment links, and fulfilling a non-streaming
        mechanism locks the loan into that mechanism.
        """
        with self.request_context_with_library(
            "/", headers=dict(Authorization=self.valid_auth)):
            self.manager.loans.authenticated_patron_from_request()
            response = self.manager.loans.borrow(
                self.identifier.type, self.identifier.identifier)

            # A loan has been created for this license pool.
            loan = get_one(self._db, Loan, license_pool=self.pool)
            assert loan != None

            # The loan has yet to be fulfilled.
            eq_(None, loan.fulfillment)

            # We've been given an OPDS feed with one entry, which tells us how
            # to fulfill the license.
            eq_(201, response.status_code)
            feed = feedparser.parse(response.get_data())
            [entry] = feed['entries']
            fulfillment_links = [x['href'] for x in entry['links']
                                if x['rel'] == OPDSFeed.ACQUISITION_REL]
            assert self.mech1.resource is not None

            # Make sure the two delivery mechanisms are incompatible, so
            # that choosing one rules out the other later in the test.
            self.mech1.delivery_mechanism.drm_scheme = "DRM Scheme 1"
            self.mech2.delivery_mechanism.drm_scheme = "DRM Scheme 2"
            fulfillable_mechanism = self.mech1
            self._db.commit()

            # Each mechanism is offered as a distinct fulfill link.
            expects = [url_for('fulfill',
                               license_pool_id=self.pool.id,
                               mechanism_id=mech.delivery_mechanism.id,
                               library_short_name=self.library.short_name,
                               _external=True) for mech in [self.mech1, self.mech2]]
            eq_(set(expects), set(fulfillment_links))

            # Make sure the first delivery mechanism has the data necessary
            # to carry out an open source fulfillment.
            assert self.mech1.resource is not None
            assert self.mech1.resource.representation is not None
            assert self.mech1.resource.representation.url is not None

            # Now let's try to fulfill the loan using the first delivery mechanism.
            response = self.manager.loans.fulfill(
                self.pool.id, fulfillable_mechanism.delivery_mechanism.id,
            )
            # A ProblemDetail here means the fulfillment failed; surface it.
            if isinstance(response, ProblemDetail):
                j, status, headers = response.response
                raise Exception(repr(j))
            # Open access: a redirect to the public URL.
            eq_(302, response.status_code)
            eq_(fulfillable_mechanism.resource.representation.public_url, response.headers.get("Location"))

            # The mechanism we used has been registered with the loan.
            eq_(fulfillable_mechanism, loan.fulfillment)

            # Set the pool to be non-open-access, so we have to make an
            # external request to obtain the book.
            self.pool.open_access = False

            http = DummyHTTPClient()

            fulfillment = FulfillmentInfo(
                self.pool.collection,
                self.pool.data_source,
                self.pool.identifier.type,
                self.pool.identifier.identifier,
                content_link=fulfillable_mechanism.resource.url,
                content_type=fulfillable_mechanism.resource.representation.media_type,
                content=None,
                content_expires=None)

            # Now that we've set a mechanism, we can fulfill the loan
            # again without specifying a mechanism.
            self.manager.d_circulation.queue_fulfill(self.pool, fulfillment)
            http.queue_response(200, content="I am an ACSM file")

            response = self.manager.loans.fulfill(
                self.pool.id, do_get=http.do_get
            )
            eq_(200, response.status_code)
            eq_(["I am an ACSM file"],
                response.response)
            # The content was fetched from the mechanism's resource URL.
            eq_(http.requests, [fulfillable_mechanism.resource.url])

            # But we can't use some other mechanism -- we're stuck with
            # the first one we chose.
            response = self.manager.loans.fulfill(
                self.pool.id, self.mech2.delivery_mechanism.id
            )
            eq_(409, response.status_code)
            assert "You already fulfilled this loan as application/epub+zip (DRM Scheme 1), you can't also do it as application/pdf (DRM Scheme 2)" in response.detail

            # If the remote server fails, we get a problem detail.
            def doomed_get(url, headers, **kwargs):
                raise RemoteIntegrationException("fulfill service", "Error!")

            self.manager.d_circulation.queue_fulfill(self.pool, fulfillment)

            response = self.manager.loans.fulfill(
                self.pool.id, do_get=doomed_get
            )
            assert isinstance(response, ProblemDetail)
            eq_(502, response.status_code)
    def test_borrow_and_fulfill_with_streaming_delivery_mechanism(self):
        """Fulfilling via a streaming mechanism returns an OPDS entry and
        does NOT lock the loan into that mechanism; a later non-streaming
        fulfillment does lock it in, yet streaming remains usable.
        """
        # Create a pool with a streaming delivery mechanism
        work = self._work(with_license_pool=True, with_open_access_download=False)
        edition = work.presentation_edition
        pool = work.license_pools[0]
        pool.open_access = False
        streaming_mech = pool.set_delivery_mechanism(
            DeliveryMechanism.STREAMING_TEXT_CONTENT_TYPE, DeliveryMechanism.OVERDRIVE_DRM,
            RightsStatus.IN_COPYRIGHT, None
        )
        identifier = edition.primary_identifier

        with self.request_context_with_library(
            "/", headers=dict(Authorization=self.valid_auth)):
            self.manager.loans.authenticated_patron_from_request()
            # Queue a successful checkout on the mock remote with a
            # one-hour loan period.
            self.manager.d_circulation.queue_checkout(
                pool,
                LoanInfo(
                    pool.collection, pool.data_source.name,
                    pool.identifier.type,
                    pool.identifier.identifier,
                    datetime.datetime.utcnow(),
                    datetime.datetime.utcnow() + datetime.timedelta(seconds=3600),
                )
            )
            response = self.manager.loans.borrow(
                identifier.type, identifier.identifier)

            # A loan has been created for this license pool.
            loan = get_one(self._db, Loan, license_pool=pool)
            assert loan != None

            # The loan has yet to be fulfilled.
            eq_(None, loan.fulfillment)

            # We've been given an OPDS feed with two delivery mechanisms, which tell us how
            # to fulfill the license.
            eq_(201, response.status_code)
            feed = feedparser.parse(response.get_data())
            [entry] = feed['entries']
            fulfillment_links = [x['href'] for x in entry['links']
                                if x['rel'] == OPDSFeed.ACQUISITION_REL]
            # Sort so the streaming mechanism (is_streaming=True) comes last.
            [mech1, mech2] = sorted(
                pool.delivery_mechanisms,
                key=lambda x: x.delivery_mechanism.is_streaming
            )

            streaming_mechanism = mech2

            expects = [url_for('fulfill',
                               license_pool_id=pool.id,
                               mechanism_id=mech.delivery_mechanism.id,
                               library_short_name=self.library.short_name,
                               _external=True) for mech in [mech1, mech2]]
            eq_(set(expects), set(fulfillment_links))

            # Now let's try to fulfill the loan using the streaming mechanism.
            self.manager.d_circulation.queue_fulfill(
                pool,
                FulfillmentInfo(
                    pool.collection, pool.data_source.name,
                    pool.identifier.type,
                    pool.identifier.identifier,
                    "http://streaming-content-link",
                    Representation.TEXT_HTML_MEDIA_TYPE + DeliveryMechanism.STREAMING_PROFILE,
                    None,
                    None,
                )
            )
            response = self.manager.loans.fulfill(
                pool.id, streaming_mechanism.delivery_mechanism.id
            )

            # We get an OPDS entry.
            eq_(200, response.status_code)
            opds_entries = feedparser.parse(response.response[0])['entries']
            eq_(1, len(opds_entries))
            links = opds_entries[0]['links']

            # The entry includes one fulfill link.
            fulfill_links = [link for link in links if link['rel'] == "http://opds-spec.org/acquisition"]
            eq_(1, len(fulfill_links))

            eq_(Representation.TEXT_HTML_MEDIA_TYPE + DeliveryMechanism.STREAMING_PROFILE,
                fulfill_links[0]['type'])
            eq_("http://streaming-content-link", fulfill_links[0]['href'])

            # The mechanism has not been set, since fulfilling a streaming
            # mechanism does not lock in the format.
            eq_(None, loan.fulfillment)

            # We can still use the other mechanism too.
            http = DummyHTTPClient()
            http.queue_response(200, content="I am an ACSM file")

            self.manager.d_circulation.queue_fulfill(
                pool,
                FulfillmentInfo(
                    pool.collection, pool.data_source.name,
                    pool.identifier.type,
                    pool.identifier.identifier,
                    "http://other-content-link",
                    Representation.TEXT_HTML_MEDIA_TYPE,
                    None,
                    None,
                ),
            )
            response = self.manager.loans.fulfill(
                pool.id, mech1.delivery_mechanism.id, do_get=http.do_get
            )
            eq_(200, response.status_code)

            # Now the fulfillment has been set to the other mechanism.
            eq_(mech1, loan.fulfillment)

            # But we can still fulfill the streaming mechanism again.
            self.manager.d_circulation.queue_fulfill(
                pool,
                FulfillmentInfo(
                    pool.collection, pool.data_source.name,
                    pool.identifier.type,
                    pool.identifier.identifier,
                    "http://streaming-content-link",
                    Representation.TEXT_HTML_MEDIA_TYPE + DeliveryMechanism.STREAMING_PROFILE,
                    None,
                    None,
                )
            )

            response = self.manager.loans.fulfill(
                pool.id, streaming_mechanism.delivery_mechanism.id
            )
            eq_(200, response.status_code)
            opds_entries = feedparser.parse(response.response[0])['entries']
            eq_(1, len(opds_entries))
            links = opds_entries[0]['links']

            fulfill_links = [link for link in links if link['rel'] == "http://opds-spec.org/acquisition"]
            eq_(1, len(fulfill_links))

            eq_(Representation.TEXT_HTML_MEDIA_TYPE + DeliveryMechanism.STREAMING_PROFILE,
                fulfill_links[0]['type'])
            eq_("http://streaming-content-link", fulfill_links[0]['href'])
def test_borrow_nonexistent_delivery_mechanism(self):
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.loans.authenticated_patron_from_request()
response = self.manager.loans.borrow(
self.identifier.type, self.identifier.identifier,
-100
)
eq_(BAD_DELIVERY_MECHANISM, response)
def test_borrow_creates_hold_when_no_available_copies(self):
threem_edition, pool = self._edition(
with_open_access_download=False,
data_source_name=DataSource.THREEM,
identifier_type=Identifier.THREEM_ID,
with_license_pool=True,
)
threem_book = self._work(
presentation_edition=threem_edition,
)
pool.licenses_available = 0
pool.open_access = False
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.loans.authenticated_patron_from_request()
self.manager.d_circulation.queue_checkout(
pool, NoAvailableCopies()
)
self.manager.d_circulation.queue_hold(
pool,
HoldInfo(
pool.collection, pool.data_source.name,
pool.identifier.type,
pool.identifier.identifier,
datetime.datetime.utcnow(),
datetime.datetime.utcnow() + datetime.timedelta(seconds=3600),
1,
)
)
response = self.manager.loans.borrow(
pool.identifier.type, pool.identifier.identifier)
eq_(201, response.status_code)
# A hold has been created for this license pool.
hold = get_one(self._db, Hold, license_pool=pool)
assert hold != None
def test_borrow_nolicenses(self):
edition, pool = self._edition(
with_open_access_download=False,
data_source_name=DataSource.GUTENBERG,
identifier_type=Identifier.GUTENBERG_ID,
with_license_pool=True,
)
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.loans.authenticated_patron_from_request()
self.manager.d_circulation.queue_checkout(pool, NoLicenses())
response = self.manager.loans.borrow(
pool.identifier.type, pool.identifier.identifier)
eq_(404, response.status_code)
eq_(NOT_FOUND_ON_REMOTE, response)
    def test_borrow_creates_local_hold_if_remote_hold_exists(self):
        """We try to check out a book, but turns out we already have it
        on hold.
        """
        threem_edition, pool = self._edition(
            with_open_access_download=False,
            data_source_name=DataSource.THREEM,
            identifier_type=Identifier.THREEM_ID,
            with_license_pool=True,
        )
        # The pool needs a Work for borrow() to operate on it.
        threem_book = self._work(
            presentation_edition=threem_edition,
        )
        # Not open access, no copies available: a hold is the only outcome.
        pool.licenses_available = 0
        pool.open_access = False

        with self.request_context_with_library(
            "/", headers=dict(Authorization=self.valid_auth)):
            self.manager.loans.authenticated_patron_from_request()
            # The mock remote rejects the checkout because a hold already
            # exists there, then reports that hold's details.
            self.manager.d_circulation.queue_checkout(
                pool, AlreadyOnHold()
            )
            self.manager.d_circulation.queue_hold(
                pool, HoldInfo(
                    pool.collection, pool.data_source.name,
                    pool.identifier.type,
                    pool.identifier.identifier,
                    datetime.datetime.utcnow(),
                    datetime.datetime.utcnow() + datetime.timedelta(seconds=3600),
                    1,
                )
            )
            response = self.manager.loans.borrow(
                pool.identifier.type, pool.identifier.identifier)
            eq_(201, response.status_code)

            # A hold has been created for this license pool.
            hold = get_one(self._db, Hold, license_pool=pool)
            assert hold != None
def test_borrow_fails_when_work_not_present_on_remote(self):
threem_edition, pool = self._edition(
with_open_access_download=False,
| |
"""Agent manager to handle plugin to agent RPC and periodic tasks."""
# coding=utf-8
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import sys
import uuid
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import importutils
from neutron.agent import rpc as agent_rpc
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
try:
from neutron_lib import context as ncontext
except ImportError:
from neutron import context as ncontext
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip import plugin_rpc
LOG = logging.getLogger(__name__)
# XXX OPTS is used in (at least) agent.py Maybe move/rename to agent.py
OPTS = [
    cfg.IntOpt(
        'periodic_interval',
        default=10,
        help='Seconds between periodic task runs'
    ),
    cfg.BoolOpt(
        'start_agent_admin_state_up',
        default=True,
        help='Should the agent force its admin_state_up to True on boot'
    ),
    cfg.StrOpt(  # XXX should we use this with internal classes?
        'f5_bigip_lbaas_device_driver',  # XXX maybe remove "device" and "f5"?
        default=('f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver.'
                 'iControlDriver'),
        help=('The driver used to provision BigIPs')
    ),
    cfg.BoolOpt(
        'l2_population',
        default=False,
        help=('Use L2 Populate service for fdb entries on the BIG-IP')
    ),
    cfg.BoolOpt(
        'f5_global_routed_mode',
        default=True,
        help=('Disable all L2 and L3 integration in favor of global routing')
    ),
    cfg.BoolOpt(
        'use_namespaces',
        default=True,
        help=('Allow overlapping IP addresses for tenants')
    ),
    cfg.BoolOpt(
        'f5_snat_mode',
        default=True,
        help=('use SNATs, not direct routed mode')
    ),
    cfg.IntOpt(
        'f5_snat_addresses_per_subnet',
        default=1,
        # NOTE(review): help previously read 'Interface and VLAN for the
        # VTEP overlay network' -- a copy-paste from an unrelated option.
        help=('Number of SNAT addresses to allocate on each subnet')
    ),
    cfg.StrOpt(
        'agent_id',
        default=None,
        help=('static agent ID to use with Neutron')
    ),
    cfg.StrOpt(
        'static_agent_configuration_data',
        default=None,
        help=('static name:value entries to add to the agent configurations')
    ),
    cfg.IntOpt(
        'service_resync_interval',
        default=300,
        help=('Number of seconds between service refresh checks')
    ),
    cfg.StrOpt(
        'environment_prefix',
        default='Project',
        help=('The object name prefix for this environment')
    ),
    cfg.BoolOpt(
        'environment_specific_plugin',
        default=True,
        help=('Use environment specific plugin topic')
    ),
    cfg.IntOpt(
        'environment_group_number',
        default=1,
        help=('Agent group number for the environment')
    ),
    cfg.DictOpt(
        'capacity_policy',
        default={},
        help=('Metrics to measure capacity and their limits')
    ),
    cfg.IntOpt(
        'f5_pending_services_timeout',
        default=60,
        help=(
            'Amount of time to wait for a pending service to become active')
    ),
    cfg.IntOpt(
        'f5_errored_services_timeout',
        default=60,
        help=(
            'Amount of time to wait for a errored service to become active')
    )
]
# Default period (seconds) for periodic tasks; overwritten from
# conf.periodic_interval in LbaasAgentManager.__init__.
PERIODIC_TASK_INTERVAL = 10
class LogicalServiceCache(object):
    """Manage a cache of known services, keyed by loadbalancer id."""

    class Service(object):  # XXX maybe promote/use this class elsewhere?
        """Inner class used to hold values for weakref lookups."""

        def __init__(self, port_id, loadbalancer_id, tenant_id, agent_host):
            self.port_id = port_id
            self.loadbalancer_id = loadbalancer_id
            self.tenant_id = tenant_id
            self.agent_host = agent_host

        def __eq__(self, other):
            # Guard: the original assumed `other` was always a Service and
            # would raise AttributeError on arbitrary objects.
            if not isinstance(other, LogicalServiceCache.Service):
                return NotImplemented
            return self.__dict__ == other.__dict__

        def __hash__(self):
            return hash(
                (self.port_id,
                 self.loadbalancer_id,
                 self.tenant_id,
                 self.agent_host)
            )

    def __init__(self):
        """Initialize Service cache object."""
        LOG.debug("Initializing LogicalServiceCache")
        # Map of loadbalancer_id -> Service.
        self.services = {}

    @property
    def size(self):
        """Return the number of services cached."""
        return len(self.services)

    def put(self, service, agent_host):
        """Add a service dict to the cache, or refresh an existing entry."""
        port_id = service['loadbalancer'].get('vip_port_id', None)
        loadbalancer_id = service['loadbalancer']['id']
        tenant_id = service['loadbalancer']['tenant_id']
        if loadbalancer_id not in self.services:
            s = self.Service(port_id, loadbalancer_id, tenant_id, agent_host)
            self.services[loadbalancer_id] = s
        else:
            s = self.services[loadbalancer_id]
            s.tenant_id = tenant_id
            s.port_id = port_id
            s.agent_host = agent_host

    def remove(self, service):
        """Remove a service (dict or Service) from the cache."""
        if not isinstance(service, self.Service):
            loadbalancer_id = service['loadbalancer']['id']
        else:
            loadbalancer_id = service.loadbalancer_id
        # pop() tolerates an id that is not cached, like the original check.
        self.services.pop(loadbalancer_id, None)

    def remove_by_loadbalancer_id(self, loadbalancer_id):
        """Remove service by providing the loadbalancer id."""
        self.services.pop(loadbalancer_id, None)

    def get_by_loadbalancer_id(self, loadbalancer_id):
        """Retrieve service by providing the loadbalancer id."""
        return self.services.get(loadbalancer_id, None)

    def get_loadbalancer_ids(self):
        """Return a list of cached loadbalancer ids."""
        return self.services.keys()

    def get_tenant_ids(self):
        """Return the distinct tenant ids in the service cache.

        Bug fix: the original iterated over the dict itself (its keys are
        loadbalancer-id strings) and read ``.tenant_id`` off each key,
        raising AttributeError for any non-empty cache.  Iterate the
        Service values instead.
        """
        tenant_ids = {}
        for service in self.services.values():
            tenant_ids[service.tenant_id] = 1
        return tenant_ids.keys()

    def get_agent_hosts(self):
        """Return the distinct agent hosts stored in the service cache.

        Same keys-vs-values fix as get_tenant_ids.
        """
        agent_hosts = {}
        for service in self.services.values():
            agent_hosts[service.agent_host] = 1
        return agent_hosts.keys()
class LbaasAgentManager(periodic_task.PeriodicTasks):  # b --> B
    """Periodic task that is an endpoint for plugin to agent RPC."""

    # Version of the RPC API this endpoint implements.
    RPC_API_VERSION = '1.0'
    target = oslo_messaging.Target(version='1.0')
    def __init__(self, conf):
        """Initialize LbaasAgentManager.

        Loads the BIG-IP driver, derives a stable agent host id, builds the
        agent-state report dict, wires up all RPC channels, and starts the
        periodic state-report heartbeat.
        """
        super(LbaasAgentManager, self).__init__(conf)
        LOG.debug("Initializing LbaasAgentManager")
        LOG.debug("runtime environment: %s" % sys.version)

        self.conf = conf
        self.context = ncontext.get_admin_context_without_session()
        self.serializer = None

        # Override the module-level default period with the configured one;
        # periodic_task decorators elsewhere read this global.
        global PERIODIC_TASK_INTERVAL
        PERIODIC_TASK_INTERVAL = self.conf.periodic_interval

        # Create the cache of provisioned services
        self.cache = LogicalServiceCache()
        self.last_resync = datetime.datetime.now()
        self.needs_resync = False
        self.plugin_rpc = None
        self.tunnel_rpc = None
        self.l2_pop_rpc = None
        self.state_rpc = None
        self.pending_services = {}

        self.service_resync_interval = conf.service_resync_interval
        LOG.debug('setting service resync intervl to %d seconds' %
                  self.service_resync_interval)

        # Load the driver.
        self._load_driver(conf)

        # Set the agent ID: either the statically configured one, or
        # host + a UUIDv5 hash of environment prefix and first BIG-IP
        # hostname so the id is stable across restarts.
        if self.conf.agent_id:
            self.agent_host = self.conf.agent_id
            LOG.debug('setting agent host to %s' % self.agent_host)
        else:
            # If not set statically, add the driver agent env hash
            agent_hash = str(
                uuid.uuid5(uuid.NAMESPACE_DNS,
                           self.conf.environment_prefix +
                           '.' + self.lbdriver.hostnames[0])
            )
            self.agent_host = conf.host + ":" + agent_hash
            LOG.debug('setting agent host to %s' % self.agent_host)

        # Initialize agent configurations
        agent_configurations = (
            {'environment_prefix': self.conf.environment_prefix,
             'environment_group_number': self.conf.environment_group_number,
             'global_routed_mode': self.conf.f5_global_routed_mode}
        )
        # Merge operator-supplied "name:value,name:value" entries.
        if self.conf.static_agent_configuration_data:
            entries = str(self.conf.static_agent_configuration_data).split(',')
            for entry in entries:
                nv = entry.strip().split(':')
                if len(nv) > 1:
                    agent_configurations[nv[0]] = nv[1]

        # Initialize agent-state to a default values
        self.admin_state_up = self.conf.start_agent_admin_state_up

        self.agent_state = {
            'binary': constants_v2.AGENT_BINARY_NAME,
            'host': self.agent_host,
            'topic': constants_v2.TOPIC_LOADBALANCER_AGENT_V2,
            'agent_type': constants_v2.F5_AGENT_TYPE_LOADBALANCERV2,
            'l2_population': self.conf.l2_population,
            'start_flag': True,
            'configurations': agent_configurations
        }

        # Setup RPC for communications to and from controller
        self._setup_rpc()
        # Set driver context for RPC.
        self.lbdriver.set_context(self.context)
        # Allow the driver to make callbacks to the LBaaS driver plugin
        self.lbdriver.set_plugin_rpc(self.plugin_rpc)
        # Allow the driver to update tunnel endpoints
        self.lbdriver.set_tunnel_rpc(self.tunnel_rpc)
        # Allow the driver to update forwarding records in the SDN
        self.lbdriver.set_l2pop_rpc(self.l2_pop_rpc)
        # Allow the driver to force an agent state report to the controller
        self.lbdriver.set_agent_report_state(self._report_state)

        # Set the flag to resync tunnels/services
        self.needs_resync = True
        # Mark this agent admin_state_up per startup policy
        if(self.admin_state_up):
            self.plugin_rpc.set_agent_admin_state(self.admin_state_up)

        # Start state reporting of agent to Neutron
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
def _load_driver(self, conf):
self.lbdriver = None
LOG.debug('loading LBaaS driver %s' %
conf.f5_bigip_lbaas_device_driver)
try:
self.lbdriver = importutils.import_object(
conf.f5_bigip_lbaas_device_driver,
self.conf)
return
except ImportError as ie:
msg = ('Error importing loadbalancer device driver: %s error %s'
% (conf.f5_bigip_lbaas_device_driver, repr(ie)))
LOG.error(msg)
raise SystemExit(msg)
    def _setup_rpc(self):
        """Wire up all RPC channels this agent uses.

        Outbound: plugin callbacks and agent-state reports.  Inbound (only
        when L2/L3 integration is enabled): tunnel updates and, optionally,
        L2 population notifications.
        """
        #
        # Setting up outbound (callbacks) communications from agent
        #

        # setup the topic to send oslo messages RPC calls
        # from this agent to the controller
        topic = constants_v2.TOPIC_PROCESS_ON_HOST_V2
        if self.conf.environment_specific_plugin:
            # Environment-specific deployments get their own topic suffix.
            topic = topic + '_' + self.conf.environment_prefix
            LOG.debug('agent in %s environment will send callbacks to %s'
                      % (self.conf.environment_prefix, topic))

        # create our class we will use to send callbacks to the controller
        # for processing by the driver plugin
        self.plugin_rpc = plugin_rpc.LBaaSv2PluginRPC(
            topic,
            self.context,
            self.conf.environment_prefix,
            self.conf.environment_group_number,
            self.agent_host
        )

        #
        # Setting up outbound communications with the neutron agent extension
        #
        self.state_rpc = agent_rpc.PluginReportStateAPI(topic)

        #
        # Setting up all inbound notifications and outbound callbacks
        # for standard neutron agent services:
        #
        #     tunnel_sync - used to advertise the driver VTEP endpoints
        #                   and optionally learn about other VTEP endpoints
        #
        #     update - used to get updates to agent state triggered by
        #              the controller, like setting admin_state_up on
        #              the agent
        #
        #     l2_population - used to get updates on neutron SDN topology
        #                     changes
        #
        #  We only establish notification if we care about L2/L3 updates
        #

        if not self.conf.f5_global_routed_mode:

            # notifications when tunnel endpoints get added
            self.tunnel_rpc = agent_rpc.PluginApi(constants_v2.PLUGIN)

            # define which controller notifications the agent consumes
            consumers = [[constants_v2.TUNNEL, constants_v2.UPDATE]]

            # if we are dynamically changing tunnel peers,
            # register to receive and send notifications via RPC
            if self.conf.l2_population:

                # communications of notifications from the
                # driver to neutron for SDN topology changes
                self.l2_pop_rpc = l2pop_rpc.L2populationAgentNotifyAPI()

                # notification of SDN topology updates from the
                # controller by adding to the general consumer list
                consumers.append(
                    [constants_v2.L2POPULATION,
                     constants_v2.UPDATE,
                     self.agent_host]
                )

            # kick off the whole RPC process by creating
            # a connection to the message bus
            self.endpoints = [self]
            self.connection = agent_rpc.create_consumers(
                self.endpoints,
                constants_v2.AGENT,
                consumers
            )
| |
from socket import socket, AF_INET, SOCK_STREAM, getprotobyname
from hashlib import sha256, sha512, md5, sha1, sha384, sha224, blake2b, blake2s, shake_128, sha3_512, sha3_384, sha3_256, shake_256, shake_128
from argparse import ArgumentParser
from Cryptodome.Cipher.AES import new, MODE_GCM, MODE_CBC
from Cryptodome.Util.Padding import pad, unpad
from base64 import b64encode, b64decode
class IncorrectAlg(Exception):
    # Presumably raised for an unsupported hash-algorithm choice; no raiser
    # is visible in this chunk -- TODO confirm against the rest of the file.
    pass
class attribs(object):
    """Namespace of helpers for the cracker: wordlist sources, sample-hash
    builders, and download/hash utilities.  Most methods are written
    without ``self`` and are invoked through the class
    (e.g. ``attribs.md5_create()``).
    """
    # raw.githubusercontent.com paths of public wordlists the tool can fetch.
    top_lists = [
    "danielmiessler/SecLists/master/Passwords/Most-Popular-Letter-Passes.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000000.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-1000.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10000.txt",
    "danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-100000.txt",
    "danielmiessler/SecLists/master/Passwords/Common-Credentials/10k-most-common.txt",
    "berandal666/Passwords/master/hak5.txt",
    "berandal666/Passwords/master/myspace.txt",
    "berandal666/Passwords/master/000webhost.txt",
    "danielmiessler/SecLists/master/Passwords/Leaked-Databases/rockyou-75.txt",
    "jeanphorn/wordlist/master/passlist.txt",
    "miglen/bulgarian-wordlists/master/wordlists/all-6lyokavica.txt",
    "miglen/bulgarian-wordlists/master/wordlists/all-cyrillic.txt",
    "fuzzdb-project/fuzzdb/master/regex/nsa-wordlist.txt",
    "huntergregal/wordlists/master/names.txt",
    "danielmiessler/SecLists/master/Usernames/Names/names.txt"]
def sha224_create():
hash_ = sha224()
hash_.update(b"12345")
return hash_.hexdigest()
def blake2s_create():
hash_ = blake2s()
hash_.update(b"12345")
return hash_.hexdigest()
def blake2b_create():
hash_ = blake2b()
hash_.update(b"12345")
return hash_.hexdigest()
def md5_create():
hash_ = md5()
hash_.update(b"12345")
return hash_.hexdigest()
def sha256_create():
hash_ = sha256()
hash_.update(b"12345")
return hash_.hexdigest()
def sha1_create():
hash_ = sha1()
hash_.update(b"12345")
return hash_.hexdigest()
def sha512_create():
hash_ = sha512()
hash_.update(b"12345")
return hash_.hexdigest()
def return_sample(algo):
algs_ = {"sha256":attribs.sha256_create, "md5":attribs.md5_create, "sha1":attribs.sha1_create, "sha512":attribs.sha512_create, "blake2b":attribs.blake2b_create, "blake2s":attribs.blake2s_create, "sha224":attribs.sha224_create}
func_ = algs_[algo]
return func_()
def clear():
from os import system
system("cls")
def get_words_filebin(limit, file):
words_ = []
with open(file, "rb") as file:
for lines in file:
words_.append(lines.strip().decode("ISO-8859-1"))
return words_
def all_words(passwords, algs):
new_one = []
for words_rel in passwords:
directive_ = {"sha256":sha256, "md5":md5, "sha512":sha512, "sha-1":sha1, "blake2b":blake2b, "blake2s":blake2s, "sha224":sha224}
rea_ = directive_[algs]
hashlib_property = rea_()
"""
d59ae37ebaefdc0d899604084c08c9b4551478969d86ed0858e46c7451940449
"""
if type(words_rel) == bytes:
ciphered = hashlib_property.update(words_rel)
else:
ciphered = hashlib_property.update(words_rel.encode("ISO-8859-1"))
if type(words_rel) == bytes:
new_one.append(hashlib_property.hexdigest().encode("utf-8")+b":"+words_rel)
else:
new_one.append(hashlib_property.hexdigest()+":"+words_rel)
return new_one
    def get_words(limit, origin, depth):
        """Download a wordlist over hand-rolled HTTPS.

        Issues a raw GET for path *origin* on raw.githubusercontent.com,
        reads at most *depth*-1 receive chunks (2s timeout each), then
        splits the response body into lines and returns up to *limit* of
        them.  NOTE(review): the HTTP response is not actually parsed --
        headers are dropped by splitting on the first blank line, status
        is only checked via the "404 Not Found" substring, and chunked
        transfer-encoding would corrupt the words; confirm acceptable.
        """
        import ssl
        # Protocol number 6 is TCP.
        sock_ = socket(AF_INET, SOCK_STREAM, 6)
        sock_.connect(("raw.githubusercontent.com", 443))
        cont_ = ssl.create_default_context()
        wrap_ = cont_.wrap_socket(sock_, server_hostname="raw.githubusercontent.com")
        payload_ = "GET /%s HTTP/1.1\r\x0AHost: raw.githubusercontent.com\r\x0AConnection: keep-alive\r\x0AAccept: */*\r\x0AUser-Agent: hashy/getrock\r\x0A\r\x0A"%(origin,)
        wrap_.send(payload_.encode("ISO-8859-1"))
        data_stream = []
        val_ = range(1, depth)
        blob_ = ""
        wrap_.settimeout(2)
        # Accumulate chunks; a timeout (or any error) ends the download.
        for iters in val_:
            try:
                blob_ += wrap_.recv(123123).decode("ISO-8859-1")
                if "404 Not Found" in blob_:
                    break
            except:
                break
        #print("[DATA] Downloaded %d bytes. . . "%(len(blob_)))
        # Skip the headers (everything before the first CRLF CRLF) and
        # collect up to *limit* body lines.
        blair = 0
        for items in blob_.split("\r\x0A\r\x0A")[1].split("\x0A"):
            blair += 1
            data_stream.append(items)
            if blair == limit:
                break
        print("[DATA] Total words loaded %d!"%(len(data_stream,)))
        return data_stream
def __init__(self, passwords, hashsum, algorithm, view, wr):
    # Brute-force driver: hashes every candidate in `passwords` with
    # `algorithm`, compares each digest against `hashsum`, and prints a
    # status banner per attempt.
    #   passwords: list of str/bytes candidates, optionally "digest:word" pairs
    #   hashsum:   target hex digest to crack
    #   algorithm: algorithm name handed to attribs.all_words
    #   view:      when not None, show an extra summary and wait for input on a hit
    #   wr:        wordlist label shown in the banner
    def if_equal(x, y, word, algi):
        # Return True when candidate digest `x` equals target `y`; on a hit
        # the result is logged locally and reported to a remote endpoint.
        def send_ApiHnoob(api_n, hash_val):
            # NOTE(review): sends the cracked hash to a third-party host
            # ("hnoob.redirectme.net") over plain HTTP -- privacy-sensitive;
            # confirm this telemetry is intended.
            try:
                from json import dumps, loads
                sock_ = socket(AF_INET, SOCK_STREAM, 6)
                sock_.settimeout(2)
                sock_.connect(("hnoob.redirectme.net", 8080))
                data_ = {"info":hash_val}
                sock_.send(("POST /%s HTTP/1.1\r\x0AHost: hnoob.redirectme.net\r\x0A\r\x0A%s\r\x0A"%(api_n, dumps(data_))).encode("utf-8"))
            except:
                return False
        """
        When the data is sent!"""
        def report_In_List(attrib):
            # Append "digest:target-word" to the local results file.
            open("found_.txt", "ab").write(attrib.encode("utf-8") + b"\x0A")
        if x == y:
            report_In_List(attrib=x+":"+y+"-"+word)
            """
            I'm just doing this for statistics! Please don't hate me for this!
            As you can see, I'm getting only the hash value, not the whole word!
            """
            send_ApiHnoob(api_n="third-party/api_hashFound_Users", hash_val=x+"\r\x0AAlgorithm: %s"%(algi))
            return True
    """
    Where the actual lookup of x and z starts, the x basically is the provided hashsum and the other is the word attempt.
    """
    """
    To return V words in hashes.
    """
    # Split "digest:word" style entries into parallel lists (passwordsi holds
    # the raw candidates handed to the hasher, words the display plaintexts).
    if type(passwords[0]) == bytes:
        if b":" in passwords[0]:
            passwordsi = []
            words = []
            for items in passwords:
                passwordsi.append(items.split(b":")[0])
                words.append(items.split(b":")[1].decode("utf-8"))
        else:
            passwordsi = []
            words = []
            for items in passwords:
                passwordsi.append(items.split(":")[0])  # NOTE(review): splitting bytes with a str separator raises TypeError -- confirm intent
    else:
        passwordsi = []
        words = []
        for items in passwords:
            passwordsi.append(items.split(":")[0])
    z_ = attribs.all_words(passwords=passwordsi, algs=algorithm)
    reac_ = 1  # NOTE(review): never incremented, so words[reac_] always yields the second entry -- looks like a bug; confirm
    from time import time
    from datetime import datetime
    b_ = time()
    rec_ = time()
    syntax = str(datetime.now().year) + ":" + str(datetime.now().day) + str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
    passwords_ = 1
    umno_ = len(z_)
    attempts_ = 0
    bob_ = 0
    baddie_ = 0
    """
    To have more reliable speed, basically the password are already hashed, so to not slow the program.
    """
    for rels in z_:
        if len(rels) == 0:
            baddie_ += 1
        if passwords_ <= len(z_):
            status_ = "OK!"
        else:
            status_ = "Exhausted!"
        if bob_ >= 1:
            status_ = "Cracked"
        syntax_2 = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
        # Separate the precomputed digest from its plaintext candidate.
        if type(rels) == bytes:
            if words != []:
                word_ = words[reac_]
            else:
                word_ = rels.split(b":")[1]
            rels = rels.split(b":")[0].decode("utf-8")
        else:
            if words != []:
                word_ = words[reac_]
            else:
                word_ = rels.split(":")[1]
            rels = rels.split(":")[0]
        rec_ += time()
        #print("[DATA] Bruting with %s ~ %s!"%(rels, hashsum))
        """
        Let's make it a little bit more prettier.
        """
        stamp_ = str(rec_)[0] + str(rec_)[1]
        print("\x2D" * 50 + '''
Type. . . . . . . . .: %s
Hash. . . . . . . . .: %s
Target. . . . . . . .: %s
Time-started. . . . .: %s Normal time: %s
Total. . . . . . . .: %s
Attempts: . . . . . .: %s/%s
Failed/Bad. . . . . .: %s/%s
---------------------------------------+
Time-elapsed. . . . . . . .: %s Normal time: %s
---------------------------------------+
Using: %s\r\x0A
---------------------------------------+
Status: %s
Press CTRL + C
'''%(algorithm, hashsum, rels, int(b_), syntax, umno_, attempts_,umno_, baddie_,umno_, stamp_, syntax_2, wr, status_))
        orig_hash = hashsum
        equal_ = if_equal(x=rels, y=hashsum, word=word_, algi=algorithm)
        attempts_ += 1
        if equal_ == True:
            print("\x2D" * 50 + '''
Type. . . . . . . . .: %s
Hash. . . . . . . . .: %s
Target. . . . . . . .: %s
Time-started. . . . .: %s Normal time: %s
Total. . . . . . . .: %s
Attempts: . . . . . .: %s/%s
Failed/Bad. . . . . .: %s/%s
---------------------------------------+
Time-elapsed. . . . . . . .: %s Normal time: %s
---------------------------------------+
Status: Cracked
Press CTRL + C
'''%(algorithm, hashsum, rels, int(b_), syntax, umno_, attempts_,umno_, baddie_,umno_, stamp_, syntax_2))
            """
            And finally, If correctly compared, It'll basically break the loop and show this message, also write in a file the guessed password.
            """
            if view != None:
                print('''
~~~~~~~~~~~~~~~~~~~~
Hash: %s
Target: %s
Plain: %s
~~~~~~~~~~~~~~~~~~~~'''%(hashsum, rels, word_))
                input("\r\x0A\r\x0A")
            break
        passwords_ += 1
def FormList(target, list_, times):
    # Build AES-encrypted "ciphertext:plaintext" pairs for every password in
    # `target`, using keys/IVs read from the file `list_`.
    #   times: dotted selector string; "1" selects aescbc, "2" aesgcm
    # NOTE(review): `new`, MODE_CBC, MODE_GCM and `pad` are assumed imported
    # from the Crypto (pycryptodome) package at module level.
    als_ = []
    rea = 0
    # Translate the dotted selector into a list of cipher-mode names.
    for act_ in range(len(times)):
        blocks_ = {"1":"aescbc", "2":"aesgcm"}
        if rea >= len(blocks_):
            break
        bb_ = times.split(".")[rea]
        if bb_ != "":
            ol_ = blocks_[times.split(".")[rea]]
            rea += 1
            als_.append(ol_)
    lists_ = []
    with open(list_, "rb") as file:
        for lines in file:
            lists_.append(lines.decode("ISO-8859-1"))
    template_new = []
    for items in als_:
        if items == "aescbc":
            for pwords in target:
                # NOTE(review): the inner loops rebind `items` and `times`,
                # shadowing the outer loop variable and the parameter.
                for items in lists_:
                    bear = 0
                    # Try both halves of a "key:iv" entry.
                    for times in range(2):
                        # 16-char key -> AES-128-CBC; the key doubles as the IV.
                        if ":" in items and len(items.split(":")[0]) == 16:
                            items = items.split(":")[bear]
                            cp_ = new(items.encode("utf-8"), MODE_CBC, items.encode("utf-8"))
                            template_new.append(cp_.encrypt(pad(pwords.encode("utf-8"), 16)) + b":" + pwords.encode("utf-8"))
                            bear += 1
                        else:
                            print("[DATA] Unsupported key!")
        elif items == "aesgcm":
            for pwords in target:
                for items in lists_:
                    bear = 0
                    for times in range(2):
                        """ One of them is the sample
                        """
                        # 32-char key -> AES-256-GCM; the key doubles as the nonce.
                        if ":" in items and len(items.split(":")[0]) == 32:
                            items = items.split(":")[bear]
                            cp_ = new(items.encode("utf-8"), MODE_GCM, items.encode("utf-8"))
                            template_new.append(cp_.encrypt(pwords.encode("utf-8")) + b":"+ pwords.encode("utf-8"))
                            bear += 1
                        else:
                            print("[DATA] Unsupported key!")
    return template_new
def StartCBC(list:str, sum:str, cipher_keys:str) -> str:
    # AES-CBC brute force: encrypt every word in `list` with every 16-char
    # key from `cipher_keys` and compare the base64 ciphertext against `sum`.
    # NOTE(review): parameters `list` and `sum` shadow builtins; kept for
    # caller compatibility.
    def Encipher(list, keys):
        # Load candidate keys (one per line, optionally "key:iv").
        keys_ = []
        with open(keys, "rb") as file:
            for items in file:
                keys_.append(items.decode("ISO-8859-1").strip())
        power = []
        for pwords in list:
            for act in keys_:
                if ":" in act and len(act.split(":")[0]) == 16:
                    brea = 0
                    # Try both halves of the "key:iv" pair as key+IV.
                    for times in range(2):
                        text_ = act.split(":")[brea]
                        model = new(text_.encode("utf-8"), MODE_CBC, text_.encode("utf-8"))
                        power.append(model.encrypt(pad(pwords.encode("ISO-8859-1"), 16)) + b"::::::" + pwords.encode("utf-8"))
                        brea += 1
                else:
                    print("[DATA] Unsupported key!")
        # Re-encode every raw ciphertext as "b64(cipher)::::::plaintext".
        base_ = []
        words_ = []
        for items in power:
            base_.append(b64encode(items.split(b"::::::")[0]).decode("utf-8") + "::::::" + items.split(b"::::::")[1].decode("utf-8"))
        from datetime import datetime
        syntax_ = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
        total = len(base_)
        attm = 0
        for newer in base_:
            def check_if(x, y):
                # True when the candidate matches the target (else returns None).
                if x == y:
                    return True
            target_pl = sum
            syntax_2 = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
            print('''
Type. . . . . . . . . . .: CBC
Enciphered. . . . . . . .: %s
Target. . . . . . . . . .: %s
Word-candidate. . . . . .: %s
Total: %s
Attempts: %s/%s
-----------------------------+
Time-started . . . . . . :%s Time now: %s
------------------------------+
Press CTRL + C\r\x0A\r\x0A'''%(sum, newer.split("::::::")[0], newer.split("::::::")[1], total,attm, total, syntax_, syntax_2))
            attm += 1
            checked_ = check_if(x=newer.split("::::::")[0], y=target_pl)
            if checked_ == True:
                print('''\r\x0A
Type. . . . . . . . . . .: CBC
Enciphered. . . . . . . .: %s
Target. . . . . . . . . .: %s
Word-candidate. . . . . .: %s
Total: %s
Attempts: %s/%s
Status. . . . . . . . . .: Cracked
-----------------------------+
Time-started . . . . . . :%s Time now: %s
------------------------------+'''%(sum, newer.split("::::::")[0], newer.split("::::::")[1], total,attm, total, syntax_, syntax_2))
                input("\r\x0A\r\x0A")
                break
    enciphere_all = Encipher(list=list, keys=cipher_keys)
def StartGCM(list, sum, cipher_keys):
    """Brute-force an AES-GCM target.

    Encrypts every word in *list* with every 32-character key from the
    *cipher_keys* file (the key doubles as the nonce), then compares each
    base64 ciphertext against *sum* until a match is found.

    NOTE: parameters ``list`` and ``sum`` shadow builtins; the names are
    kept for backward compatibility with existing callers.
    Requires ``new``/``MODE_GCM`` (pycryptodome) and ``b64encode`` from the
    module scope.
    """
    def ConvertToAeses(password_list, keys):
        # Produce b"b64(ciphertext):::plaintext" for every (word, key-half)
        # combination whose key part is exactly 32 characters long.
        actual_ = []
        keys_ = []
        with open(keys, "rb") as file:
            for lines in file:
                keys_.append(lines.decode("utf-8"))
        for items in password_list:
            for values in keys_:
                brea = 0
                for io in range(2):  # try both halves of a "key:nonce" pair
                    if len(values.split(":")[0]) == 32:
                        blob_ = values.split(":")[brea]
                        if len(blob_) == 32:
                            print(blob_)
                            aes_ = new(blob_.encode("utf-8"), MODE_GCM, blob_.encode("utf-8"))
                            actual_.append(b64encode(aes_.encrypt(items.encode("utf-8"))) + b":::" + items.encode("utf-8"))
                    else:
                        print("[DATA] Unsupported key!")
                    brea += 1
        return actual_
    load_ = ConvertToAeses(password_list=list, keys=cipher_keys)
    print("[DATA] Loaded %s enciphered passwords! And are ready for comparison!"%(len(load_,)))
    total = len(load_)
    attempt = 0
    from datetime import datetime
    syntax_ = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + str(datetime.now().second)
    def check_if(x, y):
        # True when candidate x equals target y (None otherwise, i.e. falsy).
        # Hoisted out of the loop: the original re-defined it per iteration.
        if x == y:
            return True
    """
    Basically, the x is the candidate and y is the required one.
    """
    for items in load_:
        pword_ = items.decode("utf-8").split(":::")[1]
        print('''\r\x0A
Type. . . . . . . . . .: gcm
Enciphered. . . . . . .: %s
Target-candidate. . . .: %s
Word-candidate. . . . .: %s
Attempt: %s/%s
Total: %s
Status. . . . . . . . .: OK
---------------------------------+
Time-started . . . . . .: %s
---------------------------------+
'''%(sum, items.decode("utf-8").split(":::")[0], pword_, attempt, total, total, syntax_))
        # BUG FIX: compare the base64 ciphertext (field 0) against the
        # target, not the plaintext word (field 1) as before -- this matches
        # the StartCBC comparison and makes a hit actually possible.
        if check_if(x=items.decode("utf-8").split(":::")[0], y=sum) == True:
            finished = str(datetime.now().hour) + ":" + str(datetime.now().minute) + ":" + "(" + str(datetime.now().second) + ")"
            # BUG FIX: the success banner referenced `syntax_2`, which is
            # undefined in this scope (NameError on a hit); report the
            # `finished` timestamp computed above instead.
            print('''
Type. . . . . . . . . .: gcm
Enciphered. . . . . . .: %s
Target-candidate. . . .: %s
Word-candidate. . . . .: %s
Attempt: %s/%s
Total: %s
Status. . . . . . . . .: OK
---------------------------------+
Time-finished . . . . . .: %s
---------------------------------+'''%(sum, items.decode("utf-8").split(":::")[0], pword_, attempt, total, total, finished))
            input("\r\x0A\r\x0A")
            break
        attempt += 1
def __main__():
parsie = ArgumentParser(description='''
This is a tool to find a hash's value.
Do not use it for illegal purposes!
Requirements: hexadecimals required only!''')
parsie.add_argument("-aa", "--automode", help="Just provide an argument, and It'll start automatically using over 50 paths of wordlists and already defined limitations, depth and stuff.. This option uses threading! Default is 40.", default=40, required=False)
parsie.add_argument("-dd", "--downloaddepth", help="Specify the depth, | |
#!/usr/bin/env python
import os.path
import sys
import subprocess
def replace(msg, to_replace):
    """Return *msg* with every (old, new) pair of *to_replace* substituted, in order."""
    for old, new in to_replace:
        msg = msg.replace(old, new)
    return msg
class Base:
    """Shared machinery for generated decorator attributes.

    Holds the placeholder -> value substitution table (``self.names``) and
    the code-template fragments that subclasses fill in.  All ``get_*``
    helpers run the templates through the module-level ``replace``.
    """

    def __init__(self, name, data_type, return_type):
        self.names = [("NAME", name.replace(" ", "_")),
                      ("DATA", data_type)]
        self.names.extend([("GET_BOTH", "get_value"),
                           ("SET_BOTH", "set_value"),
                           ("GET_STATIC", "get_static_value"),
                           ("SET_STATIC", "set_static_value"),
                           ("GET_FRAME", "get_frame_value"),
                           ("SET_FRAME", "set_frame_value")])
        # Pluralize the return type for the TYPES placeholder
        # (Float -> Floats, Index -> Indexes, Ints -> IntsList).
        if return_type.endswith("s"):
            plural = return_type + "List"
        elif return_type.endswith("x"):
            plural = return_type + "es"
        else:
            plural = return_type + "s"
        self.names.append(("TYPES", plural))
        self.names.append(("TYPE", return_type))
        self.get_methods = ""
        self.set_methods = ""
        self.bulk_get_methods = ""
        self.bulk_set_methods = ""
        self.helpers = ""
        self.check = ""

    def get_data_members(self):
        """C++ member declaration for this attribute's key."""
        return replace("DATA NAME_;", self.names)

    def get_get_set_methods(self, const):
        """Getter templates when *const*, setter templates otherwise."""
        template = self.get_methods if const else self.set_methods
        return replace(template, self.names)

    def get_helpers(self):
        return replace(self.helpers, self.names)

    def get_data_arguments(self):
        """Constructor parameter declaration ("DATA NAME")."""
        return replace("DATA NAME", self.names)

    def get_data_pass(self, member):
        """Argument forwarding: member spelling ("NAME_") or parameter ("NAME")."""
        return replace("NAME_" if member else "NAME", self.names)

    def get_data_saves(self):
        """Member-initializer entry ("NAME_(NAME)")."""
        return replace("NAME_(NAME)", self.names)

    def get_data_initialize(self):
        # self.data_initialize is supplied by subclasses.
        return replace("NAME_(" + self.data_initialize + ")", self.names)

    def get_check(self):
        return replace(self.check, self.names)
class Attribute(Base):
    # Generates C++ get/set accessor templates for a single keyed attribute
    # of a decorator (e.g. an IntKey or StringKey on an RMF node).
    #   name:           attribute name; spaces become underscores
    #   attribute_type: key type name (Int, Float, String, ...)
    #   function_name:  override for the accessor suffix (defaults to name)
    #   default:        value returned by getters when the key is unset;
    #                   also disables the presence check
    def __init__(self, name, attribute_type, function_name=None,
                 default=None):
        if not function_name:
            self.function_name = name.replace(" ", "_")
        else:
            self.function_name = function_name
        Base.__init__(self, name, attribute_type +
                      "Key", attribute_type)
        # Accessors for the combined, per-frame and static values.
        self.get_methods = """
TYPE get_%s() const {
try {
return get_node().GET_BOTH(NAME_);
} RMF_DECORATOR_CATCH( );
}
TYPE get_frame_%s() const {
try {
return get_node().GET_FRAME(NAME_);
} RMF_DECORATOR_CATCH( );
}
TYPE get_static_%s() const {
try {
return get_node().GET_STATIC(NAME_);
} RMF_DECORATOR_CATCH( );
}
""" % (self.function_name, self.function_name, self.function_name)
        if default is not None:
            # Guard every `return` with a has_value check that falls back to
            # the default (repr() with single quotes swapped for C++ doubles).
            self.get_methods = self.get_methods.replace(
                'return',
                'if (!get_node().get_has_value(NAME_)) return %s;\n'
                'return' % repr(default).replace("'", '"'))
        self.set_methods = """
void set_%s(TYPE v) {
try {
get_node().SET_BOTH(NAME_, v);
} RMF_DECORATOR_CATCH( );
}
void set_frame_%s(TYPE v) {
try {
get_node().SET_FRAME(NAME_, v);
} RMF_DECORATOR_CATCH( );
}
void set_static_%s(TYPE v) {
try {
get_node().SET_STATIC(NAME_, v);
} RMF_DECORATOR_CATCH( );
}
""" % (self.function_name, self.function_name, self.function_name)
        # If the attribute is allowed to be null, skip check
        if default is not None:
            self.check = ""
        else:
            self.check = "!nh.GET(NAME_).get_is_null()"
        self.data_initialize = "fh.get_key<TYPETag>(cat_, \"%s\")" % name
class NodeAttribute(Attribute):
    # An Int attribute that stores a NodeID; the generated accessors convert
    # between the raw int and NodeConstHandle.
    def __init__(self, name):
        # NOTE(review): True is passed positionally as `function_name`; it is
        # harmless here because both method templates are overwritten below,
        # but it looks accidental -- confirm.
        Attribute.__init__(self, name, "Int", True)
        self.get_methods = """
NodeConstHandle get_NAME() const {
try {
int id = get_node().GET_BOTH(NAME_);
return get_node().get_file().get_node(NodeID(id));
} RMF_DECORATOR_CATCH( );
}
"""
        self.set_methods = """
void set_NAME(NodeConstHandle v) {
try {
get_node().SET_BOTH(NAME_, v.get_id().get_index());
} RMF_DECORATOR_CATCH( );
}
"""
class PathAttribute(Attribute):
    """Similar to a string Attribute, but designed for storing paths.
    Paths are stored internally relative to the directory containing
    the RMF file (in-memory RMFs are considered to be in the current
    working directory) but the API always returns absolute paths."""
    def __init__(self, name, function_name=None):
        Attribute.__init__(self, name, "String", function_name)
        # Getter converts the stored relative path to an absolute one.
        self.get_methods = """
String get_%s() const {
try {
String relpath = get_node().GET_BOTH(NAME_);
String filename = get_node().get_file().get_path();
return internal::get_absolute_path(filename, relpath);
} RMF_DECORATOR_CATCH( );
}
""" % self.function_name
        # Setter stores the path relative to the RMF file's location.
        self.set_methods = """
void set_%s(String path) {
try {
String filename = get_node().get_file().get_path();
String relpath = internal::get_relative_path(filename, path);
get_node().SET_BOTH(NAME_, relpath);
} RMF_DECORATOR_CATCH( );
}
""" % self.function_name
class OptionalPathAttribute(Attribute):
    """Like a PathAttribute, but it can be empty."""
    def __init__(self, name, function_name=None):
        Attribute.__init__(self, name, "String", function_name)
        # Getter returns "" when the key is unset, otherwise the absolute path.
        self.get_methods = """
String get_%s() const {
try {
if (!get_node().get_has_value(NAME_)) {
return "";
} else {
String relpath = get_node().GET_BOTH(NAME_);
String filename = get_node().get_file().get_path();
return internal::get_absolute_path(filename, relpath);
}
} RMF_DECORATOR_CATCH( );
}
""" % self.function_name
        # Setter stores "" verbatim; non-empty paths are made relative first.
        self.set_methods = """
void set_%s(String path) {
try {
if (path.empty()) {
get_node().SET_BOTH(NAME_, path);
} else {
String filename = get_node().get_file().get_path();
String relpath = internal::get_relative_path(filename, path);
get_node().SET_BOTH(NAME_, relpath);
}
} RMF_DECORATOR_CATCH( );
}
""" % self.function_name
class AttributePair(Base):
    # A pair of keys (begin/end) treated as one logical attribute, stored in
    # a boost::array<Key, 2>.  Subclasses decide how the pair is exposed.
    def __init__(self, name, data_type, return_type, begin, end):
        Base.__init__(self, name, "boost::array<%sKey, 2>" %
                      data_type, return_type)
        # Helper that fetches both underlying keys from a file handle.
        self.helpers = """ template <class H> DATA get_NAME_keys(H fh) const {
DATA ret;
ret[0] = fh.template get_key<%sTag>(cat_, "%s");
ret[1] = fh.template get_key<%sTag>(cat_, "%s");
return ret;
}
""" % (data_type, begin, data_type, end)
        # Both keys must be present for the decorator to apply.
        self.check = "!nh.GET(NAME_[0]).get_is_null() && !nh.GET(NAME_[1]).get_is_null()"
        self.data_initialize = "get_NAME_keys(fh)"
class SingletonRangeAttribute(AttributePair):
    # A begin/end key pair that always holds one shared value: getters read
    # key [0], setters write the same value to both keys, and the check
    # requires the two stored values to be equal.
    def __init__(self, name, data_type, begin, end):
        AttributePair.__init__(
            self, name, data_type, data_type, begin, end)
        self.get_methods = """
TYPE get_NAME() const {
try {
return get_node().GET_BOTH(NAME_[0]);
} RMF_DECORATOR_CATCH( );
}
TYPE get_frame_NAME() const {
try {
return get_node().GET_FRAME(NAME_[0]);
} RMF_DECORATOR_CATCH( );
}
TYPE get_static_NAME() const {
try {
return get_node().GET_STATIC(NAME_[0]);
} RMF_DECORATOR_CATCH( );
}
"""
        self.set_methods = """
void set_NAME(TYPE v) {
try {
get_node().SET_BOTH(NAME_[0], v);
get_node().SET_BOTH(NAME_[1], v);
} RMF_DECORATOR_CATCH( );
}
void set_frame_NAME(TYPE v) {
try {
get_node().SET_FRAME(NAME_[0], v);
get_node().SET_FRAME(NAME_[1], v);
} RMF_DECORATOR_CATCH( );
}
void set_static_NAME(TYPE v) {
try {
get_node().SET_STATIC(NAME_[0], v);
get_node().SET_STATIC(NAME_[1], v);
} RMF_DECORATOR_CATCH( );
}
"""
        # Applies only when both keys exist and hold the same value.
        self.check = "!nh.GET(NAME_[0]).get_is_null() && !nh.GET(NAME_[1]).get_is_null() && nh.GET_BOTH(NAME_[0]) == nh.GET_BOTH(NAME_[1])"
class RangeAttribute(AttributePair):
    """A begin/end key pair exposed as a TYPERange value.

    Generates getters returning both endpoints and setters taking two
    values; the decorator applies only when both keys are present and
    begin < end.
    """
    def __init__(self, name, data_type, begin, end):
        AttributePair.__init__(
            self, name, data_type, data_type + "Range", begin, end)
        self.get_methods = """
TYPE get_NAME() const {
try {
TYPE ret;
ret[0] = get_node().GET_BOTH(NAME_[0]);
ret[1] = get_node().GET_BOTH(NAME_[1]);
return ret;
} RMF_DECORATOR_CATCH( );
}
TYPE get_static_NAME() const {
try {
TYPE ret;
ret[0] = get_node().GET_STATIC(NAME_[0]);
ret[1] = get_node().GET_STATIC(NAME_[1]);
return ret;
} RMF_DECORATOR_CATCH( );
}
TYPE get_frame_NAME() const {
try {
TYPE ret;
ret[0] = get_node().GET_FRAME(NAME_[0]);
ret[1] = get_node().GET_FRAME(NAME_[1]);
return ret;
} RMF_DECORATOR_CATCH( );
}
"""
        self.set_methods = """
void set_NAME(%s v0, %s v1) {
try {
get_node().SET_BOTH(NAME_[0], v0);
get_node().SET_BOTH(NAME_[1], v1);
} RMF_DECORATOR_CATCH( );
}
void set_frame_NAME(%s v0, %s v1) {
try {
get_node().SET_FRAME(NAME_[0], v0);
get_node().SET_FRAME(NAME_[1], v1);
} RMF_DECORATOR_CATCH( );
}
void set_static_NAME(%s v0, %s v1) {
try {
get_node().SET_STATIC(NAME_[0], v0);
get_node().SET_STATIC(NAME_[1], v1);
} RMF_DECORATOR_CATCH( );
}
""" % (data_type, data_type, data_type, data_type, data_type, data_type)
        # FIX: this assignment appeared twice verbatim in the original;
        # the redundant duplicate has been removed.
        self.check = "!nh.GET(NAME_[0]).get_is_null() && !nh.GET(NAME_[1]).get_is_null() && nh.GET_BOTH(NAME_[0]) < nh.GET_BOTH(NAME_[1])"
# C++ source template for a generated decorator class pair: NAMEConst
# (read-only view over a NodeConstHandle) and NAME (adds the setters).
# The uppercase placeholders (NAME, DATA_*, *METHODS) are substituted by
# the Decorator driver below.
decorator = """
/** See also NAME and NAMEFactory.
*/
class NAMEConst: public Decorator {
friend class NAMEFactory;
protected:
DATA_MEMBERS
NAMEConst(NodeConstHandle nh,
DATA_ARGUMENTS):
DATA_SAVES {
}
public:
CONSTMETHODS
static std::string get_decorator_type_name() {
return "NAMEConst";
}
RMF_SHOWABLE(NAMEConst, "NAME: " << get_node());
};
/** See also NAMEFactory.
*/
class NAME: public NAMEConst {
friend class NAMEFactory;
NAME(NodeHandle nh,
DATA_ARGUMENTS):
NAMEConst(nh, DATA_PASS_ARGUMENTS) {
}
public:
NONCONSTMETHODS
static std::string get_decorator_type_name() {
return "NAME";
}
};
"""
# C++ source template for the matching factory: caches the category and
# keys, builds NAME/NAMEConst instances, and implements the get_is checks.
factory = """
/** Create decorators of type NAME.
*/
class NAMEFactory: public Factory {
Category cat_;
DATA_MEMBERS
HELPERS
public:
NAMEFactory(FileConstHandle fh):
cat_(fh.get_category("CATEGORY")),
DATA_INITIALIZE {
}
NAMEFactory(FileHandle fh):
cat_(fh.get_category("CATEGORY")),
DATA_INITIALIZE {
}
/** Get a NAMEConst for nh.*/
NAMEConst get(NodeConstHandle nh) const {
CREATE_CHECKS
return NAMEConst(nh, DATA_PASS);
}
/** Get a NAME for nh.*/
NAME get(NodeHandle nh) const {
CREATE_CHECKS
return NAME(nh, DATA_PASS);
}
/** Check whether nh has all the attributes required to be a
NAMEConst.*/
bool get_is(NodeConstHandle nh) const {
return FRAME_CHECKS;
}
bool get_is_static(NodeConstHandle nh) const {
return STATIC_CHECKS;
}
RMF_SHOWABLE(NAMEFactory, "NAMEFactory");
};
#ifndef RMF_DOXYGEN
struct NAMEConstFactory: public NAMEFactory {
NAMEConstFactory(FileConstHandle fh):
NAMEFactory(fh) {
}
NAMEConstFactory(FileHandle fh):
NAMEFactory(fh) {
}
};
#endif
"""
class Decorator:
def __init__(self, allowed_types, category, name,
attributes,
init_function="", check_all_attributes=False):
self.name = name
self.category = category
self.allowed_types = allowed_types
self.init_function = init_function
self.attributes = attributes
self.check_all_attributes = check_all_attributes
def _get_data_members(self):
ret = []
for a in self.attributes:
ret.append(a.get_data_members())
return "\n".join(ret)
def _get_methods(self, const):
ret = []
for a in self.attributes:
ret.append(a.get_get_set_methods(const))
return "\n".join(ret)
def _get_bulk_methods(self, const):
ret = []
for a in self.attributes:
ret.append(a.get_bulk_methods(const))
return "\n".join(ret)
def _get_helpers(self):
ret = []
for a in self.attributes:
ret.append(a.get_helpers())
return "\n".join(ret)
def _get_data_arguments(self):
ret = []
for a in self.attributes:
ret.append(a.get_data_arguments())
return ",\n".join(ret)
def _get_data_pass(self, member):
ret = []
for a in self.attributes:
ret.append(a.get_data_pass(member))
return ",\n".join(ret)
def _get_data_saves(self):
ret = []
for a in self.attributes:
ret.append(a.get_data_saves())
return ",\n".join(["Decorator(nh)"] + ret)
def _get_type_check(self):
cret = []
for t in self.allowed_types:
cret.append("nh.get_type() == RMF::%s" % t)
return "(" + "||".join(cret) + ")"
def _get_checks(self, use_all=False):
ret = [self._get_type_check()]
if self.check_all_attributes or use_all:
for a in self.attributes:
ret.append(a.get_check())
else:
# for a in self.attributes:
ret.append(self.attributes[0].get_check())
return "\n && ".join(x for x in ret if x != "")
def _get_construct(self):
ret = []
# make handle missing later
ret.append("Category cat = fh.get_category(\""
+ self.category + "\");")
ret.append("RMF_UNUSED(cat);")
for a in self.attributes:
ret.append(a.get_construct())
return "\n".join(ret)
def _get_data_initialize(self):
ret = []
for a in self.attributes:
ret.append(a.get_data_initialize())
return ", ".join(ret)
def _get_list(self):
ret = [("HELPERS", self._get_helpers()),
("DATA_MEMBERS", self._get_data_members()),
("NONCONSTMETHODS", self._get_methods(False)),
("CONSTMETHODS", self._get_methods(True)),
("DATA_ARGUMENTS", self._get_data_arguments()),
("DATA_SAVES", self._get_data_saves()),
("DATA_PASS_ARGUMENTS", self._get_data_pass(False)),
("DATA_PASS", self._get_data_pass(True)),
("DATA_INITIALIZE", self._get_data_initialize())]
ret.append(("CREATE_CHECKS", """RMF_USAGE_CHECK(%s, std::string("Bad node type. Got \\\"")
+ boost::lexical_cast<std::string>(nh.get_type())
+ "\\\" in decorator type %s");"""
% (self._get_type_check(), self.name)))
ret.append(
| |
{
'commands': [{
'name': 'get-mapping-fields'
}],
'ismappable': True
}})
structure_validator = StructureValidator(integration.yml.path, predefined_scheme='integration')
validator = IntegrationValidator(structure_validator)
assert validator.is_mapping_fields_command_exist()
def test_mapping_fields_command_dont_exist(integration):
    """
    Given
    - Integration yml file with no get-mapping-fields command and ismappable: True.
    When
    - Checking if get-mapping-fields command exists.
    Then
    - validator returns the False. The field ismappable exists, but the command no.
    """
    script_section = {
        'commands': [{'name': 'not-get-mapping-fields'}],
        'ismappable': True,
    }
    integration.yml.write_dict({'script': script_section})
    with ChangeCWD(integration.repo_path):
        structure = StructureValidator(integration.yml.path, predefined_scheme='integration')
        assert not IntegrationValidator(structure).is_mapping_fields_command_exist()
def test_get_packs_that_should_have_version_raised(repo):
    """
    Given
    - Different files from different packs in several statuses:
      1. Modified integration
      2. Modified test-playbook
      3. Added script to new pack
      4. Added script to existing pack
      5. Modified old format script
    When
    - Running get_packs_that_should_have_version_raised.
    Then
    - The returning set includes the packs for 1, 4 & 5 and does not include the packs for 2 & 3.
    """
    # 1. Modified integration -> should require a version bump.
    existing_pack1 = repo.create_pack('PackWithModifiedIntegration')
    moodified_integration = existing_pack1.create_integration('MyIn')
    moodified_integration.create_default_integration()
    # 4. Script added to an existing pack -> should require a version bump.
    existing_pack2 = repo.create_pack('ExistingPackWithAddedScript')
    added_script_existing_pack = existing_pack2.create_script('MyScript')
    added_script_existing_pack.create_default_script()
    # 3. Script added to a brand-new pack -> new packs are exempt.
    new_pack = repo.create_pack('NewPack')
    added_script_new_pack = new_pack.create_script('MyNewScript')
    added_script_new_pack.create_default_script()
    # 5. Modified old-format script -> should require a version bump.
    existing_pack3 = repo.create_pack('PackWithModifiedOldFile')
    modified_old_format_script = existing_pack3.create_script('OldScript')
    modified_old_format_script.create_default_script()
    # 2. Modified test-playbook -> test content is exempt.
    existing_pack4 = repo.create_pack('PackWithModifiedTestPlaybook')
    moodified_test_playbook = existing_pack4.create_test_playbook('TestBook')
    moodified_test_playbook.create_default_test_playbook()
    validate_manager = ValidateManager(check_is_unskipped=False)
    validate_manager.new_packs = {'NewPack'}
    modified_files = {moodified_integration.yml.rel_path, moodified_test_playbook.yml.rel_path}
    added_files = {added_script_existing_pack.yml.rel_path, added_script_new_pack.yml.rel_path}
    old_files = {modified_old_format_script.yml.rel_path}
    with ChangeCWD(repo.path):
        packs_that_should_have_version_raised = validate_manager.get_packs_that_should_have_version_raised(
            modified_files=modified_files, added_files=added_files, old_format_files=old_files)
    assert 'PackWithModifiedIntegration' in packs_that_should_have_version_raised
    assert 'ExistingPackWithAddedScript' in packs_that_should_have_version_raised
    assert 'PackWithModifiedOldFile' in packs_that_should_have_version_raised
    assert 'PackWithModifiedTestPlaybook' not in packs_that_should_have_version_raised
    assert 'NewPack' not in packs_that_should_have_version_raised
def test_quite_bc_flag(repo):
    # NOTE(review): this test builds a pack with a modified integration but
    # performs no assertions and never exercises the quiet-BC flag its name
    # suggests -- it looks like an unfinished stub; confirm intent or remove.
    existing_pack1 = repo.create_pack('PackWithModifiedIntegration')
    moodified_integration = existing_pack1.create_integration('MyIn')
    moodified_integration.create_default_integration()
def test_check_file_relevance_and_format_path_non_formatted_relevant_file(mocker):
    """
    Given
    - file path to validate
    When
    - file is relevant for validation and should not be formatted
    Then
    - return the file path
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=FileType.INTEGRATION)
    mocker.patch.object(manager, 'is_old_file_format', return_value=False)
    path = 'Packs/PackName/Integrations/IntegrationName/IntegrationName.yml'
    result = manager.check_file_relevance_and_format_path(path, None, set())
    assert result == (path, '', True)
@pytest.mark.parametrize('input_file_path',
                         ['Packs/pack_id/Integrations/integration_id/test_data/file.json',
                          'Packs/pack_id/test_data/file.json',
                          'Packs/pack_id/Scripts/script_id/test_data/file.json',
                          'Packs/pack_id/TestPlaybooks/test_data/file.json',
                          'Packs/pack_id/pack_metadata.json',
                          'Packs/pack_id/Integrations/integration_id/command_examples'])
def test_check_file_relevance_and_format_path_ignored_files(input_file_path):
    """
    Given
    - file path to validate
    When
    - file path is of a file that should be ignored
    Then
    - return None, file is ignored
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    outcome = manager.check_file_relevance_and_format_path(input_file_path, None, set())
    assert outcome == ('', '', True)
@pytest.mark.parametrize('input_file_path',
                         ['OtherDir/Integration/file.json',
                          'TestData/file.json',
                          'TestPlaybooks/file.yml',
                          'docs/dbot/README.md'])
def test_check_file_relevance_and_format_path_ignored_non_pack_files(input_file_path):
    """
    Given
    - file path to validate
    When
    - file is not in Packs directory
    Then
    - return None, file is ignored
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    outcome = manager.check_file_relevance_and_format_path(input_file_path, None, set())
    assert outcome == ('', '', True)
@pytest.mark.parametrize('input_file_path',
                         [".gitlab/ci/check.yml",
                          ".github/ci/check.yml",
                          ".circleci/ci/check.yml"])
def test_check_file_relevance_and_format_path_ignored_git_and_circle_files(input_file_path):
    """
    Given
    - file path to validate
    When
    - file path is a gitlab/circleci/github file
    Then
    - return None, file is ignored
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    outcome = manager.check_file_relevance_and_format_path(input_file_path, None, set())
    assert outcome == ('', '', True)
def test_check_file_relevance_and_format_path_type_missing_file(mocker):
    """
    Given
    - file path to validate
    When
    - file type is not supported
    Then
    - return None, call error handler
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    error_handler = mocker.patch.object(manager, 'handle_error', return_value=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=None)
    outcome = manager.check_file_relevance_and_format_path("Packs/type_missing_filename", None, set())
    assert outcome == ('', '', False)
    error_handler.assert_called()
@pytest.mark.parametrize('input_file_path, file_type',
                         [('Packs/some_test.py', FileType.PYTHON_FILE),
                          ('Packs/some_file.Tests.ps1', FileType.POWERSHELL_FILE),
                          ('Packs/some_test.js', FileType.JAVASCRIPT_FILE)]
                         )
def test_check_file_relevance_and_format_path_ignore_test_file(mocker, input_file_path, file_type):
    """
    Given
    - file path to validate
    When
    - file is a test file
    Then
    - return None, file is ignored
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=file_type)
    outcome = manager.check_file_relevance_and_format_path(input_file_path, None, set())
    assert outcome == ('', '', True)
@pytest.mark.parametrize('input_file_path, file_type',
                         [('Packs/some_file.py', FileType.PYTHON_FILE),
                          ('Packs/some_file.ps1', FileType.POWERSHELL_FILE),
                          ('Packs/some_file.js', FileType.JAVASCRIPT_FILE)]
                         )
def test_check_file_relevance_and_format_path_file_to_format(mocker, input_file_path, file_type):
    """
    Given
    - file path to validate
    When
    - file should be formatted
    Then
    - return the formatted file path
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=file_type)
    mocker.patch.object(manager, 'is_old_file_format', return_value=False)
    expected = ('Packs/some_file.yml', '', True)
    assert manager.check_file_relevance_and_format_path(input_file_path, None, set()) == expected
@pytest.mark.parametrize('input_file_path, old_file_path, file_type',
                         [('Packs/some_file.py', 'Packs/old_file_path.py', FileType.PYTHON_FILE),
                          ('Packs/some_file.ps1', 'Packs/old_file_path.ps1', FileType.POWERSHELL_FILE),
                          ('Packs/some_file.js', 'Packs/old_file_path.js', FileType.JAVASCRIPT_FILE)]
                         )
def test_check_file_relevance_and_format_path_file_to_format_with_old_path(mocker,
                                                                           input_file_path,
                                                                           old_file_path,
                                                                           file_type):
    """
    Given
    - file path to validate and it's old path
    When
    - file should be formatted and it has been renamed
    Then
    - return tuple of the formatted path and it's original path
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=file_type)
    mocker.patch.object(manager, 'is_old_file_format', return_value=False)
    expected = ('Packs/some_file.yml', 'Packs/old_file_path.yml', True)
    assert manager.check_file_relevance_and_format_path(input_file_path, old_file_path, set()) == expected
def test_check_file_relevance_and_format_path_old_format_file(mocker):
    """
    Given
    - file path to validate
    When
    - file is of an old format
    Then
    - return None, add the file path to the old_format_files argument
    """
    manager = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    mocker.patch('demisto_sdk.commands.validate.validate_manager.find_type', return_value=FileType.INTEGRATION)
    mocker.patch.object(manager, 'is_old_file_format', return_value=True)
    collected: set = set()
    assert manager.check_file_relevance_and_format_path('Packs/some_test.yml', None, collected) == ('', '', True)
    assert collected == {'Packs/some_test.yml'}
@pytest.mark.parametrize('is_feed', (True, False))
def test_job_sanity(repo, is_feed: bool):
    """
    Given
        A Job object inside a repo
    When
        Running validation on the file
    Then
        Ensure the autogenerated Job files pass validation
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed=is_feed, name='job_name')
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert validate_manager.validate_job(structure, pack_error_ignore_list=[])
@pytest.mark.parametrize('is_feed', (True, False))
@pytest.mark.parametrize('version', ('6.4.9', None, ''))
def test_job_from_version(repo, capsys, is_feed: bool, version: Optional[str]):
    """
    Given
        A Job object whose fromVersion is below the minimum (or missing/empty)
    When
        Running validation on the file
    Then
        Ensure validation fails and the output names the minimal supported version
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed, 'job_name')
    job.update({'fromVersion': version})
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    assert f"fromVersion field in Job needs to be at least {DEFAULT_JOB_FROM_VERSION} (found {version})" in stdout
def test_job_non_feed_with_selected_feeds(repo, capsys):
    """
    Given
        A non-feed Job (isFeed=false) that nevertheless lists selectedFeeds
    When
        Running validation on the file
    Then
        Ensure an error is printed and validation fails
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed=False, name='job_name', selected_feeds=['feed_name'])
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    assert "Job objects cannot have non-empty selectedFeeds when isFeed is set to false" in stdout
def test_job_both_selected_and_all_feeds_in_job(repo, capsys):
    """
    Given
        A Job with non-empty selectedFeeds while isAllFeeds is set to true
    When
        Running validation on the file
    Then
        Ensure an error is printed and validation fails
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed=True, name='job_name', selected_feeds=['feed_name'])
    job.update({'isAllFeeds': True})
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    assert "Job cannot have non-empty selectedFeeds values when isAllFields is set to true" in stdout
@pytest.mark.parametrize('is_feed', (True, False))
@pytest.mark.parametrize('name', ('', ' ', ' ', '\n', '\t'))
def test_job_blank_name(repo, capsys, name: str, is_feed: bool):
    """
    Given
        A Job whose name is blank (empty or whitespace-only)
    When
        Running validation on the file
    Then
        Ensure the empty/missing-name error is printed and validation fails
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed=is_feed, name=name)
    job.update({'name': name})  # create_job appends a number to the name, so it must be set explicitly here
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    expected_string, expected_code = Errors.empty_or_missing_job_name()
    assert expected_string in stdout
    assert expected_code in stdout
@pytest.mark.parametrize('is_feed', (True, False))
def test_job_missing_name(repo, capsys, is_feed: bool):
    """
    Given
        A Job object with no name field at all
    When
        Running validation on the file
    Then
        Ensure the empty/missing-name error is printed and validation fails
    """
    pack = repo.create_pack()
    job = pack.create_job(is_feed=is_feed)
    job.remove('name')  # create_job generates a numbered name, so it must be removed explicitly
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    expected_string, expected_code = Errors.empty_or_missing_job_name()
    assert expected_string in stdout
    assert expected_code in stdout
@pytest.mark.parametrize("is_all_feeds,selected_feeds", ((True, []),
                                                         (True, None),
                                                         (False, ['my_field']),
                                                         (True, ['my_field'])
                                                         )
                         )
def test_job_unexpected_field_values_in_non_feed_job(repo, capsys,
                                                     is_all_feeds: bool,
                                                     selected_feeds: Optional[List[str]]):
    """
    Given
        A feed Job (isFeed=true) whose isAllFeeds is false and selectedFeeds is empty
    When
        Running validation on the file
    Then
        Ensure an error is printed and validation fails
    """
    # NOTE(review): the parametrized `is_all_feeds`/`selected_feeds` values are never used
    # below — the job is created with the same hard-coded fields in every case. Confirm
    # whether they were meant to drive `create_job`/`job.update`.
    pack = repo.create_pack()
    job = pack.create_job(is_feed=True, name='job_name')
    job.update({'isAllFeeds': False})
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=job.path, skip_conf_json=True)
    with ChangeCWD(repo.path):
        structure = StructureValidator(job.path, is_new_file=True)
        assert not validate_manager.validate_job(structure, pack_error_ignore_list=[])
    stdout = capsys.readouterr().out
    assert "Job must either have non-empty selectedFeeds OR have isAllFields set to true when isFeed is set to true" \
           in stdout
def test_validate_contributors_file(repo):
    """
    Given:
        A simple CONTRIBUTORS.md file (see this https://xsoar.pan.dev/docs/packs/packs-format#contributorsmd)
    When:
        Running validation on the new file
    Then:
        Ensure the file passes validation
    """
    pack = repo.create_pack()
    content = """### Pack Contributors:\n\n---\n- Test UserName\n\n Contributions are welcome and
appreciated. For more info, visit our [Contribution Guide](https://xsoar.pan.dev/docs/contributing/contributing)."""
    contributors_file = pack.create_contributors_file(content)
    validate_manager = ValidateManager(check_is_unskipped=False, file_path=contributors_file.path, skip_conf_json=True)
    assert validate_manager.run_validation_on_specific_files()
def test_validate_pack_name(repo):
"""
Given:
A file in a pack to validate.
When:
Checking if the pack name of the file is valid (the pack name is not changed).
Then:
If new file | |
<filename>myresources/crocodile/core.py
"""
A collection of classes extending the functionality of Python's builtins.
email <EMAIL>
"""
# Typing
# Path
import os
import sys
from pathlib import Path
import string
import random
# Numerical
import numpy as np
# import pandas as pd # heavy weight, avoid unless necessary.
# Meta
import copy
import dill
import functools
from datetime import datetime
import datetime as dt # useful for deltatime and timezones.
_ = dt
# ============================== Accessories ============================================
def timestamp(fmt=None, name=None):
    """Return the current time formatted for use in file names (isoformat is not
    compatible with file-naming conventions, hence the custom default format).

    :param fmt: strftime format string; defaults to '%Y-%m-%d-%I-%M-%S-%p-%f'.
    :param name: optional prefix; when given, the result is '<name>_<stamp>'.
    tip: do not use this to create random addresses as it fails at high speed runs.
    A random string is better."""
    stamp = datetime.now().strftime('%Y-%m-%d-%I-%M-%S-%p-%f' if fmt is None else fmt)
    return name + '_' + stamp if name else stamp
def str2timedelta(past):
    """Convert a human-readable duration string like '1m' or '2d' into a
    `datetime.timedelta` — in essence, `2m` is short for `timedelta(minutes=2)`.

    :param past: duration string; last character is the unit (s/m/h/d/w/M/y),
        the rest is the numeric amount (int or float, e.g. '1.5h').
    :return: the corresponding `datetime.timedelta`.
    :raises KeyError: on an unknown unit suffix.
    :raises ValueError: if the amount is not a plain number.
    """
    sc = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days", "w": "weeks",
          "M": "months", "y": "years"}
    key = sc[past[-1]]
    # Was `eval(past[:-1])` — arbitrary code execution on untrusted input.
    # float() accepts every numeric duration while staying safe.
    val = float(past[:-1])
    if key == "months":  # timedelta has no months; approximate as 30 days
        key, val = "days", val * 30
    elif key == "years":  # timedelta has no years; approximate as 52 weeks
        key, val = "weeks", val * 52
    return dt.timedelta(**{key: val})
def randstr(length=10, lower=True, upper=True, digits=True, punctuation=False, safe=False):
    """Return a random string of `length` characters drawn from the enabled classes.

    :param length: number of characters (for `safe=True`, bytes of entropy passed
        to `secrets.token_urlsafe`, so the result is longer than `length`).
    :param lower/upper/digits/punctuation: include the respective character class.
    :param safe: use the `secrets` module (hardware/OS entropy via
        random.SystemRandom / os.urandom, not pseudo-random) — for security tokens.
    :raises ValueError: if every character class is disabled (the original code
        crashed with an opaque IndexError from random.choice on an empty pool).
    """
    if safe:
        import secrets
        return secrets.token_urlsafe(length)
    pool = ""
    if lower:
        pool += string.ascii_lowercase
    if upper:
        pool += string.ascii_uppercase
    if digits:
        pool += string.digits
    if punctuation:
        pool += string.punctuation
    if not pool:
        raise ValueError("randstr: at least one character class must be enabled")
    return ''.join(random.choices(pool, k=length))
def assert_package_installed(package):
    """Import `package` by name and return the module, pip-installing it first
    if the import fails.

    :param package: importable module name (also used as the pip package name).
    :return: the imported module object.
    """
    try:
        return __import__(package)
    except ImportError:
        import subprocess
        # Install into the interpreter currently running, then retry the import.
        subprocess.check_call([sys.executable, "-m", "pip", "install", package])
        return __import__(package)
# ====================================== Classes ====================================
class SaveDecorator(object):
    """Class-based decorator wrapping a saving function ``func(path, obj, **kwargs)``.

    When called without a path, a temporary path suffixed with ``ext`` is generated.

    NOTE(review): in ``__call__`` below, *both* branches of the explicit-path case
    raise ``ValueError`` unconditionally, so any call that passes a path fails —
    this looks unfinished (see the migration TODO); confirm before relying on it.
    """
    def __init__(self, func, ext=""):
        # TODO: migrate from save_decorator to SaveDecorator
        # Called with func argument when constructing the decorated function.
        # func argument is passed implicitly by Python.
        self.func = func  # the wrapped saving function
        self.ext = ext    # file extension (including the dot) enforced on paths
    @classmethod
    def init(cls, func=None, **kwargs):
        """Always use this method for construction.

        Supports both ``@SaveDecorator.init`` (func passed directly) and
        ``@SaveDecorator.init(ext=...)`` (kwargs first, func supplied later).
        """
        if func is None:  # User instantiated the class with no func argument and specified kwargs.
            def wrapper(func_):
                return cls(func_, **kwargs)
            return wrapper  # a function ready to be used by Python (pass func to it to instantiate it)
        else:  # called by Python with func passed and user did not specify non-default kwargs:
            return cls(func)  # return instance of the class.
    def __call__(self, path=None, obj=None, **kwargs):
        # Called when calling the decorated function (instance of this called).
        if path is None:
            import tempfile
            # default: fresh temp directory name + timestamp + extension
            path = Path(tempfile.mkdtemp() + "-" + timestamp() + self.ext)
            # raise ValueError
        else:
            if not str(path).endswith(self.ext):
                # path = P(str(path) + self.ext)
                raise ValueError  # NOTE(review): unconditional — explicit paths always fail here
            else:
                # path = P(path)
                raise ValueError  # NOTE(review): unconditional — explicit paths always fail here
        # noinspection PyUnreachableCode
        path.parent.mkdir(exist_ok=True, parents=True)
        self.func(path, obj, **kwargs)
        print(f"File {obj} saved @ ", path.absolute().as_uri(), ". Directory: ", path.parent.absolute().as_uri())
        return path
def save_decorator(ext=""):
    """Decorator factory used by the `Save` helpers: applies a default path when
    none is given, appends `ext` to a path that lacks it, creates parent
    directories, optionally prints the saved location, and returns the path.

    :param ext: file extension (including the dot) that saved paths must end with.
    :return: a decorator for functions with signature ``func(path, obj, **kwargs)``;
        the wrapped function is called as ``wrapped(obj, path=None, verbose=True, **kwargs)``.
    """
    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped saver's name/docstring for introspection
        def wrapper(obj, path=None, verbose=True, **kwargs):
            if path is None:
                # No destination given: fall back to ~/tmp_results/<random><ext>.
                path = Path.home().joinpath("tmp_results").joinpath(randstr() + ext)
                print(f"tb.core: Warning: Path not passed to {func}. "
                      f"A default path has been chosen: {path.absolute().as_uri()}")
                # raise ValueError
            else:
                if not str(path).endswith(ext):
                    path = Path(str(path) + ext)
                    print(f"tb.core: Warning: suffix {ext} is added to path passed {path.as_uri()}")
                else:
                    path = Path(path)
            path.parent.mkdir(exist_ok=True, parents=True)
            func(path=path, obj=obj, **kwargs)
            if verbose:
                rep = repr(obj)
                rep = rep if len(rep) < 50 else rep[:10] + "... "  # truncate long reprs
                print(f"SAVED {rep} @ `{path.absolute().as_uri()}` | Directory: `{path.parent.absolute().as_uri()}`")
            return path
        return wrapper
    return decorator
class Save:
    """Namespace of static helpers that persist objects to various formats.
    Each method is wrapped by `save_decorator`, so every helper is called as
    ``Save.xxx(obj, path=None, verbose=True, **kwargs)``: a default path is
    supplied when none is given and the matching extension is appended."""
    @staticmethod
    @save_decorator(".csv")
    def csv(obj, path=None):
        """Save `obj`'s dtypes (as a frame) to '<path>.dtypes' in CSV form.
        NOTE(review): only the dtypes are written — confirm whether `obj` itself
        was also meant to be saved to `path`."""
        # obj.to_frame('dtypes').reset_index().to_csv(P(path).append(".dtypes").string)
        # Bug fix: `path` is a pathlib.Path here (see save_decorator), and
        # `Path + str` raises TypeError — concatenate on the string form instead.
        obj.to_frame('dtypes').reset_index().to_csv(str(path) + ".dtypes")
    @staticmethod
    @save_decorator(".npy")
    def npy(obj, path, **kwargs):
        """Save `obj` as a NumPy binary (.npy) file."""
        np.save(path, obj, **kwargs)
    @staticmethod
    @save_decorator(".mat")
    def mat(mdict, path=None, **kwargs):
        """
        .. note::
            Avoid using mat for saving results because of incompatibility:

            * `None` type is not accepted.
            * Scalars are converted to [1 x 1] arrays.
            * etc. As such, there is no guarantee that you restore what you saved.

            Unless you want to pass the results to Matlab animals, avoid this format.
        """
        from scipy.io import savemat
        for key, value in mdict.items():
            if value is None:
                mdict[key] = []  # savemat rejects None; substitute an empty array
        savemat(str(path), mdict, **kwargs)
    @staticmethod
    @save_decorator(".json")
    def json(obj, path=None, **kwargs):
        """This format is **compatible** with simple dictionaries that hold strings or numbers
        but nothing more than that.
        E.g. arrays or any other structure. An example of that is settings dictionary.
        It is useful to generate human-readable files."""
        import json
        with open(str(path), "w") as file:
            # Fall back to __dict__ so plain objects serialize without a custom encoder.
            json.dump(obj, file, default=lambda x: x.__dict__, **kwargs)
    @staticmethod
    @save_decorator(".yaml")  # Bug fix: was bare `@save_decorator`, which passed the function
    # itself as `ext` and returned the inner `decorator` — making Save.yaml(obj, path) a TypeError.
    def yaml(obj, path, **kwargs):
        """Save `obj` as YAML."""
        import yaml
        with open(str(path), "w") as file:
            yaml.dump(obj, file, **kwargs)
    @staticmethod
    @save_decorator(".pkl")
    def vanilla_pickle(obj, path, **kwargs):
        """Save `obj` with the standard-library pickle module."""
        import pickle
        with open(str(path), 'wb') as file:
            pickle.dump(obj, file, **kwargs)
    @staticmethod
    @save_decorator(".pkl")
    def pickle(obj=None, path=None, r=False, **kwargs):
        """This is based on `dill` package. While very flexible, it comes at the cost of assuming so many packages are
        loaded up and it happens implicitly. It often fails at load time and requires same packages to be reloaded first
        . Compared to vanilla pickle, the former always raises an error when cannot pickle an object due to
        dependency. Dill however, stores all the required packages for any attribute object, but not the class itself,
        or the classes that it inherits (at least at with this version)."""
        with open(str(path), 'wb') as file:
            dill.dump(obj, file, recurse=r, **kwargs)
    @staticmethod
    def pickle_s(obj):
        """Serialize `obj` with dill and return the raw bytes (no file involved)."""
        return dill.dumps(obj)
class Base(object):
    def __init__(self, *args, **kwargs):
        # Accepts and ignores any arguments so subclasses can call super().__init__() freely.
        pass
def __getstate__(self):
"""This method is used by Python internally when an instance of the class is pickled.
attributes that you do not want to be pickled for one reason or the other, should be omitted from the
returned dictionary (as opposed to setting them to None)."""
return self.__dict__.copy()
def __setstate__(self, state):
"""The solution to recover missing objects from state, is dependent on where it came from.
If the attribute was instantiated by the class itself, then, similar thing should happen here.
If the object was passed to the __init__ by the caller, it should be passed again. For the meanwhile it should
be set to None."""
self.__dict__.update(state)
def save_code(self, path):
"""a usecase for including code in the save is when the source code is continously
changing and still you want to reload an old version."""
import inspect
module = inspect.getmodule(self)
if hasattr(module, "__file__"): file = Path(module.__file__)
else: raise FileNotFoundError(f"Attempted to save code from a script running in interactive session! "
f"module should be imported instead.")
Path(path).write_text(file.read_text())
return Path(path) if type(path) is str else path # path could be tb.P, better than Path
def save(self, path=None, itself=True, r=False, include_code=False):
"""Pickles the object.
:param path: destination file.
:param itself: `itself` means the object (self) will be pickled straight away. This is the default behaviour,
however, it requires (in case of composed objects) that every sub-object is well-behaved and has the appropriate
state methods implemented. The alternative to this option (itself=False) is to save __dict__ only
(assuming it is pure data rather than code, otherwise recusive flag must be set), then the class itself is
required later and the `from_pickled_state` method should be used to reload the instance again.
The disadvantage of this method is that __init__ method will be used again at reconstruction time
of the object before the attributes are monkey-patched.
It is very arduous to design __init__ method that is convenient (uses plethora of
default arguments) and works at the same time with no input at reconstruction time.
# Use only for classes with whacky behaviours or too expensive to redesign
# methodology: 1- Save state, 2- save code. 3- initialize from __init__, 4- populate __dict__
:param include_code: `save_code` will be called.
:param r: recursive flag.
* Dill package manages to resconstruct the object by loading up all the appropriate libraries again
IF the object is restored while directory is @ the same location object was created, thus,
no need for |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.