id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/EggLib-3.3.0.tar.gz/EggLib-3.3.0/src/egglib/coalesce/_param_helpers.py | from .. import eggwrapper as _eggwrapper
from .. import stats, _interface
class ParamDict(object):
    """
    :class:`dict`-like class managing parameters. Do most of what a
    dictionary does except for adding and removing keys. The order of
    parameters is fixed and consistent.

    In addition to the methods documented below, :class:`.ParamDict`
    instances support the following operations (where ``params`` is one
    instance):

    +------------------------+--------------------------------------------------------+
    |Expression              | Action                                                 |
    +========================+========================================================+
    |``len(params)``         | Number of parameters                                   |
    +------------------------+--------------------------------------------------------+
    |``params[key]``         | Get the value of parameter ``key``                     |
    +------------------------+--------------------------------------------------------+
    |``params[key] = value`` | Assign ``value`` to parameter ``key`` (see note below) |
    +------------------------+--------------------------------------------------------+
    |``for key in params``   | Same as ``for key in params.keys()``                   |
    +------------------------+--------------------------------------------------------+
    |``reversed(params)``    | Reversed iterator                                      |
    +------------------------+--------------------------------------------------------+
    |``key in params``       | Check if ``key`` is a parameter name                   |
    +------------------------+--------------------------------------------------------+
    |``str(params)``         | Representation of the instance as a :class:`dict`      |
    +------------------------+--------------------------------------------------------+

    Note that the ``params[key] = value`` expression is straightforward
    only for parameters that have a single value. For parameters
    represented by a :class:`.ParamList` instance, this expression is
    only supported if the right-hand operand is a sequence of matching
    length (in order to set all values at once). For parameters
    represented by a :class:`.ParamMatrix` instance, this expression is
    only supported if the right-hand operand is another
    :class:`.ParamMatrix` instance of matching dimension. For
    ``events``, this expression is not supported at all. In all cases
    where ``params[key]`` returns :class:`.ParamList`,
    :class:`.ParamMatrix` or a :class:`.EventList` instance, the
    returned value can be modified using its own methods.
    """

    # ordered list of all parameter names (fixed key set of the dict)
    _keys = ['num_pop', 'num_sites', 'recomb', 'theta', 'num_mut',
             'mut_model', 'TPM_proba', 'TPM_param', 'num_alleles',
             'rand_start', 'num_chrom', 'num_indiv', 'N', 'G', 's',
             'site_pos', 'site_weight', 'migr_matrix', 'trans_matrix',
             'events', 'max_iter']

    # sub-lists with only parameters that can be changed between
    # simulations
    _keys_scalar = ['num_sites', 'recomb', 'theta', 'num_mut',
                    'mut_model', 'TPM_proba', 'TPM_param', 'num_alleles',
                    'rand_start']
    _keys_list = ['num_chrom', 'num_indiv', 'N', 'G', 's', 'site_pos',
                  'site_weight']
    _keys_matrix = ['migr_matrix', 'trans_matrix']

    def __init__(self, npop):
        """
        :param npop: number of populations (fixed for the lifetime of
            the instance; must be a strictly positive :class:`int`).
        """
        if not isinstance(npop, int): raise TypeError('invalid type for `npop`')
        if npop < 1: raise ValueError('invalid value for `npop`')
        # low-level C++ parameter holder and its migration matrix
        self._params = _eggwrapper.Params(npop, 0.0)
        self._migr = self._params.M()
        self._params.set_L(0)
        self._npop = npop
        # per-population wrappers exposing the C++ accessors as lists
        self._num_chrom = ParamList(getter=self._params.get_n1,
                                    setter=self._params.set_n1,
                                    num_values=npop,
                                    check_item=lambda x: x>=0)
        self._num_indiv = ParamList(getter=self._params.get_n2,
                                    setter=self._params.set_n2,
                                    num_values=npop,
                                    check_item=lambda x: x>=0)
        self._N = ParamList(getter=self._params.get_N,
                            setter=self._params.set_N,
                            num_values=npop,
                            check_item=lambda x: x>0.0)
        self._G = ParamList(getter=self._params.get_G,
                            setter=self._params.set_G,
                            num_values=npop,
                            check_item=lambda x: True)
        self._s = ParamList(getter=self._params.get_s,
                            setter=self._params.set_s,
                            num_values=npop,
                            check_item=lambda x: x>=0.0 and x<=1.0)
        # per-site wrappers; their length is updated when `num_sites`
        # is assigned (see __setitem__)
        self._site_pos = ParamList(getter=self._params.get_sitePos,
                                   setter=self._params.set_sitePos,
                                   num_values=0,
                                   check_item=lambda x: x>=0.0 and x<=1.0)
        self._site_weight = ParamList(getter=self._params.get_siteW,
                                      setter=self._params.set_siteW,
                                      num_values=0,
                                      check_item=lambda x: x>0.0)
        # migration matrix is always active (no activation toggle)
        self._migr_matrix = ParamMatrix(getter=self._migr.get_pair,
                                        setter=self._migr.set_pair,
                                        num_values=npop,
                                        check_item=lambda x:x>=0.0,
                                        check_active=lambda: True,
                                        set_active=None)
        # transition weight matrix; its dimension follows `num_alleles`
        # (see __setitem__) and it is activated lazily on first write
        self._trans_matrix = ParamMatrix(getter=self._params.get_transW_pair,
                                         setter=self._params.set_transW_pair,
                                         num_values=2,
                                         check_item=lambda x:x>0.0,
                                         check_active=lambda: self._params.get_transW_matrix(),
                                         set_active=self._params.set_transW_matrix)
        self._events = EventList(npop=npop,
                                 add=self._params.addChange,
                                 clear=self._params.clearChanges)

    def disable_trans_matrix(self):
        """
        Disable transition weight matrix.

        Disable the matrix of weights of transition between alleles (it
        is disabled by default, and automatically activated if any value
        is set). This sets all weights to 1.0.
        """
        self._params.set_transW_matrix(False)

    def get_values(self, other):
        """
        Import parameter values.

        Update the dictionary with values from another
        :class:`.ParamDict` instance (with the same numbers of
        populations and sites).
        """
        if not isinstance(other, ParamDict): raise TypeError('`other` should be a `ParamDict` instance')
        # set sizes first so that list/matrix parameters have matching
        # dimensions when copied below
        self['num_sites'] = other['num_sites']
        self['num_alleles'] = other['num_alleles']
        for k in self._keys:
            if k not in ['num_pop', 'num_sites', 'num_alleles', 'events']: self[k] = other[k]
        self._events.replace(other._events)

    def summary(self):
        """
        Parameter summary.

        Return a string displaying all current values of parameters at
        the level of the C++ simulator. Use for debugging only, as the
        format is not guaranteed to be stable.
        """
        return self._params.summary()

    def _set(self, key, values): # alternative setter
        # scalar: `values` is the value itself; list: (index, value);
        # matrix: (i, j, value)
        if key in self._keys_scalar: self[key] = values
        elif key in self._keys_list: self[key][values[0]] = values[1]
        elif key in self._keys_matrix: self[key][values[0], values[1]] = values[2]
        else: raise KeyError('this parameter cannot be modified between simulations')

    def set_migr(self, value):
        """
        Set migration rates.

        Set all pairwise migration rates to ``value/(num_pop-1)``.
        """
        if value < 0.0: raise ValueError('invalid value for `migr`')
        self._migr.set_all(value)

    def mk_structure(self, skip_indiv=False, outgroup_label=None):
        """
        Create structure object.

        Export a :class:`.Structure` instance containing the
        structure information corresponding to currently loaded
        simulation parameters.

        :param skip_indiv: this argument determines whether the individuals
            level should be skipped (if ``True``, alleles from each given
            individual are treated as belonging to separate haploid
            individuals).
        :param outgroup_label: this argument indicates the label of the
            outgroup population. It must be passed as a string. Default
            value is ``None``.

        .. warning::
            The individual level cannot be processed if the ploidy is
            not constant (mixing sampled chromosomes and sampled
            individuals).

        :return: A new :class:`.Structure` instance.
        """
        # process populations
        struct_ing = {None: {}}
        struct_out = {}
        cnt_sam = 0   # running count of samples (chromosomes)
        cnt_idv = 0   # running count of individuals (used as labels)
        for k in range(self._npop):
            # get list of sample indexes (as a list of tuples/per individual)
            # diploid individuals first (two samples each), then haploids
            sam_start = cnt_sam
            idv = [(sam_start+i*2, sam_start+i*2+1) for i in range(self._params.get_n2(k))]
            sam_start += 2*len(idv)
            idv.extend([(sam_start+i,) for i in range(self._params.get_n1(k))])
            cnt_sam += 2*self._params.get_n2(k) + self._params.get_n1(k)
            # process population
            if skip_indiv:
                # flatten: each allele becomes its own haploid individual
                idv = [(j,) for i in idv for j in i]
            idv = dict(zip(map(str, range(cnt_idv, cnt_idv+len(idv))), idv))
            if outgroup_label != None and k == int(outgroup_label):
                struct_out = idv
            else:
                struct_ing[None][str(k)] = idv
            cnt_idv += len(idv)
        # process delayed samples
        if self._params.nDSChanges() > 0:
            for event in self._events:
                if event['cat'] == 'sample':
                    k = event['label']
                    idv = [(cnt_sam+i*2, cnt_sam+i*2+1) for i in range(event['num_indiv'])]
                    idv.extend([(cnt_sam+i,) for i in range(event['num_chrom'])])
                    cnt_sam += 2 * event['num_indiv'] + event['num_chrom']
                    if skip_indiv: idv = [(j,) for i in idv for j in i]
                    idv = dict(zip(map(str, range(cnt_idv, cnt_idv+len(idv))), idv))
                    if outgroup_label != None and k == int(outgroup_label):
                        struct_out = idv
                    else:
                        # a delayed sample may target a population
                        # already present or a new label
                        if str(k) not in struct_ing[None]: struct_ing[None][str(k)] = {}
                        struct_ing[None][str(k)].update(idv)
                    cnt_idv += len(idv)
        if len(struct_ing[None]) == 0: raise ValueError('cannot generate structure')
        return _interface.struct_from_dict(struct_ing, struct_out)

    ####################################################################
    # below: dict-like methods
    ####################################################################

    def has_key(self, key):
        """
        Tell if a parameter exists.

        Return a boolean indicating if the passed name is one of the
        parameters.
        """
        return key in self._keys

    def get(self, key, default=None):
        """
        Get a parameter value.

        Return the value for *key* if *key* is one of the parameters,
        else return the value passed as *default* (by default,
        ``None``). This method therefore never raises a
        :exc:`KeyError`.
        """
        if key in self._keys: return self[key]
        return default

    def __iter__(self):
        for key in self._keys: yield key

    def keys(self):
        """
        Iterator over the parameter names.
        """
        for key in self._keys: yield key

    def values(self):
        """
        Iterator over the parameter values.
        """
        for key in self._keys: yield self[key]

    def items(self):
        """
        Iterator over content of the instance. Each iteration round
        yields a ``(key, value)`` parameter name and value pair.
        """
        for key in self._keys: yield key, self[key]

    def copy(self):
        """
        Shallow copy.

        Return a shallow copy of this instance. All modifications of the
        values of either copy will modify the other one (even
        modification of integer, float and string parameters).
        """
        ret = ParamDict(self._npop)
        # NOTE(review): the ParamList/ParamMatrix wrappers of `ret`
        # remain bound to the Params object created in ret.__init__,
        # not to the shared one assigned below — confirm intended.
        ret._params = self._params
        return ret

    def update(self, other=None, **kwargs):
        """
        Import parameter values.

        Update the dictionary with the ``(key, value)`` parameter name
        and value pairs from the object passed as *other*, overwriting
        existing keys and raising a :exc:`KeyError`
        exception if an unknown parameter name is met.

        Accepts :class:`dict` instances, or else any iterable of
        ``(key, value)`` pairs. If keyword arguments are specified, the
        instance is then updated with those, as in
        ``param_dict.update(theta=5.0, recomb=2.5)``.
        """
        if isinstance(other, ParamDict):
            self.get_values(other)
        elif other is not None:
            # apply size parameters first so dependent list/matrix
            # parameters have matching dimensions
            # NOTE(review): the `in` tests below assume `other` is a
            # mapping; for a pair iterable they test item membership —
            # confirm callers only pass mappings here.
            if 'num_sites' in other: self['num_sites'] = other['num_sites']
            if 'num_alleles' in other: self['num_alleles'] = other['num_alleles']
            try:
                iter_ = other.items()
            except AttributeError: iter_ = iter(other)
            for k, v in iter_: self[k] = v
        if 'num_sites' in kwargs:
            self['num_sites'] = kwargs['num_sites']
            del kwargs['num_sites']
        if 'num_alleles' in kwargs:
            self['num_alleles'] = kwargs['num_alleles']
            del kwargs['num_alleles']
        for k, v in kwargs.items(): self[k] = v

    def __len__(self):
        return len(self._keys)

    def __reversed__(self):
        for key in reversed(self._keys):
            yield key

    def __contains__(self, key):
        return key in self._keys

    def __str__(self):
        return str(dict(self.items()))

    def __getitem__(self, key):
        if key == 'num_pop': return self._npop
        elif key == 'num_sites': return self._params.get_L()
        elif key == 'recomb': return self._params.get_R()
        elif key == 'theta': return self._params.get_theta()
        elif key == 'num_mut': return self._params.get_fixed()
        elif key == 'mut_model':
            # map the C++ enum back to its string name
            # NOTE(review): returns None for an unrecognized enum value
            # (no final else) — confirm the four models are exhaustive.
            model = self._params.get_mutmodel()
            if model == _eggwrapper.Params.KAM: return 'KAM'
            elif model == _eggwrapper.Params.IAM: return 'IAM'
            elif model == _eggwrapper.Params.SMM: return 'SMM'
            elif model == _eggwrapper.Params.TPM: return 'TPM'
        elif key == 'TPM_proba': return self._params.get_TPMproba()
        elif key == 'TPM_param': return self._params.get_TPMparam()
        elif key == 'num_alleles': return self._params.get_K()
        elif key == 'rand_start': return self._params.get_random_start_allele()
        elif key == 'num_chrom': return self._num_chrom
        elif key == 'num_indiv': return self._num_indiv
        elif key == 'N': return self._N
        elif key == 'G': return self._G
        elif key == 's': return self._s
        elif key == 'site_pos': return self._site_pos
        elif key == 'site_weight': return self._site_weight
        elif key == 'migr_matrix': return self._migr_matrix
        elif key == 'trans_matrix': return self._trans_matrix
        elif key == 'events': return self._events
        elif key == 'max_iter': return self._params.get_max_iter()
        else: raise KeyError('invalid parameter name: {0}'.format(key))

    def __setitem__(self, key, value):
        if key == 'num_pop': raise ValueError('the number of populations is read-only (it must be set at construction time)')
        elif key == 'num_sites':
            if value < 0: raise ValueError('invalid value for `{0}`'.format(key))
            self._params.set_L(value)
            # keep the per-site wrappers in sync with the new length
            self._site_pos._num_values = value
            self._site_weight._num_values = value
            if value > 0: self._params.autoSitePos()
        elif key == 'recomb':
            if value < 0.0: raise ValueError('invalid value for `{0}`'.format(key))
            self._params.set_R(value)
        elif key == 'theta':
            if value < 0: raise ValueError('invalid value for `{0}`'.format(key))
            # theta and a fixed number of mutations are exclusive
            if value > 0.0 and self._params.get_fixed() > 0.0: raise ValueError('it is not allowed to set both theta and the number of mutations to non-zero')
            self._params.set_theta(value)
        elif key == 'num_mut':
            if value < 0: raise ValueError('invalid value for `{0}`'.format(key))
            if value > 0.0 and self._params.get_theta() > 0.0: raise ValueError('it is not allowed to set both theta and the number of mutations to non-zero')
            self._params.set_fixed(value)
        elif key == 'mut_model':
            if value == 'KAM': self._params.set_mutmodel(_eggwrapper.Params.KAM)
            elif value == 'IAM': self._params.set_mutmodel(_eggwrapper.Params.IAM)
            elif value == 'SMM': self._params.set_mutmodel(_eggwrapper.Params.SMM)
            elif value == 'TPM': self._params.set_mutmodel(_eggwrapper.Params.TPM)
            else: raise ValueError('invalid value for `{0}`'.format(key))
        elif key == 'TPM_proba':
            if value < 0 or value > 1: raise ValueError('invalid value for `{0}`'.format(key))
            self._params.set_TPMproba(value)
        elif key == 'TPM_param':
            if value < 0 or value > 1: raise ValueError('invalid value for `{0}`'.format(key))
            self._params.set_TPMparam(value)
        elif key == 'num_alleles':
            if value < 2: raise ValueError('invalid value for `{0}`'.format(key))
            self._params.set_K(value)
            # the transition matrix dimension follows the allele count
            self._trans_matrix._num_values = value
        elif key == 'rand_start':
            self._params.set_random_start_allele(value)
        elif key == 'max_iter':
            if value < 0: raise ValueError('maximum number of iterations cannot be negative')
            self._params.set_max_iter(value)
        # list parameters: replace all values at once via slice assignment
        elif key == 'num_chrom': self._num_chrom[:] = value
        elif key == 'num_indiv': self._num_indiv[:] = value
        elif key == 'N': self._N[:] = value
        elif key == 'G': self._G[:] = value
        elif key == 's': self._s[:] = value
        elif key == 'site_pos': self._site_pos[:] = value
        elif key == 'site_weight': self._site_weight[:] = value
        # matrix parameters: import values from a compatible source
        elif key == 'migr_matrix': self._migr_matrix.get_values(value)
        elif key == 'trans_matrix': self._trans_matrix.get_values(value)
        elif key == 'events': raise ValueError('cannot replace the list of events as a whole')
        else: raise KeyError('invalid parameter name: `{0}`'.format(key))

    def add_event(self, cat, T, **params):
        """
        Add an event to the list. See the documentation for the class
        :class:`.Simulator` for more details on what is expected as
        arguments.

        :param cat: category of the event.
        :param T: date of the event.
        :param params: all needed parameters for this category of
            events.
        """
        self._events.add(cat, T, **params)
class ParamList(object):
    """
    :class:`list`-like class managing values for a parameter. Do
    most of what a list does except for adding and removing values. No
    methods :meth:`!reverse` and :meth:`!sort` are provided either
    as they make little sense. Expressions involving the ``+``
    and ``*`` arithmetic operators are supported, but return genuine
    :class:`list` instances that are disconnected from the original
    parameter holder.

    In addition to the methods documented below, :class:`.ParamList`
    instances support the following operations (where ``items`` is a
    :class:`.ParamList` instance):

    +--------------------------+----------------------------------------------------------------+
    |Expression                | Action                                                         |
    +==========================+================================================================+
    |``len(items)``            | Number of items                                                |
    +--------------------------+----------------------------------------------------------------+
    |``items[i]``              | Get *i*\ th item                                               |
    +--------------------------+----------------------------------------------------------------+
    |``items[i:j]``            | Slice from *i* to *j*                                          |
    +--------------------------+----------------------------------------------------------------+
    |``items[i:j:k]``          | Slice from *i* to *j* with step *k*                            |
    +--------------------------+----------------------------------------------------------------+
    |``items[i] = value``      | Assign ``value`` to the *i*\ th item                           |
    +--------------------------+----------------------------------------------------------------+
    |``items[i:j] = values``   | Replace a slice of values (with a sequence of the same length) |
    +--------------------------+----------------------------------------------------------------+
    |``items[i:j:k] = values`` | Replace a slice of values (with a sequence of the same length) |
    +--------------------------+----------------------------------------------------------------+
    |``for item in items``     | Iterates over items                                            |
    +--------------------------+----------------------------------------------------------------+
    |``reversed(items)``       | Reversed iterator                                              |
    +--------------------------+----------------------------------------------------------------+
    |``item in items``         | Check if ``item`` is present among items                       |
    +--------------------------+----------------------------------------------------------------+
    |``items + other``         | Same as ``list(items) + other`` (returns a :class:`list`)      |
    +--------------------------+----------------------------------------------------------------+
    |``items * n``             | Same as ``list(items) * n`` (returns a :class:`list`)          |
    +--------------------------+----------------------------------------------------------------+
    |``str(items)``            | Representation of the instance as a list                       |
    +--------------------------+----------------------------------------------------------------+

    The indexing operator (``[]``) supports negative indexes (to count
    from the end) and slice operators, just as for the built-in type
    :class:`list`. However, all operators (such as ``del``) or methods
    (such as :meth:`!append`, :meth:`!extend`,
    :meth:`!remove`) that would change the length of the list are
    not available.
    """

    def __init__(self, getter, setter, num_values, check_item):
        """
        :param getter: callable ``getter(i)`` returning the *i*\ th value.
        :param setter: callable ``setter(i, v)`` assigning the *i*\ th value.
        :param num_values: number of items (may be updated externally
            through the ``_num_values`` attribute).
        :param check_item: callable returning a boolean telling if a
            candidate value is acceptable.
        """
        self._getter = getter
        self._setter = setter
        self._num_values = num_values
        self._check_item = check_item

    def _resolve_index(self, index):
        # Normalize a possibly negative index and check bounds.
        # Fix: the original shifted negative indexes once but never
        # re-checked the lower bound, so an index < -num_values was
        # silently passed to the getter/setter as a raw negative value.
        # ValueError (not IndexError) is kept for backward compatibility.
        if index < 0: index += self._num_values
        if index < 0 or index >= self._num_values:
            raise ValueError('list index out of range')
        return index

    def __getitem__(self, index):
        if isinstance(index, slice):
            # slice.indices() already clamps to the valid range
            return tuple(self._getter(i) for i in range(*index.indices(self._num_values)))
        return self._getter(self._resolve_index(index))

    def __setitem__(self, index, value):
        if isinstance(index, slice):
            rng = list(range(*index.indices(self._num_values)))
            values = list(value)   # also accepts one-shot iterables
            if len(rng) != len(values): raise ValueError('number of values is required to match number of indexes for a slice assignment')
            # validate all values before writing any, so that a failed
            # assignment does not leave the list partially updated
            for v in values:
                if not self._check_item(v): raise ValueError('value out of range')
            for i, v in zip(rng, values):
                self._setter(i, v)
        else:
            index = self._resolve_index(index)
            if not self._check_item(value): raise ValueError('value out of range')
            self._setter(index, value)

    def __iter__(self):
        for i in range(self._num_values):
            yield self._getter(i)

    def count(self, value):
        """
        Number of items that are equal to *value*.
        """
        cnt = 0
        for i in range(self._num_values):
            if self._getter(i) == value: cnt += 1
        return cnt

    def index(self, value, imin=0, imax=None):
        """
        Index of the first value matching *value*. The
        returned value is ``>=imin`` and ``<imax`` if they are provided.
        Raise a :exc:`ValueError` if the value is not found.
        """
        if imax is None: imax = self._num_values
        for i in range(imin, imax):
            if self._getter(i) == value: return i
        raise ValueError('{0} is not in list'.format(value))

    def __add__(self, other):
        return list(self) + other

    def __radd__(self, other):
        return other + list(self)

    def __mul__(self, num):
        return list(self) * num

    def __rmul__(self, num):
        return num * list(self)

    def __contains__(self, value):
        for i in range(self._num_values):
            if self._getter(i) == value: return True
        return False

    def __len__(self):
        return self._num_values

    def __str__(self):
        return str(list(self))

    def __repr__(self):
        # abbreviate long lists to keep interactive display readable
        if self._num_values <= 10: return str(list(self))
        else: return '<list of {0} values>'.format(self._num_values)

    def __reversed__(self):
        for i in reversed(range(self._num_values)):
            yield self._getter(i)
class ParamMatrix(object):
    """
    :class:`list`-like class managing values for a parameter as a
    matrix. Similar to :class:`.ParamList` except that it holds a
    matrix. It therefore implements less methods (no support for ``+``
    and ``*`` operators, no slices) and uses a double indexing system as
    in ``matrix[i, j] = x``, allowing both setting and getting values.
    The iterator, such as in ``for i in matrix``, flattens the matrix,
    returning all values from the first row, then those from the second
    row, and so on (replacing the diagonal values by ``None``).
    ``len(matrix)`` returns the dimension (number of rows, which is the
    same as the number of columns).

    In addition to the methods documented below, :class:`.ParamMatrix`
    instances support the following operations (where ``matrix`` is a
    :class:`.ParamMatrix` instance):

    +--------------------------+----------------------------------------------------------------------+
    |Expression                | Action                                                               |
    +==========================+======================================================================+
    |``len(matrix)``           | Size of the matrix (as number of rows/columns)                       |
    +--------------------------+----------------------------------------------------------------------+
    |``matrix[i,j]``           | Get *j*\ th item of the *i*\ th row (``None`` for diagonal)          |
    +--------------------------+----------------------------------------------------------------------+
    |``matrix[i,j] = value``   | Assign ``value`` to *(i,j)*\ th item (no diagonal)                   |
    +--------------------------+----------------------------------------------------------------------+
    |``for item in matrix``    | Iterates over items (row-first *flat* iteration)                     |
    +--------------------------+----------------------------------------------------------------------+
    |``reversed(matrix)``      | Reversed iterator                                                    |
    +--------------------------+----------------------------------------------------------------------+
    |``item in matrix``        | Check if ``item`` is present among items                             |
    +--------------------------+----------------------------------------------------------------------+
    |``str(matrix)``           | Representation of the instance as a nested list (including diagonal) |
    +--------------------------+----------------------------------------------------------------------+

    It is not possible to slice-set ranges of values in the matrix, but
    one can set all values at once from a compatible source with
    :meth:`~.ParamMatrix.get_values`.
    """

    def __init__(self, getter, setter, num_values, check_item, check_active, set_active):
        """
        :param getter: callable ``getter(i, j)`` returning the value at (i, j).
        :param setter: callable ``setter(i, j, v)`` assigning the value at (i, j).
        :param num_values: matrix dimension (may be updated externally
            through the ``_num_values`` attribute).
        :param check_item: callable returning a boolean telling if a
            candidate value is acceptable.
        :param check_active: callable returning a boolean telling if the
            underlying matrix is currently active.
        :param set_active: callable activating/deactivating the underlying
            matrix (``None`` for always-active matrices).
        """
        self._getter = getter
        self._setter = setter
        self._num_values = num_values
        self._check_item = check_item
        self._check_active = check_active
        self._set_active = set_active

    def get_values(self, other):
        """
        Import parameter values.

        Get all values from *other*, which must be another
        :class:`.ParamMatrix` or a nested sequence (such as a
        :class:`list` of :class:`list`). The dimension of *other* must
        be the same as the current instance. Note that if the input
        object is not a :class:`.ParamMatrix`, any value it can have on
        the diagonal will be ignored (but they must be present to avoid
        shifting indexes).
        """
        if isinstance(other, ParamMatrix):
            if other._num_values != self._num_values: raise ValueError('`other` must have the same dimension')
            if not other._check_active():
                # source is inactive: deactivate self as well and stop
                if self._check_active(): self._set_active(False)
                return
            if not self._check_active():
                self._set_active(True)
            for i in range(self._num_values):
                for j in range(self._num_values):
                    if i != j: self._setter(i, j, other._getter(i, j))
                    # use of other._getter requires that number has been checked
        else:
            if not self._check_active(): self._set_active(True)
            if len(other) != self._num_values: raise ValueError('`other` must have the same dimension')
            for i, row in enumerate(other):
                if len(row) != self._num_values: raise ValueError('`other` must have the same dimension')
                for j, value in enumerate(row):
                    if i != j:
                        if not self._check_item(value): raise ValueError('value out of range')
                        self._setter(i, j, value)

    def _resolve_index(self, idx):
        # Normalize a possibly negative (i, j) pair and check bounds.
        # Fix: the original assigned into `idx` (``idx[0] = ...``), which
        # raised TypeError for the documented ``matrix[i, j]`` tuple
        # syntax whenever a negative index was used.
        if len(idx) != 2: raise ValueError('expect a tuple of two values')
        i, j = idx
        if i < 0: i += self._num_values
        if j < 0: j += self._num_values
        if i < 0 or i >= self._num_values: raise ValueError('matrix index out of range')
        if j < 0 or j >= self._num_values: raise ValueError('matrix index out of range')
        return i, j

    def __getitem__(self, idx):
        i, j = self._resolve_index(idx)
        if i == j: return None   # diagonal is not stored
        return self._getter(i, j)

    def __setitem__(self, idx, value):
        i, j = self._resolve_index(idx)
        if i == j:
            # diagonal cells may only be "set" to None (a no-op)
            if value is not None: raise ValueError('cannot set matrix diagonal to a value different than `None`')
            else: return
        if not self._check_item(value): raise ValueError('value out of range')
        # activate the underlying matrix lazily on first write
        if not self._check_active(): self._set_active(True)
        self._setter(i, j, value)

    def __iter__(self):
        for i in range(self._num_values):
            for j in range(self._num_values):
                if i == j: yield None
                else: yield self._getter(i, j)

    def count(self, value):
        """
        Number of items that are equal to *value*.
        """
        cnt = 0
        for i in range(self._num_values):
            for j in range(self._num_values):
                if i!=j and self._getter(i, j) == value: cnt += 1
        return cnt

    def index(self, value, imin=0, imax=None, jmin=0, jmax=None):
        """
        Get the index of a given value.

        Return the ``(i, j)`` tuple of indexes of the first value
        (iterating first over rows and then over columns) matching
        *value*. The returned row index is ``>=imin`` and ``<imax`` and
        the returned column index is ``>=jmin`` and ``<jmax`` if they
        are provided. Raise a :exc:`ValueError` if the value is not
        found. The diagonal is not considered.
        """
        if imax is None: imax = self._num_values
        if jmax is None: jmax = self._num_values
        for i in range(imin, imax):
            for j in range(jmin, jmax):
                if i!=j and self._getter(i, j) == value: return (i, j)
        raise ValueError('{0} is not in matrix'.format(value))

    def __contains__(self, value):
        for i in range(self._num_values):
            for j in range(self._num_values):
                if i!=j and self._getter(i, j) == value: return True
        return False

    def __len__(self):
        return self._num_values

    def __str__(self):
        return str([[self[i,j] for j in range(self._num_values)] for i in range(self._num_values)])

    def __repr__(self):
        # abbreviate large matrices to keep interactive display readable
        if self._num_values <= 4: return str([[self[i,j] for j in range(self._num_values)] for i in range(self._num_values)])
        else: return '<matrix of {0}*{0} values>'.format(self._num_values)

    def __reversed__(self):
        for i in reversed(range(self._num_values)):
            for j in reversed(range(self._num_values)):
                if i == j: yield None
                else: yield self._getter(i, j)
class EventList(object):
"""
Class storing the list of demographic events. Even if events appear
as unsorted, they are internally sorted so that the user does not
have to care about their order. This class has limited
functionality: add events (but not removing them), accessing and
modifying parameters.
In addition to the methods documented below, :class:`.EventList`
instances support the following operations (where ``events`` is an
:class:`.EventList` instance):
+--------------------------+----------------------------------------------------------------------+
|Expression | Action |
+==========================+======================================================================+
| ``len(events)`` | Number of events currently loaded |
+--------------------------+----------------------------------------------------------------------+
| ``events[i]`` | Dctionary with parameters of the *i*\ th event (see note) |
+--------------------------+----------------------------------------------------------------------+
| ``for event in events`` | Same as ``for i in range(len(events)): event = events[i]`` |
+--------------------------+----------------------------------------------------------------------+
| ``str(events)``` | Representation of the instance, roughly as a list |
+--------------------------+----------------------------------------------------------------------+
The string representation is such as a string at the first level,
but each event is represented as an angle-bracketed delimited string
containing comma-separated ``key=value`` pairs with the parameter as
``key`` and its value as ``value`` (with an additional key,
``event_index``, providing the index of the event in the list).
.. note::
There is no way to modify content of the instance using the
indexing operator ``events[i]``. One must use
:meth:`~.EventList.update`.
More information on the list of events and their parameters is
available in the documentation for class :class:`.Simulator`.
"""
_map_enum = {
'size': _eggwrapper.Event.change_N,
'migr': _eggwrapper.Event.change_M,
'pair_migr': _eggwrapper.Event.change_Mp,
'growth': _eggwrapper.Event.change_G,
'selfing': _eggwrapper.Event.change_s,
'recombination': _eggwrapper.Event.change_R,
'bottleneck': _eggwrapper.Event.bottleneck,
'admixture': _eggwrapper.Event.admixture,
'sample': _eggwrapper.Event.delayed,
'merge': None
}
_required_parameters = { # not: T is enforced in add's signature
'size': set(['N']),
'migr': set(['M']),
'pair_migr': set(['src', 'dst', 'M']),
'growth': set(['G']),
'selfing': set(['s']),
'recombination': set(['R']),
'bottleneck': set(['S']),
'admixture': set(['src', 'dst', 'proba']),
'sample': set(['idx', 'label', 'num_chrom', 'num_indiv']),
'merge': set(['src', 'dst'])
}
_optional_parameters = {
'size': set(['idx']),
'growth': set(['idx']),
'selfing': set(['idx']),
'bottleneck': set(['idx'])
}
_all_parameters = {}
for i in _required_parameters:
_all_parameters[i] = _required_parameters[i] | _optional_parameters.get(i, set())
_all_parameters[i].add('T')
def __init__(self, npop, add, clear):
    # npop: number of populations (used to validate population indexes)
    # add/clear: callables of the low-level Params object that register
    # and drop Event changes, respectively
    self._npop = npop
    self._add = add
    self._clear = clear
    self._events = []   # list of (parameter dict, [Event objects]) pairs
def replace(self, other):
    """
    Replace list of events.

    Replace own list of events with the one in the
    :class:`.EventList` instance passed as *other*. The current list
    of events is dropped.
    """
    self.clear() # let self.clear() call self._clear() for security
    # iterating `other` yields copies of the parameter dicts (which
    # include `cat` and `T`), matching add()'s signature
    for event in other: self.add(** event)
def clear(self):
    """
    Clear list of events.
    """
    self._clear() # do this first or the Event objects might be garbage-collected!
    del self._events[:]
def __len__(self):
    # number of events currently loaded
    return len(self._events)
def __iter__(self):
    # yield a copy of each event's parameter dict so callers cannot
    # corrupt the internal state by mutating the yielded object
    for params, changes in self._events:
        yield dict(params)
def __str__(self):
    # list-like representation; each event is rendered as
    # `<event_index=i;key=value;...>`
    return ('[' + ', '.join([
        '<event_index={0};'.format(i)
        + ';'.join(['{0}={1}'.format(k,v) for (k,v) in d.items()]) + '>'
        for i, (d, changes) in enumerate(self._events)]) + ']')
def __repr__(self):
    # same rendering as str()
    return str(self)
def __getitem__(self, i):
    # return a copy of the i-th event's parameter dictionary
    # NOTE(review): negative indexes pass the bound test below and index
    # from the end of the internal list — confirm this is intended.
    if i >= len(self._events): raise ValueError('invalid event index')
    return dict(self._events[i][0])
    def add(self, cat, T, **params):
        """
        Add an event to the list. See the documentation for the class
        :class:`.Simulator` for more details on what is expected as
        arguments.

        :param cat: category of the event.
        :param T: date of the event.
        :param params: all needed parameters for this category of
            events.
        """
        # check that category is valid
        if cat not in self._all_parameters:
            raise ValueError('invalid event category: `{0}`'.format(cat))
        # check that all required parameters are present
        if not self._required_parameters[cat].issubset(params):
            raise ValueError('event `{0}` requires: {1}'.format(cat, ', '.join(self._required_parameters[cat] - set(params.keys()))))
        # create the required backend objects
        if cat == 'merge':
            # a merge has no backend counterpart: it is decomposed into
            # one admixture event with probability 1.0 plus, for every
            # other population, a pairwise-migration event setting the
            # rate towards the merged (source) population to 0.0
            changes = [_eggwrapper.Event(self._map_enum['admixture'], T)]
            if (params['src'] < 0 or params['src'] >= self._npop or
                params['dst'] < 0 or params['dst'] >= self._npop or
                params['src'] == params['dst']):
                raise ValueError('invalid population indexes provided for `merge` event')
            changes[0].set_index(params['src'])
            changes[0].set_dest(params['dst'])
            changes[0].set_param(1.0)
            for i in range(self._npop):
                if i != params['src']:
                    changes.append(_eggwrapper.Event(self._map_enum['pair_migr'], T))
                    changes[-1].set_param(0.0)
                    changes[-1].set_index(i)
                    changes[-1].set_dest(params['src'])
        else:
            # simple events map to a single backend Event object
            changes = [_eggwrapper.Event(self._map_enum[cat], T)]
        # add the event to the internal list
        params['cat'] = cat
        params['T'] = T
        self._events.append((params, changes))
        params_update = dict(params)
        # use the update method to set parameters (delete the event in case of an error)
        del params_update['cat']
        del params_update['T']
        try:
            self.update(len(self._events) - 1, **params_update)
        except ValueError:
            # roll back the append so a failed add leaves no trace
            del self._events[-1]
            raise
        # actually add changes to the lower-level Params
        for change in changes: self._add(change)
def update(self, event_index, **params):
"""
Modify any parameter from one of the event of the list. If an
event's date is modified, sorting will be updated automatically.
:param event_index: index of the event to modify (based on the
order in which events have been specified with :meth:`.add`,
which is the same order as events appear when representing
the instance or iterating).
:param params: keyword arguments specifying what parameters to
modify. Only parameters that have to be changed should be
specified.
"""
# check index, get category
if event_index < 0 or event_index >= len(self._events): raise IndexError('invalid event index')
cat = self._events[event_index][0]['cat']
change0 = self._events[event_index][1][0] # not used if complex change
# initialize bool to perform ad-hoc tests
test1 = False
test2 = False
# process complex events
if cat == 'merge':
if 'T' in params:
for change in self._events[event_index][1]:
change.move(params['T'])
if 'src' in params:
test2 = True
if params['src'] < 0 or params['src'] > self._npop: raise ValueError('event `{0}`, population index out of range'.format(cat))
self._events[event_index][1][0].set_index(params['src'])
for change in self._events[event_index][1][1:]:
change.set_dest(params['src'])
if 'dst' in params:
test2 = True
if params['dst'] < 0 or params['dst'] > self._npop: raise ValueError('event `{0}`, population index out of range'.format(cat))
self._events[event_index][1][0].set_dest(params['dst'])
# process simple events
else:
# process all parameters and check+set them
for key, value in params.items():
if key not in self._all_parameters[cat]:
raise ValueError('event `{0}`: unknown parameter: {1}'.format(cat, key))
if key == 'T':
if value < 0.0: raise ValueError('event `{0}`: date must be positive'.format(cat))
change0.move(value)
elif key == 'N':
if value <= 0.0: raise ValueError('event `{0}`: size must be strictly positive'.format(cat))
change0.set_param(value)
elif key == 'M':
if value < 0.0: raise ValueError('event `{0}`: size cannot be negative'.format(cat))
change0.set_param(value)
elif key == 'G':
change0.set_param(value)
elif key == 's':
if value < 0.0 or value > 1.0: raise ValueError('event `{0}`: selfing rate must be between 0 and 1'.format(cat))
change0.set_param(value)
elif key == 'R':
if value < 0.0: raise ValueError('event `{0}`: recombination rate must be positive'.format(cat))
change0.set_param(value)
elif key == 'S':
if value < 0.0: raise ValueError('event `{0}`: size must be positive'.format(cat))
change0.set_param(value)
elif key == 'proba':
if value < 0.0 or value > 1.0: raise ValueError('event `{0}`: probability must be between 0 and 1'.format(cat))
change0.set_param(value)
elif key == 'idx':
if value < 0 or value > self._npop: raise ValueError('event `{0}`: population index out of range'.format(cat))
change0.set_index(value)
elif key == 'src':
test2 = True
if value < 0 or value > self._npop: raise ValueError('event `{0}`: population index out of range'.format(cat))
change0.set_index(value)
elif key == 'dst':
test2 = True
if value < 0 or value > self._npop: raise ValueError('event `{0}`: population index out of range'.format(cat))
change0.set_dest(value)
elif key == 'label':
if not isinstance(value, str): raise TypeError('label must be a string')
change0.set_label(value)
elif key == 'num_chrom':
test1 = True
if value < 0: raise ValueError('event `{0}`: number of samples must be positive'.format(cat))
change0.set_number1(value)
elif key == 'num_indiv':
test1 = True
if value < 0: raise ValueError('event `{0}`: number of samples must be positive'.format(cat))
change0.set_number2(value)
else:
raise RuntimeError('unexpected error')
# perform ad-hoc tests
if test1 and change0.get_number1() + change0.get_number2() == 0:
raise ValueError('event `{0}`: there must be at least one sample'.format(cat))
if test2 and change0.get_index() == change0.get_dest():
raise ValueError('event `{0}`: cannot source/destination populations cannot be the same'.format(cat))
# copy all parameters
self._events[event_index][0].update(params) | PypiClean |
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/config.py | import collections
import itertools
import logging
import re
import marshmallow
from marshmallow import fields
import netaddr
from oslo_utils import importutils
from cloudferry.lib.utils import bases
from cloudferry.lib.utils import override
from cloudferry.lib.utils import query
from cloudferry import model
LOG = logging.getLogger(__name__)
# Migration flow factories that are always enabled; classes listed in the
# configuration file are appended to this list (see ClassList).
DEFAULT_MIGRATION_LIST = [
    'cloudferry.lib.os.migrate.keystone.TenantMigrationFlowFactory',
    'cloudferry.lib.os.migrate.glance.ImageMigrationFlowFactory',
    'cloudferry.lib.os.migrate.glance.ImageMemberMigrationFlowFactory',
    'cloudferry.lib.os.migrate.cinder.VolumeMigrationFlowFactory',
]
# Discoverers that are always enabled; classes listed in the configuration
# file are appended to this list (see ClassList).
DEFAULT_DISCOVERER_LIST = [
    'cloudferry.lib.os.discovery.node.ComputeNodeDiscoverer',
    'cloudferry.lib.os.discovery.keystone.UserDiscoverer',
    'cloudferry.lib.os.discovery.keystone.TenantDiscoverer',
    'cloudferry.lib.os.discovery.keystone.RoleDiscoverer',
    'cloudferry.lib.os.discovery.keystone.UserRoleDiscoverer',
    'cloudferry.lib.os.discovery.glance.ImageDiscoverer',
    'cloudferry.lib.os.discovery.glance.ImageMemberDiscoverer',
    'cloudferry.lib.os.discovery.cinder.VolumeDiscoverer',
    'cloudferry.lib.os.discovery.cinder.AttachmentDiscoverer',
    'cloudferry.lib.os.discovery.neutron.QuotaDiscoverer',
    'cloudferry.lib.os.discovery.neutron.NetworkDiscoverer',
    'cloudferry.lib.os.discovery.neutron.SubnetDiscoverer',
    'cloudferry.lib.os.discovery.nova.FlavorDiscoverer',
    'cloudferry.lib.os.discovery.nova.ServerDiscoverer',
]
class ValidationError(bases.ExceptionWithFormatting):
    """Raised when configuration data fails schema validation."""
    pass
class ConfigSchema(marshmallow.Schema):
    """Base marshmallow schema for configuration sections.

    ``config_section_class`` is set by :class:`ConfigSectionMetaclass` to
    the :class:`ConfigSection` subclass that deserialized data is
    converted into (see :meth:`to_config_section`).
    """

    config_section_class = None

    @marshmallow.validates_schema(pass_original=True)
    def check_unknown_fields(self, _, original_data):
        # Reject input keys that do not match any declared field;
        # ``load_from`` is the alternate input key name, if declared.
        possible_fields = set()
        for field_name, field in self.fields.items():
            possible_fields.add(field.load_from or field_name)
        for key in original_data:
            if key not in possible_fields:
                raise ValidationError(
                    'Unknown field provided for %s: %s' % (
                        self.config_section_class.__name__, key))

    @marshmallow.post_load
    def to_config_section(self, data):
        # Convert the deserialized dict into a ConfigSection instance.
        # pylint: disable=not-callable
        assert self.config_section_class is not None
        return self.config_section_class(**data)

    def handle_error(self, error, data):
        # marshmallow error hook: flatten the (possibly nested) error
        # structure into a list of readable messages.
        # super(ConfigSchema, self).handle_error(error, data)
        result = []
        self._format_messages(result, [], error.message)
        raise ValidationError(*result)

    @classmethod
    def _format_messages(cls, result, keys, messages):
        # According to marshmallow docs, error.message can be an error message,
        # list of error messages, or dict of error messages.
        if isinstance(messages, dict):
            cls._format_error_dict(result, keys, messages)
        elif isinstance(messages, list):
            cls._format_error_list(result, keys, messages)
        elif isinstance(messages, basestring):
            cls._format_error_str(result, keys, messages)

    @classmethod
    def _format_error_dict(cls, result, keys, messages):
        # Recurse into per-field error dicts, accumulating the key path.
        assert isinstance(messages, dict)
        for field, message in messages.items():
            cls._format_messages(result, keys + [field], message)

    @classmethod
    def _format_error_list(cls, result, keys, messages):
        assert isinstance(messages, list)
        for message in messages:
            cls._format_messages(result, keys, message)

    @classmethod
    def _format_error_str(cls, result, keys, message):
        # Leaf case: render one message with its key path, if any.
        assert isinstance(message, basestring)
        if keys:
            result.append(
                'Error in {classname}:{keys}: {message}'.format(
                    classname=cls.config_section_class.__name__,
                    keys='->'.join(keys), message=message))
        else:
            result.append(message)
class ConfigSectionMetaclass(type):
    """Metaclass that splits a ConfigSection class body in two: the
    marshmallow fields are moved into a generated ``<Name>Schema``
    companion class (stored as ``schema_class``), while everything else
    stays on the section class itself."""

    def __new__(mcs, name, parents, dct):
        schema_class = None
        if parents == (ConfigSchema,):
            # Trick to make pylint think that ConfigSection subclasses have all
            # the methods of Schema so that it can check various methods that
            # will go to schema (like marshmallow.validates_*).
            parents = (bases.Hashable, bases.Representable)
        else:
            # Collect declared marshmallow fields and schema-level hooks
            # (anything tagged by marshmallow decorators).
            schema_fields = {}
            for key in dct:
                value = dct[key]
                if isinstance(value, fields.FieldABC) or \
                        hasattr(value, '__marshmallow_tags__'):
                    schema_fields[key] = value
            for key in schema_fields:
                del dct[key]
            schema_class = type(name + 'Schema', (ConfigSchema,),
                                schema_fields)
            dct['schema_class'] = schema_class
        config_section_class = super(ConfigSectionMetaclass, mcs).__new__(
            mcs, name, parents, dct)
        if schema_class is not None:
            # Link the generated schema back to its section class so
            # post_load can instantiate it.
            schema_class.config_section_class = config_section_class
        return config_section_class
class ConfigSection(ConfigSchema):
    """Base class for configuration sections.

    Subclasses declare marshmallow fields in their body; the metaclass
    moves them to a companion schema (``schema_class``) and the
    constructor stores one attribute per declared field.
    """
    __metaclass__ = ConfigSectionMetaclass
    schema_class = None

    def __init__(self, **kwargs):
        super(ConfigSection, self).__init__()
        # pylint: disable=not-callable
        if self.schema_class is None:
            return
        for field_name in self.schema_class().fields:
            # Check that field wasn't initialized in derived class
            if not hasattr(self, field_name):
                if field_name not in kwargs:
                    # NOTE(review): relies on ExceptionWithFormatting
                    # %-formatting its constructor arguments -- confirm
                    raise ValidationError('Configuration key missing: %s',
                                          field_name)
                setattr(self, field_name, kwargs.pop(field_name))
        assert not kwargs, 'kwargs should only contain field values'
class ClassList(fields.Field):
    """Field deserializing a list of dotted class names into class
    objects; the names in *initial_list* are always imported first and
    prepended to the result."""

    default_error_messages = {
        'import': 'failed to import {classname}'
    }

    def __init__(self, initial_list):
        # missing=list: an absent value deserializes to just the
        # initial (default) classes
        super(ClassList, self).__init__(missing=list)
        self.initial_list = initial_list

    def _deserialize(self, value, attr, data):
        if not isinstance(value, list):
            self.fail('type')
        result = []
        for class_qualname in itertools.chain(self.initial_list, value):
            try:
                result.append(importutils.import_class(class_qualname))
            except ImportError:
                self.fail('import', classname=class_qualname)
        return result
class DictField(fields.Field):
    """Field deserializing a mapping by applying *key_field* to every
    key and *nested_field* to every value."""

    def __init__(self, key_field, nested_field, **kwargs):
        super(DictField, self).__init__(**kwargs)
        self.key_field = key_field
        self.nested_field = nested_field

    def _deserialize(self, value, attr, data):
        if not isinstance(value, dict):
            self.fail('type')
        ret = {}
        for key, val in value.items():
            k = self.key_field.deserialize(key)
            v = self.nested_field.deserialize(val)
            ret[k] = v
        return ret
class IPNetworkField(fields.String):
    """Field deserializing a string into an IPv4
    :class:`netaddr.IPNetwork`."""
    # note: the redundant no-op __init__ override was removed

    default_error_messages = {
        'invalid': 'Not a valid IPv4 network string.'
    }

    def _deserialize(self, value, attr, data):
        # validate/coerce to a string first, then parse as IPv4 network
        string_val = super(IPNetworkField, self)._deserialize(
            value, attr, data)
        try:
            return netaddr.IPNetwork(string_val, version=4)
        except netaddr.AddrFormatError:
            self.fail('invalid')
class PortRangeField(fields.String):
    """Field deserializing a ``"<low>-<high>"`` string into a
    ``(low, high)`` tuple of ints."""
    # note: the redundant no-op __init__ override was removed

    default_error_messages = {
        # fixed: this message was a copy-paste of IPNetworkField's
        # "Not a valid IPv4 network string."
        'invalid': 'Not a valid port range string.'
    }
    regex = re.compile(r'^([0-9]+)-([0-9]+)$')

    def _deserialize(self, value, attr, data):
        string_val = super(PortRangeField, self)._deserialize(
            value, attr, data)
        match = self.regex.match(string_val)
        if match:
            # NOTE(review): low <= high is not enforced -- confirm
            # whether reversed ranges should be rejected here
            return int(match.group(1)), int(match.group(2))
        else:
            self.fail('invalid')
class OverrideRulesField(fields.Dict):
    """Field deserializing a mapping of attribute name to a list of raw
    rules into lists of :class:`override.OverrideRule` objects."""
    # note: the redundant no-op __init__ override was removed

    def _deserialize(self, value, attr, data):
        dict_val = super(OverrideRulesField, self)._deserialize(
            value, attr, data)
        result = {}
        try:
            for attr_name, rules in dict_val.items():
                result[attr_name] = [override.OverrideRule(attr_name, rule)
                                     for rule in rules]
            return result
        except TypeError as ex:
            # Python 2 only: ``.message`` holds the exception text
            raise marshmallow.ValidationError(ex.message)
class FirstFit(fields.Field):
    """Field that tries each candidate field (given positionally) in
    order and returns the first successful deserialization; with
    ``many=True`` the input is a list and each element is deserialized
    independently."""

    def __init__(self, *args, **kwargs):
        many = kwargs.pop('many', False)
        super(FirstFit, self).__init__(**kwargs)
        self.many = many
        self.variants = args

    def _deserialize(self, value, attr, data):
        if self.many:
            return [self._do_deserialize(v) for v in value]
        else:
            return self._do_deserialize(value)

    def _do_deserialize(self, value):
        errors = []
        for field in self.variants:
            try:
                return field.deserialize(value)
            except marshmallow.ValidationError as ex:
                errors.append(ex)
        # no variant matched: report all collected errors together
        raise marshmallow.ValidationError([e.messages for e in errors])
class OneOrMore(fields.Field):
    """Field that accepts either a single value or a sequence of values
    of *base_type* and always deserializes to a list."""

    def __init__(self, base_type, **kwargs):
        super(OneOrMore, self).__init__(**kwargs)
        self.base_type = base_type

    def _deserialize(self, value, attr, data):
        # pylint: disable=protected-access
        # strings are Sequences too, but must be treated as scalars
        if isinstance(value, collections.Sequence) and \
                not isinstance(value, basestring):
            return [self.base_type._deserialize(v, attr, data)
                    for v in value]
        else:
            return [self.base_type._deserialize(value, attr, data)]
class SshGateway(ConfigSection):
    """SSH jump-host settings; ``gateway`` may nest another gateway to
    build a chain of hops."""
    hostname = fields.String()
    port = fields.Integer(missing=22)
    username = fields.String()
    password = fields.String(missing=None)
    private_key = fields.String(missing=None)
    gateway = fields.Nested('self', missing=None)
    connection_attempts = fields.Integer(missing=1)
    attempt_failure_sleep = fields.Float(missing=10.0)
class SshSettings(ConfigSection):
    """SSH connection settings for cloud nodes; loaded from the ``ssh``
    key of a cloud definition (see OpenstackCloud)."""
    port = fields.Integer(missing=22)
    username = fields.String(required=True)
    password = fields.String(missing=None)
    gateway = fields.Nested(SshGateway.schema_class, missing=None)
    connection_attempts = fields.Integer(missing=1)
    cipher = fields.String(missing=None)
    private_key = fields.String(missing=None)
    timeout = fields.Integer(missing=600)
    attempt_failure_sleep = fields.Float(missing=10.0)
class Scope(ConfigSection):
    """Authorization scope; at least one of the values must be set."""
    project_name = fields.String(missing=None)
    project_id = fields.String(missing=None)
    domain_id = fields.String(missing=None)

    # NOTE(review): the validator name looks copy-pasted from
    # Configuration; it actually checks that at least one scope value
    # is provided.
    @marshmallow.validates_schema(skip_on_field_errors=True)
    def check_migration_have_correct_source_and_dict(self, data):
        if all(data[k] is None for k in self.fields.keys()):
            raise ValidationError('At least one of %s shouldn\'t be None',
                                  self.fields.keys())
class Credential(ConfigSection):
    """OpenStack authentication credentials for one cloud."""
    auth_url = fields.Url()
    username = fields.String()
    password = fields.String()
    region_name = fields.String(missing=None)
    domain_id = fields.String(missing=None)
    https_insecure = fields.Boolean(missing=False)
    https_cacert = fields.String(missing=None)
    endpoint_type = fields.String(missing='admin')
def database_settings(database_name):
    """Return a ConfigSection subclass describing MySQL connection
    settings for *database_name*, creating and memoizing it (in this
    module's globals) on first use so each service gets exactly one
    settings class."""
    class_name = database_name.capitalize() + 'DatabaseSettings'
    global_vars = globals()
    if class_name in global_vars:
        # already generated for this database name
        return global_vars[class_name]

    class DatabaseSettings(ConfigSection):
        host = fields.String(missing='localhost')
        port = fields.Integer(missing=3306)
        username = fields.String(missing=database_name)
        password = fields.String()
        database = fields.String(missing=database_name)

    DatabaseSettings.__name__ = class_name
    global_vars[class_name] = DatabaseSettings
    return DatabaseSettings
class OpenstackCloud(ConfigSection):
    """Configuration of a single OpenStack cloud: credentials, scope,
    SSH access, service databases and object discoverers."""
    name = fields.String()
    request_attempts = fields.Integer(missing=3)
    request_failure_sleep = fields.Float(missing=5)
    operation_timeout = fields.Float(missing=90.0)
    unused_network = IPNetworkField(missing='10.192.0.0/16')
    unused_port_range = PortRangeField(missing='40000-50000')
    admin_role = fields.String(missing='admin')
    credential = fields.Nested(Credential.schema_class)
    scope = fields.Nested(Scope.schema_class)
    ssh_settings = fields.Nested(SshSettings.schema_class, load_from='ssh')
    discoverers = ClassList(DEFAULT_DISCOVERER_LIST)
    keystone_db = fields.Nested(database_settings('keystone').schema_class,
                                required=True)
    nova_db = fields.Nested(database_settings('nova').schema_class,
                            required=True)
    neutron_db = fields.Nested(database_settings('neutron').schema_class,
                               required=True)
    glance_db = fields.Nested(database_settings('glance').schema_class,
                              required=True)
    cinder_db = fields.Nested(database_settings('cinder').schema_class,
                              required=True)
    access_networks = fields.List(IPNetworkField(), missing=list)
    access_iface = fields.String(allow_none=True, missing=None)

    # NOTE(review): the message says `access_net` but the field is named
    # `access_networks` -- confirm which name users actually write.
    @marshmallow.validates_schema(skip_on_field_errors=True)
    def check_cloud_have_access_net_or_iface(self, data):
        if not data['access_networks'] and not data['access_iface']:
            raise marshmallow.ValidationError(
                'Values for `access_net` or `access_iface` are required '
                'to be specified in cloud configuration')

    def __init__(self, **kwargs):
        # replace the deserialized list of discoverer classes with an
        # ordered mapping keyed by the model class each one discovers
        self.discoverers = collections.OrderedDict()
        for discoverer_cls in kwargs.pop('discoverers'):
            self.discoverers[discoverer_cls.discovered_class] = discoverer_cls
        super(OpenstackCloud, self).__init__(**kwargs)
class Migration(ConfigSection):
    """Configuration of one migration: source/destination cloud names,
    the query selecting objects to migrate, per-type attribute override
    rules and the migration flow factories to use."""
    source = fields.String(required=True)
    destination = fields.String(required=True)
    objects = DictField(
        fields.String(),
        FirstFit(
            fields.String(),
            DictField(
                fields.String(),
                OneOrMore(fields.Raw())),
            many=True),
        required=True)
    overrides = DictField(fields.String(),
                          OverrideRulesField(),
                          missing=dict)
    migration_flow_factories = ClassList(DEFAULT_MIGRATION_LIST)

    @marshmallow.validates_schema(skip_on_field_errors=True)
    def check_override_rules(self, data):
        # every override must target a known model type and one of the
        # attributes declared on that model's schema
        overrides = data['overrides']
        for object_type, ovr in overrides.items():
            try:
                model_cls = model.get_model(object_type)
            except ImportError:
                raise marshmallow.ValidationError(
                    'Invalid object type "{0}"'.format(object_type))
            schema = model_cls.schema_class()
            for attribute, _ in ovr.items():
                if attribute not in schema.fields:
                    raise marshmallow.ValidationError(
                        'Invalid override rule: "{0}" schema don\'t have '
                        '"{1}" attribute.'.format(
                            object_type, attribute))

    def __init__(self, source, destination, objects, migration_flow_factories,
                 overrides, **kwargs):
        self.source = source
        self.destination = destination
        # compile the raw object selection into a Query
        self.query = query.Query(objects)
        self.migration_flow_factories = {}
        # Migration logic can be extended through migration_flow_factories
        # migration parameter
        for factory_class in migration_flow_factories:
            migrated_class = factory_class.migrated_class
            self.migration_flow_factories[migrated_class] = factory_class
        # re-key overrides by model class instead of type name
        self.overrides = {model.get_model(k): v for k, v in overrides.items()}
        super(Migration, self).__init__(objects=objects, **kwargs)
class Configuration(ConfigSection):
    """Top-level configuration: named clouds and named migrations
    between them."""
    clouds = DictField(
        fields.String(allow_none=False),
        fields.Nested(OpenstackCloud.schema_class, default=dict),
        required=True)
    migrations = DictField(
        fields.String(allow_none=False),
        fields.Nested(Migration.schema_class, default=dict),
        required=True)

    @marshmallow.validates_schema(skip_on_field_errors=True)
    def check_migration_have_correct_source_and_dict(self, data):
        # every migration must reference clouds defined in this file
        clouds = data['clouds']
        migrations = data.get('migrations', {})
        for migration_name, migration in migrations.items():
            if migration.source not in clouds:
                raise marshmallow.ValidationError(
                    'Migration "{0}" source "{1}" should be defined '
                    'in clouds'.format(migration_name, migration.source))
            if migration.destination not in clouds:
                raise marshmallow.ValidationError(
                    'Migration "{0}" destination "{1}" should be defined '
                    'in clouds'.format(migration_name, migration.destination))

    @marshmallow.pre_load
    def populate_cloud_names(self, data):
        # copy each cloud dict and inject its mapping key as its `name`
        # field. NOTE(review): the hook mutates `data` in place and
        # returns None -- relies on the marshmallow version in use
        # falling back to the original data; confirm before upgrading.
        clouds = data['clouds']
        for name, cloud in list(clouds.items()):
            cloud = dict(cloud)
            clouds[name] = cloud
            cloud['name'] = name
def load(data):
    """
    Loads and validates configuration.

    :param data: dictionary loaded from the configuration YAML file
    :return: Configuration instance
    :raises ValidationError: if *data* does not match the schema
    """
    # pylint: disable=not-callable
    schema = Configuration.schema_class(strict=True)
    # fixed: the return line carried a stray `| PypiClean` token, which
    # made this function raise NameError at runtime
    return schema.load(data).data
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_rhino/utilities/misc.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# Python 2/3 compatibility: provide `basestring` on Python 3
try:
    basestring
except NameError:
    basestring = str
import os
import sys
import ast
from compas_rhino.forms import TextForm
from compas_rhino.forms import ImageForm
import System
import rhinoscriptsyntax as rs
import Rhino
import clr
# make the Rhino.UI assembly importable from IronPython
clr.AddReference("Rhino.UI")
import Rhino.UI # noqa: E402
from Rhino.UI.Dialogs import ShowMessageBox # noqa: E402
# prefer the Eto-based form; fall back to Rhino's built-in dialog
try:
    from compas_rhino.forms import PropertyListForm
except ImportError:
    from Rhino.UI.Dialogs import ShowPropertyListBox
__all__ = [
    "wait",
    "get_tolerance",
    "toggle_toolbargroup",
    "pick_point",
    "browse_for_folder",
    "browse_for_file",
    "print_display_on",
    "display_message",
    "display_text",
    "display_image",
    "display_html",
    "update_settings",
    "update_named_values",
    "screenshot_current_view",
    "select_folder",
    "select_file",
    "unload_modules",
]
# ==============================================================================
# Truly miscellaneous :)
# ==============================================================================
def screenshot_current_view(
    path,
    width=1920,
    height=1080,
    scale=1,
    draw_grid=False,
    draw_world_axes=False,
    draw_cplane_axes=False,
    background=False,
):
    """Take a screenshot of the current view.

    Parameters
    ----------
    path : str
        The filepath for saving the screenshot.

    Other Parameters
    ----------------
    width : int, optional
    height : int, optional
    scale : float, optional
    draw_grid : bool, optional
    draw_world_axes : bool, optional
    draw_cplane_axes : bool, optional
    background : bool, optional

    Returns
    -------
    bool
        True if the command was successful.
        False otherwise.

    """
    # convert the boolean options to the Yes/No strings the command expects
    properties = [draw_grid, draw_world_axes, draw_cplane_axes, background]
    properties = ["Yes" if item else "No" for item in properties]
    scale = max(1, scale)  # clamp: the rhino command requires a scale of at least 1
    rs.EnableRedraw(True)
    rs.Sleep(0)
    # assemble and run the scripted ViewCaptureToFile command
    result = rs.Command(
        '-_ViewCaptureToFile "' + os.path.abspath(path) + '"'
        " Width="
        + str(width)
        + " Height="
        + str(height)
        + " Scale="
        + str(scale)
        + " DrawGrid="
        + properties[0]
        + " DrawWorldAxes="
        + properties[1]
        + " DrawCPlaneAxes="
        + properties[2]
        + " TransparentBackground="
        + properties[3]
        + " _enter",
        False,
    )
    rs.EnableRedraw(False)
    return result
def wait():
    """Make Rhino wait to prevent the spinning wheel from appearing.

    Calls ``Rhino.RhinoApp.Wait()`` and returns its result.
    """
    return Rhino.RhinoApp.Wait()
def get_tolerance():
    """Get the absolute tolerance.

    Returns
    -------
    float
        The tolerance, as reported by ``rs.UnitAbsoluteTolerance()``.

    """
    return rs.UnitAbsoluteTolerance()
def toggle_toolbargroup(rui, group):
    """Show or toggle a toolbar group from a Rhino toolbar collection file.

    Parameters
    ----------
    rui : str
        Path to the ``.rui`` toolbar collection file.
    group : str
        Name of the toolbar group.

    """
    # silently ignore a missing or invalid toolbar file
    if not os.path.exists(rui) or not os.path.isfile(rui):
        return
    collection = rs.IsToolbarCollection(rui)
    if not collection:
        # the collection is not open yet: open it and show the group
        collection = rs.OpenToolbarCollection(rui)
        if rs.IsToolbar(collection, group, True):
            rs.ShowToolbar(collection, group)
    else:
        # the collection is already open: toggle the group's visibility
        if rs.IsToolbar(collection, group, True):
            if rs.IsToolbarVisible(collection, group):
                rs.HideToolbar(collection, group)
            else:
                rs.ShowToolbar(collection, group)
def pick_point(message="Pick a point."):
    """Prompt the user to pick a point in the Rhino viewport.

    Returns the picked point as a list of coordinates, or None if the
    user cancelled.
    """
    picked = rs.GetPoint(message)
    return list(picked) if picked else None
# ==============================================================================
# File system
# ==============================================================================
def browse_for_folder(message=None, default=None):
    """Open a folder-selection dialog and return the chosen folder."""
    return rs.BrowseForFolder(folder=default, message=message, title="compas")


select_folder = browse_for_folder  # alias kept for backward compatibility
def browse_for_file(title=None, folder=None, filter=None):
    """Open a file-open dialog, optionally pre-filtered by file type.

    ``filter`` may be one of the shorthand names ``'json'``, ``'obj'``
    or ``'fofin'``; any other value (including None) is passed to Rhino
    unchanged.
    """
    known_filters = {
        "json": "JSON files (*.json)|*.json||",
        "obj": "OBJ files (*.obj)|*.obj||",
        "fofin": "FOFIN session files (*.fofin)|*.fofin||",
    }
    filter = known_filters.get(filter, filter)
    return rs.OpenFileName(title, filter=filter, folder=folder)


select_file = browse_for_file
# ==============================================================================
# Display
# ==============================================================================
def print_display_on(on=True):
    """Switch Rhino's PrintDisplay mode on or off."""
    state = "State On Color Display Thickness 1" if on else "State Off"
    rs.Command("_PrintDisplay {} _Enter".format(state))
def display_message(message):
    """Show *message* in a modal Rhino message box titled "Message"."""
    return ShowMessageBox(message, "Message")
def display_text(text, title="Text", width=800, height=600):
    """Show text in a modal form.

    *text* may be a single string or a list/tuple of lines, which are
    joined with the platform's newline sequence.
    """
    if isinstance(text, (list, tuple)):
        newline = System.Environment.NewLine
        text = newline.join(text)
    return TextForm(text, title, width, height).show()
def display_image(image, title="Image", width=800, height=600):
    """Show an image in a modal form."""
    form = ImageForm(image, title, width, height)
    return form.show()
def display_html():
    """Display HTML content (not implemented)."""
    raise NotImplementedError
# ==============================================================================
# Settings and attributes
# ==============================================================================
def update_named_values(names, values, message="", title="Update named values", evaluate=False):
    """Present an editable list of name/value pairs to the user.

    Tries the Eto-based ``PropertyListForm`` first and falls back to
    Rhino's built-in ``ShowPropertyListBox``. Returns the edited values
    (a list), or None if the user cancelled the Eto dialog. When
    *evaluate* is True, each value is parsed with ``ast.literal_eval``
    where possible, leaving it unchanged otherwise.
    """
    try:
        dialog = PropertyListForm(names, values)
    except Exception:
        # PropertyListForm unavailable: use the built-in dialog
        values = ShowPropertyListBox(message, title, names, values)
    else:
        if dialog.ShowModal(Rhino.UI.RhinoEtoApp.MainWindow):
            values = dialog.values
        else:
            # user cancelled the dialog
            values = None
    if evaluate:
        if values:
            values = list(values)
            for i in range(len(values)):
                value = values[i]
                try:
                    value = ast.literal_eval(value)
                except (TypeError, ValueError, SyntaxError):
                    # keep the raw value if it is not a Python literal
                    pass
                values[i] = value
    return values
def update_settings(settings, message="", title="Update settings"):
    """Let the user edit a settings dict through a name/value dialog.

    The dict is updated in place (values parsed with
    ``ast.literal_eval`` where possible). Returns True if the user
    confirmed the dialog, False otherwise.
    """
    names = sorted(settings.keys())
    values = [str(settings[name]) for name in names]
    values = update_named_values(names, values, message=message, title=title)
    if values:
        values = list(values)
        for name, value in zip(names, values):
            try:
                settings[name] = ast.literal_eval(value)
            except (TypeError, ValueError, SyntaxError):
                # keep the raw string if it is not a Python literal
                settings[name] = value
        return True
    return False
def unload_modules(top_level_module_name):
    """Unloads all modules named starting with the specified string.

    This function eases the development workflow when editing a library that is
    used from Rhino/Grasshopper.

    Parameters
    ----------
    top_level_module_name : :obj:`str`
        Name of the top-level module to unload.

    Returns
    -------
    list
        List of unloaded module names.

    """
    # Materialise the matching names BEFORE mutating sys.modules: on
    # CPython 3 `filter` is lazy, so popping entries while iterating
    # over sys.modules would raise a RuntimeError and the function would
    # return an iterator instead of the documented list.
    modules = [name for name in sys.modules
               if name.startswith(top_level_module_name)]
    for module in modules:
        sys.modules.pop(module)
    return modules
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/utils/six.py |
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
# Vendored copy of Benjamin Peterson's `six` Python 2/3 compatibility
# library (version 1.2.0).
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Type aliases that differ between Python 2 and 3, plus MAXSIZE
# (the largest value a Py_ssize_t can hold).
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe it: len() must return a Py_ssize_t, so an oversized
        # __len__ overflows exactly when Py_ssize_t is 32-bit.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Add documentation to a function."""
    # assigning __doc__ after definition keeps the def lines compact
    func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package, so fetch the actual
    # submodule from sys.modules instead
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access and then
    replaces itself with the resolved object."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        # cache the resolved value on the instance
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2
    (*old*) and Python 3 (*new*, defaulting to *name*)."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved (and was possibly
    renamed) between Python 2 and Python 3 modules."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # default the Py3 attribute name to the Py2 one, then to *name*
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""


# every renamed module/attribute exposed through the ``moves`` pseudo-module
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# attach each moved item as a lazy attribute of the moves pseudo-module
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

# register the pseudo-module under "<this module>.moves" so it can be
# imported like a real submodule
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Register *move* so it becomes reachable as an attribute of six.moves."""
    attribute_name = move.name
    setattr(_MovedItems, attribute_name, move)
def remove_move(name):
    """Unregister *name* from six.moves; raise AttributeError if unknown."""
    try:
        # First try to drop a class-level descriptor registered via add_move().
        delattr(_MovedItems, name)
    except AttributeError:
        # Otherwise it may live directly in the module namespace.
        namespace = moves.__dict__
        try:
            del namespace[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
    # Python 3 spellings of the method/function introspection attributes and
    # of the dict iteration methods used by iterkeys()/itervalues()/etc.
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    # NOTE(review): "lists"/"iterlists" is not a plain-dict method; presumably
    # targets multi-value mappings such as Django's MultiValueDict -- confirm.
    _iterlists = "lists"
else:
    # Python 2 equivalents of the names above.
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
try:
    # Interpreters with the next() builtin: reuse it directly.
    advance_iterator = next
except NameError:
    # Older Python 2: emulate next() by calling the iterator's next() method.
    def advance_iterator(it):
        return it.next()
# Deliberately shadow the builtin name so "from six import next" is uniform.
next = advance_iterator
try:
    # Re-export the builtin where it exists.
    callable = callable
except NameError:
    # Some Python 3 versions lack callable(); emulate it by looking for a
    # __call__ slot anywhere in the object's type MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods: a function fetched from a class is
        # returned unchanged.
        return unbound
    # No next()/__next__ bridging is needed on Python 3.
    Iterator = object
else:
    def get_unbound_function(unbound):
        # Python 2 wraps class-level functions in unbound methods; unwrap.
        return unbound.im_func
    class Iterator(object):
        # Base class giving Python 2 iterators a next() implemented in terms
        # of the Python 3 style __next__().
        def next(self):
            return type(self).__next__(self)
    # Re-export the builtin as a module attribute on Python 2.
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Introspection accessors built on the version-specific attribute names chosen
# above (e.g. get_function_code(f) -> f.__code__ on py3, f.func_code on py2).
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Iterate over the keys of *d*, using the version-appropriate method."""
    method = getattr(d, _iterkeys)
    return iter(method(**kw))
def itervalues(d, **kw):
    """Iterate over the values of *d*, using the version-appropriate method."""
    method = getattr(d, _itervalues)
    return iter(method(**kw))
def iteritems(d, **kw):
    """Iterate over the (key, value) pairs of *d*."""
    method = getattr(d, _iteritems)
    return iter(method(**kw))
def iterlists(d, **kw):
    """Iterate over the (key, [values]) pairs of a multi-value mapping *d*."""
    method = getattr(d, _iterlists)
    return iter(method(**kw))
if PY3:
    def b(s):
        # Byte literal: encode the source text as Latin-1 bytes.
        return s.encode("latin-1")
    def u(s):
        # Text literal: Python 3 str is already unicode.
        return s
    # NOTE: only the minor version is checked; the enclosing PY3 branch
    # guarantees the major version is 3.
    if sys.version_info[1] <= 1:
        def int2byte(i):
            # 3.0/3.1 fallback -- presumably int.to_bytes() is unavailable
            # there (TODO confirm); build the single byte via a tuple.
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        # Byte literal: Python 2 str is already bytes.
        return s
    def u(s):
        # Text literal: decode escape sequences into a unicode object.
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    # exec and print are real functions on Python 3; fetch them by name so
    # this module remains parseable by Python 2.
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        # Re-raise with an explicit traceback without py2-only raise syntax.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    print_ = getattr(builtins, "print")
    del builtins
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        # The py2 exec *statement* is hidden in a string so that Python 3 can
        # still parse this file.
        exec("""exec _code_ in _globs_, _locs_""")
    # The three-argument raise is a SyntaxError on Python 3, so reraise() is
    # compiled at runtime from a string via exec_.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            # Coerce non-strings; basestring covers both str and unicode.
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # Switch to unicode output if any positional argument is unicode.
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Return a throwaway base class whose metaclass is *meta*.

    Deriving from the result gives the subclass metaclass ``meta`` and base
    class ``base`` without any Python 2/3 metaclass syntax differences.
    """
    namespace = {}
    return meta("NewBase", (base,), namespace)
### Additional customizations for Django ###
if PY3:
    # Python 3's unittest spells the method assertRaisesRegex; Python 2 used
    # the older assertRaisesRegexp spelling.
    _assertRaisesRegex = "assertRaisesRegex"
else:
    _assertRaisesRegex = "assertRaisesRegexp"
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate assertRaisesRegex(p) on *self*."""
    bound = getattr(self, _assertRaisesRegex)
    return bound(*args, **kwargs)
# Register the renamed low-level thread modules as moves.
add_move(MovedModule("_dummy_thread", "dummy_thread"))
add_move(MovedModule("_thread", "thread")) | PypiClean  # NOTE(review): the trailing '| PypiClean' looks like a file-concatenation artifact, not six code -- verify
/AutoYOLObile-0.0.10.tar.gz/AutoYOLObile-0.0.10/ModelOpt/AutoYOLO/autorun.py | import os
from ModelOpt.AutoYOLO.train import main_autorun
from ModelOpt.AutoYOLO.test import test_autorun
class YOLO_Autorun:
    """Interactive driver that prunes and then retrains a YOLO model.

    ``run()`` prompts for an accuracy (mAP@0.5) requirement, a latency
    requirement and a GPU device list, selects a pruning configuration that
    matches the requested accuracy, then executes the ADMM pruning pass
    followed by masked retraining via ``main_autorun``.

    Changes vs. the original: the dead shell-command strings (built but never
    executed) and the commented-out ``os.system``/checkpoint bookkeeping were
    removed, and the accuracy-to-configuration branches were collapsed into a
    single equivalent chain in ``_select_prune_file``.
    """

    # Sparsity scheme used by every supported pruning configuration.
    _SPARSITY = 'block-punched'

    def print_info(self, info):
        """Print *info* framed by separator rules so it stands out in logs."""
        print('---------------------------------------------------')
        print(info)
        print('---------------------------------------------------')

    @staticmethod
    def _select_prune_file(acc_req):
        """Map the requested mAP@0.5 to a pruning configuration name.

        Higher accuracy targets use less aggressive pruning; anything below
        50 (including the default of 48) falls back to the most aggressive
        configuration (v14), matching the original branch logic exactly.
        """
        if acc_req >= 52:
            return 'config_csdarknet53pan_v4'
        if acc_req >= 51:
            return 'config_csdarknet53pan_v8'
        if acc_req >= 50:
            return 'config_csdarknet53pan_v10'
        return 'config_csdarknet53pan_v14'

    def run(self):
        """Prompt for requirements, then prune and retrain the model.

        Side effects: reads from stdin, prints progress, and invokes
        ``main_autorun`` twice (ADMM prune, then masked retrain).  The final
        weights are written by the training code to ``./weights/best.pt``.
        """
        str1 = input("Please enter the accuracy (mAP 0.5) requirement (default 48):")
        str2 = input("Please enter the latency requirement (default 5.2):")
        str3 = input("Please enter the device number, split by ',' (default 0,1,2,3):")
        # Empty input selects the documented default.
        acc_req = float(str1) if str1 != '' else 48
        speed_req = float(str2) if str2 != '' else 5.2
        device = str3 if str3 != '' else '0,1,2,3'
        print('The accuracy requirement is {}'.format(acc_req))
        # NOTE(review): the latency requirement is only echoed; it does not
        # influence the configuration choice -- confirm this is intended.
        print('The speed requirement is {}'.format(speed_req))
        prune_file = self._select_prune_file(acc_req)
        sparsity = self._SPARSITY
        self.print_info('start prune.')
        main_autorun(['--admm',
                      '--epoch', '25',
                      '--cfg', 'cfg/csdarknet53s-panet-spp.cfg',
                      '--data', 'data/coco2014.data',
                      '--weights', 'weights/yolov4dense.pt',
                      '--device', device,
                      '--config-file', prune_file,
                      '--sparsity-type', sparsity])
        self.print_info('finish prune, start retrain.')
        main_autorun(['--masked-retrain',
                      '--epoch', '280',
                      '--cfg', 'cfg/csdarknet53s-panet-spp.cfg',
                      '--data', 'data/coco2014.data',
                      '--multi-scale',
                      '--device', device,
                      '--config-file', prune_file,
                      '--sparsity-type', sparsity])
        self.print_info('finish retrain with the above accuracy.')
        self.print_info('obtain final model')
        self.print_info('you can find the model at ./weights/best.pt')
if __name__ == "__main__":
    # Allow running this module directly as an interactive CLI.
    ac = YOLO_Autorun()
    ac.run()
/GENDIS-1.0.14.tar.gz/GENDIS-1.0.14/README.md | # GENDIS [](https://travis-ci.org/IBCNServices/GENDIS) [](https://badge.fury.io/py/GENDIS) [](https://gendis.readthedocs.io/en/latest/?badge=latest) [](https://pepy.tech/project/gendis)
## GENetic DIscovery of Shapelets
In the time series classification domain, shapelets are small subseries that are discriminative for a certain class. It has been shown that by projecting the original dataset to a distance space, where each axis corresponds to the distance to a certain shapelet, classifiers are able to achieve state-of-the-art results on a plethora of datasets.
This repository contains an implementation of `GENDIS`, an algorithm that searches for a set of shapelets in a genetic fashion. The algorithm is insensitive to its parameters (such as population size, crossover and mutation probability, ...) and can quickly extract a small set of shapelets that is able to achieve predictive performances similar (or better) to that of other shapelet techniques.
## Installation
We currently support Python 3.5 & Python 3.6. For installation, there are two alternatives:
1. Clone the repository `https://github.com/IBCNServices/GENDIS.git` and run `(python3 -m) pip install -r requirements.txt`
2. GENDIS is hosted on PyPi. You can just run `(python3 -m) pip install gendis` to add gendis to your dist-packages (you can use it from everywhere).
**Make sure NumPy and Cython are already installed (`pip install numpy` and `pip install Cython`), since they are required for the setup script.**
## Tutorial & Example
### 1. Loading & preprocessing the datasets
In a first step, we need to construct at least a matrix with timeseries (`X_train`) and a vector with labels (`y_train`). Additionally, test data can be loaded as well in order to evaluate the pipeline in the end.
```python
import pandas as pd
# Read in the datafiles
train_df = pd.read_csv(<DATA_FILE>)
test_df = pd.read_csv(<DATA_FILE>)
# Split into feature matrices and label vectors
X_train = train_df.drop('target', axis=1)
y_train = train_df['target']
X_test = test_df.drop('target', axis=1)
y_test = test_df['target']
```
### 2. Creating a `GeneticExtractor` object
Construct the object. For a list of all possible parameters, and a description, please refer to the documentation in the [code](gendis/genetic.py)
```python
from gendis.genetic import GeneticExtractor
genetic_extractor = GeneticExtractor(population_size=50, iterations=25, verbose=False,
normed=False, add_noise_prob=0.3, add_shapelet_prob=0.3,
wait=10, plot='notebook', remove_shapelet_prob=0.3,
crossover_prob=0.66, n_jobs=4, max_len=len(X_train) // 2)
```
### 3. Fit the `GeneticExtractor` and construct distance matrix
```python
shapelets = genetic_extractor.fit(X_train, y_train)
distances_train = genetic_extractor.transform(X_train)
distances_test = genetic_extractor.transform(X_test)
```
### 4. Fit ML classifier on constructed distance matrix
```python
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression()
lr.fit(distances_train, y_train)
print('Accuracy = {}'.format(accuracy_score(y_test, lr.predict(distances_test))))
```
### Example notebook
A simple example is provided in [this notebook](gendis/example.ipynb)
## Data
All datasets in this repository are downloaded from [timeseriesclassification](http://timeseriesclassification.com). Please refer to them appropriately when using any dataset.
## Paper experiments
In order to reproduce the results from the corresponding paper, please check out [this directory](gendis/experiments).
## Tests
We provide a few doctests and unit tests. To run the doctests: `python3 -m doctest -v <FILE>`, where `<FILE>` is the Python file you want to run the doctests from. To run unit tests: `nose2 -v`
## Contributing, Citing and Contact
If you have any questions, are experiencing bugs in the GENDIS implementation, or would like to contribute, please feel free to create an issue/pull request in this repository or contact me at gilles(dot)vandewiele(at)ugent(dot)be
If you use GENDIS in your work, please use the following citation:
```
@misc{v2019gendis,
title={GENDIS: GENetic DIscovery of Shapelets},
author={Gilles Vandewiele and Femke Ongenae and Filip De Turck},
year={2019},
eprint={1910.12948},
archivePrefix={arXiv},
primaryClass={cs.NE}
}
```
| PypiClean |
/ChiantiPy-0.15.1.tar.gz/ChiantiPy-0.15.1/docs/build/_static/language_data.js | var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"]; // English stopword list for search indexing. NOTE(review): the leading path and '|' look like a file-concatenation artifact -- verify
/* Non-minified version JS is _stemmer.js if file is provided */
/**
* Porter Stemmer
*/
var Stemmer = function() {
  // Porter suffix-stripping stemmer: stemWord(w) reduces an English word to
  // its stem so that searches match inflected forms of the same word.
  // Step-2 suffix -> replacement table (applied when the stem measure m > 0).
  var step2list = {
    ational: 'ate',
    tional: 'tion',
    enci: 'ence',
    anci: 'ance',
    izer: 'ize',
    bli: 'ble',
    alli: 'al',
    entli: 'ent',
    eli: 'e',
    ousli: 'ous',
    ization: 'ize',
    ation: 'ate',
    ator: 'ate',
    alism: 'al',
    iveness: 'ive',
    fulness: 'ful',
    ousness: 'ous',
    aliti: 'al',
    iviti: 'ive',
    biliti: 'ble',
    logi: 'log'
  };
  // Step-3 suffix -> replacement table.
  var step3list = {
    icate: 'ic',
    ative: '',
    alize: 'al',
    iciti: 'ic',
    ical: 'ic',
    ful: '',
    ness: ''
  };
  // Regex fragments describing the consonant/vowel structure of a word.
  var c = "[^aeiou]"; // consonant
  var v = "[aeiouy]"; // vowel
  var C = c + "[^aeiouy]*"; // consonant sequence
  var V = v + "[aeiou]*"; // vowel sequence
  // "Measure" patterns: m counts VC sequences in the stem.
  var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
  var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
  var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
  var s_v = "^(" + C + ")?" + v; // vowel in stem
  this.stemWord = function (w) {
    var stem;
    var suffix;
    var firstch;
    // NOTE(review): origword is never read below -- retained as-is.
    var origword = w;
    // Words shorter than 3 characters are returned unchanged.
    if (w.length < 3)
      return w;
    var re;
    var re2;
    var re3;
    var re4;
    // Temporarily capitalise a leading "y" so it is treated as a consonant.
    firstch = w.substr(0,1);
    if (firstch == "y")
      w = firstch.toUpperCase() + w.substr(1);
    // Step 1a: strip plural endings (-sses/-ies -> -ss/-i, trailing -s).
    re = /^(.+?)(ss|i)es$/;
    re2 = /^(.+?)([^s])s$/;
    if (re.test(w))
      w = w.replace(re,"$1$2");
    else if (re2.test(w))
      w = w.replace(re2,"$1$2");
    // Step 1b: strip -eed (when m>0) or -ed/-ing (when the stem has a vowel),
    // then tidy the ending: -at/-bl/-iz gain "e", doubled consonants are
    // undoubled, and a short CVC stem gains "e".
    re = /^(.+?)eed$/;
    re2 = /^(.+?)(ed|ing)$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      re = new RegExp(mgr0);
      if (re.test(fp[1])) {
        re = /.$/;
        w = w.replace(re,"");
      }
    }
    else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1];
      re2 = new RegExp(s_v);
      if (re2.test(stem)) {
        w = stem;
        re2 = /(at|bl|iz)$/;
        re3 = new RegExp("([^aeiouylsz])\\1$");
        re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
        if (re2.test(w))
          w = w + "e";
        else if (re3.test(w)) {
          re = /.$/;
          w = w.replace(re,"");
        }
        else if (re4.test(w))
          w = w + "e";
      }
    }
    // Step 1c: terminal "y" -> "i" when the stem contains a vowel.
    re = /^(.+?)y$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = new RegExp(s_v);
      if (re.test(stem))
        w = stem + "i";
    }
    // Step 2: map double suffixes to single ones via step2list (when m>0).
    re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = new RegExp(mgr0);
      if (re.test(stem))
        w = stem + step2list[suffix];
    }
    // Step 3: simplify -icate/-ative/... suffixes via step3list (when m>0).
    re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      suffix = fp[2];
      re = new RegExp(mgr0);
      if (re.test(stem))
        w = stem + step3list[suffix];
    }
    // Step 4: drop residual suffixes when m>1; "-ion" only after "s" or "t".
    re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
    re2 = /^(.+?)(s|t)(ion)$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = new RegExp(mgr1);
      if (re.test(stem))
        w = stem;
    }
    else if (re2.test(w)) {
      var fp = re2.exec(w);
      stem = fp[1] + fp[2];
      re2 = new RegExp(mgr1);
      if (re2.test(stem))
        w = stem;
    }
    // Step 5: drop a final "e" when m>1 (or m=1 without a short CVC ending),
    // and reduce a final "ll" to "l" when m>1.
    re = /^(.+?)e$/;
    if (re.test(w)) {
      var fp = re.exec(w);
      stem = fp[1];
      re = new RegExp(mgr1);
      re2 = new RegExp(meq1);
      re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
        w = stem;
    }
    re = /ll$/;
    re2 = new RegExp(mgr1);
    if (re.test(w) && re2.test(w)) {
      re = /.$/;
      w = w.replace(re,"");
    }
    // Restore the initial "y" that was capitalised above.
    if (firstch == "y")
      w = firstch.toLowerCase() + w.substr(1);
    return w;
  }
}
// Lookup table mapping Unicode code points to true when the character acts
// as a word separator for splitQuery() below.  Built once from a list of
// individual code points ("singles") plus inclusive [start, end] ranges
// covering punctuation, symbols and other non-word characters.
var splitChars = (function() {
    var result = {};
    var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
                   1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
                   2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
                   2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
                   3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
                   3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
                   4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
                   8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
                   11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
                   43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
    var i, j, start, end;
    // Mark each individual separator code point.
    for (i = 0; i < singles.length; i++) {
        result[singles[i]] = true;
    }
    var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
                  [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
                  [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
                  [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
                  [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
                  [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
                  [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
                  [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
                  [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
                  [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
                  [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
                  [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
                  [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
                  [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
                  [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
                  [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
                  [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
                  [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
                  [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
                  [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
                  [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
                  [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
                  [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
                  [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
                  [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
                  [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
                  [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
                  [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
                  [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
                  [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
                  [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
                  [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
                  [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
                  [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
                  [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
                  [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
                  [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
                  [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
                  [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
                  [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
                  [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
                  [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
                  [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
                  [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
                  [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
                  [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
                  [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
                  [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
                  [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
    // Mark every code point inside each inclusive [start, end] range.
    for (i = 0; i < ranges.length; i++) {
        start = ranges[i][0];
        end = ranges[i][1];
        for (j = start; j <= end; j++) {
            result[j] = true;
        }
    }
    return result;
})();
function splitQuery(query) {
    // Split `query` into search terms: maximal runs of characters whose code
    // points are not flagged in the splitChars delimiter table.
    var terms = [];
    var runStart = -1;
    var length = query.length;
    for (var pos = 0; pos < length; pos++) {
        var isDelimiter = splitChars[query.charCodeAt(pos)];
        if (!isDelimiter) {
            // Open a new run if we are not already inside one.
            if (runStart === -1) {
                runStart = pos;
            }
        } else if (runStart !== -1) {
            // A delimiter terminates the current run.
            terms.push(query.slice(runStart, pos));
            runStart = -1;
        }
    }
    // Flush a run that extends to the end of the string.
    if (runStart !== -1) {
        terms.push(query.slice(runStart));
    }
    return terms;
}
/Fangorn-0.3.2.tar.gz/Fangorn-0.3.2/README.txt | .. image:: https://bitbucket-badges.atlassian.io/badge/saaj/fangorn.svg?ref=default
:target: https://bitbucket.org/saaj/fangorn/addon/pipelines/home
.. image:: https://codecov.io/bitbucket/saaj/fangorn/branch/default/graph/badge.svg
:target: https://codecov.io/bitbucket/saaj/fangorn/branch/default
.. image:: https://badge.fury.io/py/Fangorn.png
:target: https://pypi.python.org/pypi/Fangorn
*******
Fangorn
*******
Nested Sets aka Modified Pre-order Tree Traversal (MPTT) *SQL* tree implemented in Python
for *MySQL* and *SQLite*. Uses both traversal markup (left, right) and adjacency list
parentId for more ad-hoc query flexibility.
Provides tree structure validation and "memorisation" via SQLite *:memory:* for quick reads.
Example
=======
We want to achieve the following tree. Node is represented by ``name id → parentId (l, r)``.
To output a tree this way ``fangorn.test.visualize`` function can be used.
.. sourcecode:: text
R 1 → None (1, 18)
└─A1 2 → 1 (2, 5)
└─B1 3 → 2 (3, 4)
└─A2 4 → 1 (6, 13)
└─B2 5 → 4 (7, 8)
└─B3 6 → 4 (9, 12)
└─C1 7 → 6 (10, 11)
└─A3 8 → 1 (14, 17)
└─B4 9 → 8 (15, 16)
First we need a table to represent the tree. And we want a tree node to have a name.
.. sourcecode:: python
import MySQLdb as mysql
conn = mysql.connect(user = 'guest', db = 'test')
conn.query('''
CREATE TABLE `node` (
`node_id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`parent_id` int(10) unsigned DEFAULT NULL,
`l` int(10) unsigned NOT NULL,
`r` int(10) unsigned NOT NULL,
`name` varchar(8) NOT NULL,
PRIMARY KEY (`node_id`),
KEY `l` (`l`),
KEY `r` (`r`),
KEY `parent_id` (`parent_id`),
CONSTRAINT `node_has_node` FOREIGN KEY (`parent_id`)
REFERENCES `node` (`node_id`)
ON DELETE CASCADE
ON UPDATE CASCADE
) ENGINE=InnoDB;
''')
Now we can create a tree instance. Note that the DAO that the tree relies on is expected to
support the *named* DB-API paramstyle (i.e. `WHERE node_id = :nodeId`). Transaction control
methods are also recommended to implement nested transaction support; moreover, the test suite
requires nested transactions to run. For `MySQLdb` and `sqlite3` there are compatibility
wrappers under `fangorn.compat`.
.. sourcecode:: python
import fangorn
from fangorn.compat.mysqldb import Mysqldb as MysqldbWrapper
tree = fangorn.NestedSetsTree(MysqldbWrapper(conn), 'node', ('name',))
rId = tree.add(dict(name = 'R'))
a1Id = tree.add(dict(name = 'A1'), parentId = rId)
tree.add(dict(name = 'B1'), parentId = a1Id)
a2Id = tree.add(dict(name = 'A2'), parentId = rId)
b2Id = tree.add(dict(name = 'B2'), parentId = a2Id)
b3Id = tree.add(dict(name = 'B3'), prevId = b2Id)
tree.add(dict(name = 'C1'), parentId = b3Id)
a3Id = tree.add(dict(name = 'A3'), parentId = rId)
tree.add(dict(name = 'B4'), parentId = a3Id)
tree.move(a1Id, rId)
tree.move(a3Id, prevId = a2Id)
Now we can play with the tree.
.. sourcecode:: python
print(tree.isDescendantOf(a2Id, 4)) # False
print(tree.isDescendantOf(a2Id, 6)) # True
print(tree.isDescendantOf(a2Id, 7)) # True
print(tree.isDescendantOf(a2Id, 9)) # False
print([n['name'] for n in tree.getChildren(a2Id)]) # ['B2', 'B3']
print([n['name'] for n in tree.getDescendants(a2Id)]) # ['B2', 'B3', 'C1']
print([n['name'] for n in tree.getPath(7)]) # ['R', 'A2', 'B3', 'C1']
print(tree.getNode(8)) # {'left': 14L, 'right': 17L, 'nodeId': 8L, 'name': 'A3', 'parentId': 1L}
print(tree.getParent(8)) # {'left': 1L, 'right': 18L, 'nodeId': 1L, 'name': 'R', 'parentId': None}
print(tree.getRoot()) # {'left': 1L, 'right': 18L, 'nodeId': 1L, 'name': 'R', 'parentId': None}
tree.edit(1, dict(name = 'RR'))
print(tree.getRoot()) # {'left': 1L, 'right': 18L, 'nodeId': 1L, 'name': 'RR', 'parentId': None}
    print([n['name'] for n in tree.getDescendants(a2Id)]) # ['B2', 'B3', 'C1']
tree.remove(b3Id)
    print([n['name'] for n in tree.getDescendants(a2Id)]) # ['B2']
For more usage examples look at project's
`test suite <https://bitbucket.org/saaj/fangorn/src/default/fangorn/test/>`_.
| PypiClean |
/Heterogeneous_Highway_Env-0.0.3-py3-none-any.whl/Heteogeneous_Highway_Env/vehicle/uncertainty/prediction.py | import copy
from typing import List, Tuple, Callable, Union, TYPE_CHECKING
import numpy as np
from highway_env import utils
from highway_env.interval import polytope, vector_interval_section, integrator_interval, \
interval_negative_part, intervals_diff, intervals_product, LPV, interval_absolute_to_local, \
interval_local_to_absolute
from highway_env.road.road import Route, LaneIndex, Road
from highway_env.utils import Vector
from highway_env.vehicle.behavior import LinearVehicle
from highway_env.vehicle.controller import MDPVehicle
from highway_env.vehicle.kinematics import Vehicle
if TYPE_CHECKING:
from highway_env.vehicle.objects import RoadObject
Polytope = Tuple[np.ndarray, List[np.ndarray]]
class IntervalVehicle(LinearVehicle):
"""
Estimator for the interval-membership of a LinearVehicle under parameter uncertainty.
The model trajectory is stored in a model_vehicle, and the lower and upper bounds of the states are stored
in a min_vehicle and max_vehicle. Note that these vehicles do not follow a proper Vehicle dynamics, and
are only used for storage of the bounds.
"""
def __init__(self,
road: Road,
position: Vector,
heading: float = 0,
speed: float = 0,
target_lane_index: LaneIndex = None,
target_speed: float = None,
route: Route = None,
enable_lane_change: bool = True,
timer: float = None,
theta_a_i: List[List[float]] = None,
theta_b_i: List[List[float]] = None,
data: dict = None) -> None:
"""
:param theta_a_i: The interval of possible acceleration parameters
:param theta_b_i: The interval of possible steering parameters
"""
super().__init__(road,
position,
heading,
speed,
target_lane_index,
target_speed,
route,
enable_lane_change,
timer)
self.theta_a_i = theta_a_i if theta_a_i is not None else LinearVehicle.ACCELERATION_RANGE
self.theta_b_i = theta_b_i if theta_b_i is not None else LinearVehicle.STEERING_RANGE
self.data = data
self.interval = VehicleInterval(self)
self.trajectory = []
self.interval_trajectory = []
self.longitudinal_lpv, self.lateral_lpv = None, None
self.previous_target_lane_index = self.target_lane_index
@classmethod
def create_from(cls, vehicle: LinearVehicle) -> "IntervalVehicle":
v = cls(vehicle.road,
vehicle.position,
heading=vehicle.heading,
speed=vehicle.speed,
target_lane_index=getattr(vehicle, 'target_lane_index', None),
target_speed=getattr(vehicle, 'target_speed', None),
route=getattr(vehicle, 'route', None),
timer=getattr(vehicle, 'timer', None),
theta_a_i=getattr(vehicle, 'theta_a_i', None),
theta_b_i=getattr(vehicle, 'theta_b_i', None),
data=getattr(vehicle, "data", None))
return v
def step(self, dt: float, mode: str = "partial") -> None:
self.store_trajectories()
if self.crashed:
self.interval = VehicleInterval(self)
else:
if mode == "partial":
# self.observer_step(dt)
self.partial_observer_step(dt)
elif mode == "predictor":
self.predictor_step(dt)
super().step(dt)
def observer_step(self, dt: float) -> None:
"""
Step the interval observer dynamics
:param dt: timestep [s]
"""
# Input state intervals
position_i = self.interval.position
v_i = self.interval.speed
psi_i = self.interval.heading
# Features interval
front_interval = self.get_front_interval()
# Acceleration features
phi_a_i = np.zeros((2, 3))
phi_a_i[:, 0] = [0, 0]
if front_interval:
phi_a_i[:, 1] = interval_negative_part(
intervals_diff(front_interval.speed, v_i))
# Lane distance interval
lane_psi = self.lane.heading_at(self.lane.local_coordinates(self.position)[0])
lane_direction = [np.cos(lane_psi), np.sin(lane_psi)]
diff_i = intervals_diff(front_interval.position, position_i)
d_i = vector_interval_section(diff_i, lane_direction)
d_safe_i = self.DISTANCE_WANTED + self.TIME_WANTED * v_i
phi_a_i[:, 2] = interval_negative_part(intervals_diff(d_i, d_safe_i))
# Steering features
phi_b_i = None
lanes = self.get_followed_lanes()
for lane_index in lanes:
lane = self.road.network.get_lane(lane_index)
longitudinal_pursuit = lane.local_coordinates(self.position)[0] + self.speed * self.TAU_PURSUIT
lane_psi = lane.heading_at(longitudinal_pursuit)
_, lateral_i = interval_absolute_to_local(position_i, lane)
lateral_i = -np.flip(lateral_i)
i_v_i = 1/np.flip(v_i, 0)
phi_b_i_lane = np.transpose(np.array([
[0, 0],
intervals_product(lateral_i, i_v_i)]))
# Union of candidate feature intervals
if phi_b_i is None:
phi_b_i = phi_b_i_lane
else:
phi_b_i[0] = np.minimum(phi_b_i[0], phi_b_i_lane[0])
phi_b_i[1] = np.maximum(phi_b_i[1], phi_b_i_lane[1])
# Commands interval
a_i = intervals_product(self.theta_a_i, phi_a_i)
b_i = intervals_product(self.theta_b_i, phi_b_i)
# Speeds interval
keep_stability = False
if keep_stability:
dv_i = integrator_interval(v_i - self.target_speed, self.theta_a_i[:, 0])
else:
dv_i = intervals_product(self.theta_a_i[:, 0], self.target_speed - np.flip(v_i, 0))
dv_i += a_i
dv_i = np.clip(dv_i, -self.ACC_MAX, self.ACC_MAX)
keep_stability = True
if keep_stability:
delta_psi = list(map(utils.wrap_to_pi, psi_i - lane_psi))
d_psi_i = integrator_interval(delta_psi, self.theta_b_i[:, 0])
else:
d_psi_i = intervals_product(self.theta_b_i[:, 0], lane_psi - np.flip(psi_i, 0))
d_psi_i += b_i
# Position interval
cos_i = [-1 if psi_i[0] <= np.pi <= psi_i[1] else min(map(np.cos, psi_i)),
1 if psi_i[0] <= 0 <= psi_i[1] else max(map(np.cos, psi_i))]
sin_i = [-1 if psi_i[0] <= -np.pi/2 <= psi_i[1] else min(map(np.sin, psi_i)),
1 if psi_i[0] <= np.pi/2 <= psi_i[1] else max(map(np.sin, psi_i))]
dx_i = intervals_product(v_i, cos_i)
dy_i = intervals_product(v_i, sin_i)
# Interval dynamics integration
self.interval.speed += dv_i * dt
self.interval.heading += d_psi_i * dt
self.interval.position[:, 0] += dx_i * dt
self.interval.position[:, 1] += dy_i * dt
# Add noise
noise = 0.3
self.interval.position[:, 0] += noise * dt * np.array([-1, 1])
self.interval.position[:, 1] += noise * dt * np.array([-1, 1])
self.interval.heading += noise * dt * np.array([-1, 1])
    def predictor_step(self, dt: float) -> None:
        """
        Step the interval predictor dynamics.

        :param dt: timestep [s]
        """
        # Create longitudinal and lateral LPVs (no-op if already initialized)
        self.predictor_init()
        # Detect lane change and update intervals of local coordinates with the new frame
        if self.target_lane_index != self.previous_target_lane_index:
            position_i = self.interval.position
            target_lane = self.road.network.get_lane(self.target_lane_index)
            previous_target_lane = self.road.network.get_lane(self.previous_target_lane_index)
            longi_i, lat_i = interval_absolute_to_local(position_i, target_lane)
            # Re-express the heading interval relative to the new lane by shifting with the
            # difference of lane headings at the mean longitudinal position.
            psi_i = self.interval.heading + \
                target_lane.heading_at(longi_i.mean()) - previous_target_lane.heading_at(longi_i.mean())
            # Shift the lateral LPV state so its mean matches the interval in the new frame
            # (the interval width is preserved by adding the same delta to both bounds).
            x_i_local_unrotated = np.transpose([lat_i, psi_i])
            new_x_i_t = self.lateral_lpv.change_coordinates(x_i_local_unrotated, back=False, interval=True)
            delta = new_x_i_t.mean(axis=0) - self.lateral_lpv.x_i_t.mean(axis=0)
            self.lateral_lpv.x_i_t += delta
            # Same idea for the longitudinal LPV: overwrite the position coordinate with the
            # interval expressed in the new frame, then re-center the transformed state.
            x_i_local_unrotated = self.longitudinal_lpv.change_coordinates(self.longitudinal_lpv.x_i_t,
                                                                           back=True,
                                                                           interval=True)
            x_i_local_unrotated[:, 0] = longi_i
            new_x_i_t = self.longitudinal_lpv.change_coordinates(x_i_local_unrotated,
                                                                 back=False,
                                                                 interval=True)
            self.longitudinal_lpv.x_i_t += new_x_i_t.mean(axis=0) - self.longitudinal_lpv.x_i_t.mean(axis=0)
            self.previous_target_lane_index = self.target_lane_index
        # Step both predictors
        self.longitudinal_lpv.step(dt)
        self.lateral_lpv.step(dt)
        # Backward coordinates change
        x_i_long = self.longitudinal_lpv.change_coordinates(self.longitudinal_lpv.x_i_t, back=True, interval=True)
        x_i_lat = self.lateral_lpv.change_coordinates(self.lateral_lpv.x_i_t, back=True, interval=True)
        # Conversion from rectified (lane-local) to true (absolute) coordinates
        target_lane = self.road.network.get_lane(self.target_lane_index)
        position_i = interval_local_to_absolute(x_i_long[:, 0], x_i_lat[:, 0], target_lane)
        self.interval.position = position_i
        # Longitudinal state column 2 is the ego speed (see predictor_init x0 layout)
        self.interval.speed = x_i_long[:, 2]
        # Lateral state column 1 is the heading (see predictor_init x0 layout)
        self.interval.heading = x_i_lat[:, 1]
    def predictor_init(self) -> None:
        """Initialize the longitudinal and lateral LPV models used for interval prediction.

        Each predictor is only created once (when the corresponding attribute is
        still unset); later calls leave existing predictors untouched.
        """
        position_i = self.interval.position
        target_lane = self.road.network.get_lane(self.target_lane_index)
        longi_i, lat_i = interval_absolute_to_local(position_i, target_lane)
        v_i = self.interval.speed
        # Heading relative to the followed lane, evaluated at the mean longitudinal position
        psi_i = self.interval.heading - self.lane.heading_at(longi_i.mean())
        # Longitudinal predictor
        if not self.longitudinal_lpv:
            front_interval = self.get_front_interval()
            # LPV specification
            if front_interval:
                f_longi_i, _ = interval_absolute_to_local(front_interval.position, target_lane)
                f_pos = f_longi_i[0]
                f_vel = front_interval.speed[0]
            else:
                # No front vehicle: use zero position/speed placeholders
                f_pos, f_vel = 0, 0
            # State layout: [ego position, front position, ego speed, front speed]
            x0 = [longi_i[0], f_pos, v_i[0], f_vel]
            # Equilibrium: presumably the desired gap behind the front vehicle at
            # target speed -- TODO confirm against the LPV model definition.
            center = [-self.DISTANCE_WANTED - self.target_speed * self.TIME_WANTED,
                      0,
                      self.target_speed,
                      self.target_speed]
            noise = 1
            b = np.eye(4)
            # Disturbance only enters the first (ego position) equation
            d = np.array([[1], [0], [0], [0]])
            omega_i = np.array([[-1], [1]]) * noise
            u = [[self.target_speed], [self.target_speed], [0], [0]]
            a0, da = self.longitudinal_matrix_polytope()
            self.longitudinal_lpv = LPV(x0, a0, da, b, d, omega_i, u, center=center)
        # Lateral predictor
        if not self.lateral_lpv:
            # LPV specification; state layout: [lateral position, heading]
            x0 = [lat_i[0], psi_i[0]]
            center = [0, 0]
            noise = 0.5
            b = np.identity(2)
            d = np.array([[1], [0]])
            omega_i = np.array([[-1], [1]]) * noise
            u = [[0], [0]]
            a0, da = self.lateral_matrix_polytope()
            self.lateral_lpv = LPV(x0, a0, da, b, d, omega_i, u, center=center)
def longitudinal_matrix_polytope(self) -> Polytope:
return IntervalVehicle.parameter_box_to_polytope(self.theta_a_i, self.longitudinal_structure)
def lateral_matrix_polytope(self) -> Polytope:
return IntervalVehicle.parameter_box_to_polytope(self.theta_b_i, self.lateral_structure)
@staticmethod
def parameter_box_to_polytope(parameter_box: np.ndarray, structure: Callable) -> Polytope:
a, phi = structure()
a_theta = lambda params: a + np.tensordot(phi, params, axes=[0, 0])
return polytope(a_theta, parameter_box)
def get_front_interval(self) -> "VehicleInterval":
# TODO: For now, we assume the front vehicle follows the models' front vehicle
front_vehicle, _ = self.road.neighbour_vehicles(self)
if front_vehicle:
if isinstance(front_vehicle, IntervalVehicle):
# Use interval from the observer estimate of the front vehicle
front_interval = front_vehicle.interval
else:
# The front vehicle trajectory interval is not being estimated, so it should be considered as certain.
# We use a new observer created from that current vehicle state, which will have full certainty.
front_interval = IntervalVehicle.create_from(front_vehicle).interval
else:
front_interval = None
return front_interval
def get_followed_lanes(self, lane_change_model: str = "model", squeeze: bool = True) -> List[LaneIndex]:
"""
Get the list of lanes that could be followed by this vehicle.
:param lane_change_model: - model: assume that the vehicle will follow the lane of its model behaviour.
- all: assume that any lane change decision is possible at any timestep
- right: assume that a right lane change decision is possible at any timestep
:param squeeze: if True, remove duplicate lanes (at boundaries of the road)
:return: the list of followed lane indexes
"""
lanes = []
if lane_change_model == "model":
lanes = [self.target_lane_index]
elif lane_change_model == "all":
lanes = self.road.network.side_lanes(self.target_lane_index) + [self.target_lane_index]
elif lane_change_model == "right":
lanes = [self.target_lane_index]
_from, _to, _id = self.target_lane_index
if _id < len(self.road.network.graph[_from][_to]) - 1 \
and self.road.network.get_lane((_from, _to, _id + 1)).is_reachable_from(self.position):
lanes += [(_from, _to, _id + 1)]
elif not squeeze:
lanes += [self.target_lane_index] # Right lane is also current lane
return lanes
    def partial_observer_step(self, dt: float, alpha: float = 0) -> None:
        """
        Step the boundary parts of the current state interval.

        1. Split x_i(t) into two upper and lower intervals x_i_-(t) and x_i_+(t)
        2. Propagate their observer dynamics x_i_-(t+dt) and x_i_+(t+dt)
        3. Merge the resulting intervals together to x_i(t+dt).

        :param dt: timestep [s]
        :param alpha: ratio of the full interval that defines the boundaries
        """
        # 1. Split x_i(t) into two upper and lower intervals x_i_-(t) and x_i_+(t)
        o = self.interval
        v_minus = IntervalVehicle.create_from(self)
        v_minus.interval = copy.deepcopy(self.interval)
        # Lower part: keep the lower bound, pull the upper bound to an alpha-interpolation of the two bounds
        v_minus.interval.position[1, :] = (1 - alpha) * o.position[0, :] + alpha * o.position[1, :]
        v_minus.interval.speed[1] = (1 - alpha) * o.speed[0] + alpha * o.speed[1]
        v_minus.interval.heading[1] = (1 - alpha) * o.heading[0] + alpha * o.heading[1]
        v_plus = IntervalVehicle.create_from(self)
        v_plus.interval = copy.deepcopy(self.interval)
        # Upper part: symmetric construction, keeping the upper bound
        v_plus.interval.position[0, :] = alpha * o.position[0, :] + (1 - alpha) * o.position[1, :]
        v_plus.interval.speed[0] = alpha * o.speed[0] + (1 - alpha) * o.speed[1]
        v_plus.interval.heading[0] = alpha * o.heading[0] + (1 - alpha) * o.heading[1]
        # 2. Propagate their observer dynamics x_i_-(t+dt) and x_i_+(t+dt)
        # Shallow-copy the road and substitute self by each split copy, so each copy
        # observes the same traffic while stepping its own interval.
        v_minus.road = copy.copy(v_minus.road)
        v_minus.road.vehicles = [v if v is not self else v_minus for v in v_minus.road.vehicles]
        v_plus.road = copy.copy(v_plus.road)
        v_plus.road.vehicles = [v if v is not self else v_plus for v in v_plus.road.vehicles]
        v_minus.observer_step(dt)
        v_plus.observer_step(dt)
        # 3. Merge the resulting intervals together to x_i(t+dt).
        self.interval.position = np.array([v_minus.interval.position[0], v_plus.interval.position[1]])
        self.interval.speed = np.array([v_minus.interval.speed[0], v_plus.interval.speed[1]])
        self.interval.heading = np.array([min(v_minus.interval.heading[0], v_plus.interval.heading[0]),
                                          max(v_minus.interval.heading[1], v_plus.interval.heading[1])])
def store_trajectories(self) -> None:
"""Store the current model, min and max states to a trajectory list."""
self.trajectory.append(LinearVehicle.create_from(self))
self.interval_trajectory.append(copy.deepcopy(self.interval))
def handle_collisions(self, other: 'RoadObject', dt: float) -> None:
"""
Worst-case collision check.
For robust planning, we assume that MDPVehicles collide with the uncertainty set of an IntervalVehicle,
which corresponds to worst-case outcome.
:param other: the other vehicle
:param dt: a timestep
"""
if not isinstance(other, MDPVehicle):
super().handle_collisions(other)
return
if not self.collidable or self.crashed or other is self:
return
# Fast rectangular pre-check
if not utils.point_in_rectangle(other.position,
self.interval.position[0] - self.LENGTH,
self.interval.position[1] + self.LENGTH):
return
# Projection of other vehicle to uncertainty rectangle. This is the possible position of this vehicle which is
# the most likely to collide with other vehicle
projection = np.minimum(np.maximum(other.position, self.interval.position[0]),
self.interval.position[1])
# Accurate rectangular check
if utils.rotated_rectangles_intersect((projection, self.LENGTH, self.WIDTH, self.heading),
(other.position, 0.9*other.LENGTH, 0.9*other.WIDTH, other.heading)):
self.speed = other.speed = min(self.speed, other.speed)
self.crashed = other.crashed = True
class VehicleInterval(object):
    """Axis-aligned bounds [lower, upper] on a vehicle's position, speed and heading."""

    def __init__(self, vehicle: Vehicle) -> None:
        # Start from degenerate (point) intervals around the vehicle's current state.
        def point_interval(value):
            return np.array([value, value], dtype=float)

        self.position = point_interval(vehicle.position)
        self.speed = point_interval(vehicle.speed)
        self.heading = point_interval(vehicle.heading)
/Bilals_pypkg-0.1.tar.gz/Bilals_pypkg-0.1/distributions/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # Python 3 '/' is true division, so no float coercion is needed
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) when the data is a sample
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        self.stdev = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)
        # interval between consecutive x values
        interval = (max_range - min_range) / n_spaces
        # x values to visualize, and the pdf evaluated at each of them
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(point) for point in x]
        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-set the ylabel of axes[0] instead of axes[1]
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not standard deviations) add for independent Gaussians
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/3DCORE-1.1.4.tar.gz/3DCORE-1.1.4/py3dcore/fitting/methods/abcsmc_psd.py | import logging
import multiprocessing
import numba
import numpy as np
import os
import pickle
import py3dcore
import time
from py3dcore._extmath import cholesky
from py3dcore._extnumba import set_random_seed
from py3dcore.params import _numba_calculate_weights_reduce
class ABCSMC_PSD(object):
    """Approximate Bayesian Computation / Sequential Monte Carlo fitter (PSD noise variant).

    Holds magnetic field observations and iteratively refines a particle
    approximation of the 3DCORE model parameters by accepting particles whose
    simulated profiles fall within an adaptive RMSE threshold of the data.
    """

    def __init__(self):
        # Bug fix: these containers were previously *class* attributes, so every
        # ABCSMC_PSD instance shared the same lists and observations leaked
        # between fitters. They are now created per instance.
        self.t_data = []
        self.b_data = []
        self.o_data = []
        self.b_fft = []
        self.mask = []
        self.name = None

    def add_observation(self, t_data, b_data, o_data, fft_data):
        """Add magnetic field observation.

        Parameters
        ----------
        t_data : np.ndarray
            Time evaluation array.
        b_data : np.ndarray
            Magnetic field array.
        o_data : np.ndarray
            Observer position array.
        fft_data : np.ndarray
            FFT spectrum used to generate PSD-shaped noise for this observation.
        """
        self.t_data.extend(t_data)
        self.b_data.extend(b_data)
        self.o_data.extend(o_data)
        self.b_fft.append(fft_data)
        # Mask out the first and last data point of each observation
        _mask = [1] * len(b_data)
        _mask[0] = 0
        _mask[-1] = 0
        self.mask.extend(_mask)

    def init(self, t_launch, model, **kwargs):
        """ABC SMC initialization.

        Parameters
        ----------
        t_launch : datetime.datetime
            Initial CME launch time.
        model : Base3DCOREModel
            3DCORE model class.

        Other Parameters
        ----------------
        seed : int
            Random seed, by default 42.
        set_params : dict
            Dictionary containing parameters to fix to given value.
        """
        logger = logging.getLogger(__name__)
        set_params = kwargs.get("set_params", None)
        self.t_launch = t_launch
        self.model = model
        self.parameters = py3dcore.params.Base3DCOREParameters(
            model.default_parameters())
        self.seed = kwargs.get("seed", 42)
        pdict = self.parameters.params_dict
        # fix parameters: keys may be given by name or by (stringified) index
        if set_params:
            for spkey, spval in set_params.items():
                for key in pdict:
                    if key == spkey:
                        logger.info("setting \"%s\"=%.3f", key, spval)
                        pdict[key]["distribution"] = "fixed"
                        pdict[key]["fixed_value"] = spval
                    elif spkey.isdigit() and pdict[key]["index"] == int(spkey):
                        logger.info("setting \"%s\"=%.3f", key, spval)
                        pdict[key]["distribution"] = "fixed"
                        pdict[key]["fixed_value"] = spval
        # disable gaussian noise (PSD noise is injected by the workers instead)
        pdict["noise"]["distribution"] = "fixed"
        pdict["noise"]["fixed_value"] = 0
        self.parameters._update_arr()
        # setup variables
        self.iter_i = 0
        self.acc_rej_hist = []
        self.eps_hist = []
        self.timer_hist = []
        self.particles = None
        self.weights = None
        self.kernels = None
        self.epses = None
        self.particles_prev = None
        self.weights_prev = None

    def load(self, path):
        """Restore fitter state from a particle file (or the latest file in a directory).

        Parameters
        ----------
        path : str
            Particle file, or a directory containing particle files (the
            lexicographically last one is used).

        Raises
        ------
        FileNotFoundError
            If the path does not exist or the directory is empty.
        """
        logger = logging.getLogger(__name__)
        if os.path.isdir(path):
            files = os.listdir(path)
            if len(files) > 0:
                files.sort()
                path = os.path.join(path, files[-1])
            else:
                raise FileNotFoundError
            logger.info("loading particle file \"%s\"", path)
        elif os.path.exists(path):
            logger.info("loading particle file \"%s\"", path)
        else:
            raise FileNotFoundError
        with open(path, "rb") as fh:
            data = pickle.load(fh)
        self.t_launch = data["t_launch"]
        self.model = py3dcore.util.select_model(data["model"])
        self.parameters = data["parameters"]
        self.seed = data["seed"]
        # eps_hist starts with two seed entries, hence the -2 offset
        self.iter_i = len(data["eps_hist"]) - 2
        self.acc_rej_hist = data["acc_rej_hist"]
        self.eps_hist = data["eps_hist"]
        self.timer_hist = data["timer_hist"]
        self.particles = data["particles"]
        self.weights = data["weights"]
        self.kernels = data["kernels"]
        self.epses = data["epses"]
        self.profiles = data["profiles"]
        self.particles_prev = None
        self.weights_prev = None
        self.t_data = data["t_data"]
        self.b_data = data["b_data"]
        self.o_data = data["o_data"]
        self.b_fft = data["b_fft"]
        self.mask = data["mask"]
        self.name = os.path.basename(path)

    def run(self, iter_end, particles, **kwargs):
        """Run ABC SMC algorithm.

        Parameters
        ----------
        iter_end : int
            Maximum number of iterations.
        particles : int
            Number of particles per iteration, by default 4096.

        Other Parameters
        ----------------
        eps_quantile: float
            Adaptive threshold stepping, by default 0.5.
        jobs : int
            Number of total jobs, by default 8.
        kernel_mode : str
            Transition kernel mode, by default "lcm".
        output : str
            Output folder.
        runs : int
            Number of model runs per worker, by default 2**16.
        sub_iter_max : int
            Maximum number of sub iterations, by default 50.
        workers : int
            Number of parallel workers, by default 8.
        toffset : float
            T marker offsets in hours, by default None (currently unused).
        """
        eps_quantile = kwargs.get("eps_quantile", .5)
        jobs = kwargs.get("jobs", 8)
        kernel_mode = kwargs.get("kernel_mode", "lcm")
        output = kwargs.get("output", None)
        runs = kwargs.get("runs", 16)
        sub_iter_max = kwargs.get("sub_iter_max", 50)
        workers = kwargs.get("workers", 8)
        toff = kwargs.get("toffset", None)  # accepted but currently unused
        kill_flag = False
        pool = multiprocessing.Pool(processes=workers)
        logger = logging.getLogger(__name__)
        if len(self.eps_hist) == 0:
            # Initial threshold: RMSE of an all-zero profile against the data
            eps_0 = rmse([np.zeros((1, 3))] * len(self.b_data), self.b_data)[0]
            self.eps_hist = [2 * eps_0, 2 * eps_0 * 0.98]
        logger.info("starting abc algorithm, eps_0 = %0.2fnT", self.eps_hist[-1])
        for iter_i in range(self.iter_i, iter_end):
            logger.info("starting iteration %i", iter_i)
            timer_iter = time.time()
            # Bug fix: kernels_lower was unbound at iteration 0 on a fresh run,
            # which raised NameError when building the worker arguments below.
            kernels_lower = None
            if iter_i > 0:
                # shift current particles/weights into the "previous" slots
                self.particles_prev = self.particles
                self.weights_prev = self.weights
                # decompose particle kernels
                if kernel_mode == "cm":
                    kernels_lower = cholesky(2 * self.kernels)
                elif kernel_mode == "lcm":
                    kernels_lower = cholesky(2 * self.kernels)
            sub_iter_i = 0
            boost = 0
            rseed = self.seed + 100000 * iter_i
            _results = pool.starmap(abcsmc_worker, [(iter_i, self.model, self.t_launch,
                                                     self.t_data, self.b_data, self.o_data,
                                                     self.b_fft,
                                                     self.mask, self.parameters, self.eps_hist[-1],
                                                     rseed + i, self.particles_prev, self.weights_prev,
                                                     kernels_lower, runs, boost, logger)
                                                    for i in range(jobs)])
            total_runs = jobs * int(2**runs)
            # perform additional runs if insufficient particles are collected
            while True:
                tlens = [len(jp[1]) for jp in _results]
                tlen = sum(tlens)
                self.particles = np.zeros((tlen, len(self.parameters)), dtype=np.float32)
                self.epses = np.zeros((tlen, ), dtype=np.float32)
                self.profiles = np.zeros((tlen, len(self.b_data), 3), dtype=np.float32)
                acc_rej = np.array([0, 0, 0])
                # concatenate the per-worker results into the flat arrays above
                for i in range(0, len(_results)):
                    self.particles[sum(tlens[:i]):sum(tlens[:i + 1])] = _results[i][0]
                    self.epses[sum(tlens[:i]):sum(tlens[:i + 1])] = _results[i][1]
                    self.profiles[sum(tlens[:i]):sum(tlens[:i + 1])] = _results[i][2]
                    acc_rej += _results[i][3]
                logger.info("step %i:%i with (%i/%i) particles", iter_i, sub_iter_i, tlen,
                            particles)
                if tlen > particles:
                    break
                # adaptive run boosting: increase runs per worker when acceptance is low
                dr = 19 - runs - boost
                if dr > 0:
                    exp = particles / ((tlen + 1) * (sub_iter_i + 1))
                    if exp > 8 and dr > 3:
                        boost += 3
                    elif exp > 4 and dr > 2:
                        boost += 2
                    elif exp > 2:
                        boost += 1
                rseed = self.seed + 100000 * iter_i + 1000 * (sub_iter_i + 1)
                _results_ext = pool.starmap(abcsmc_worker, [(iter_i, self.model, self.t_launch,
                                                             self.t_data, self.b_data, self.o_data,
                                                             self.b_fft,
                                                             self.mask, self.parameters,
                                                             self.eps_hist[-1], rseed + i,
                                                             self.particles_prev, self.weights_prev, kernels_lower,
                                                             runs, boost, logger)
                                                            for i in range(jobs)])
                _results.extend(_results_ext)
                sub_iter_i += 1
                total_runs += jobs * int(2**(runs+boost))
                # kill conditions: abort when the projected sub-iteration count is hopeless
                if sub_iter_i == 5 + boost:
                    if tlen * np.floor(sub_iter_max / 5) < particles:
                        logger.warning("expected to exceed maximum number of sub iterations (%i)",
                                       sub_iter_max)
                        logger.warning("aborting")
                        kill_flag = True
                        break
            if kill_flag:
                break
            logger.info("%.2f%% acc, %.2f%% hit, %.2f%% rej", 100 * acc_rej[0] / total_runs,
                        100 * acc_rej[1] / total_runs, 100 * acc_rej[2] / total_runs)
            if tlen > particles:
                self.particles = self.particles[:particles]
                self.epses = self.epses[:particles]
            if iter_i > 0:
                self.weights = np.ones((particles,), dtype=np.float32)
                self.parameters.weight(self.particles, self.particles_prev, self.weights,
                                       self.weights_prev, self.kernels)
                # Bug fix: NaN never compares equal to itself, so the previous
                # "weights == np.nan" test matched nothing; use np.isnan instead.
                self.weights[np.isnan(self.weights)] = 0
            else:
                # first iteration: uniform weights
                self.weights = np.ones((particles,), dtype=np.float32) / particles
            # set new eps as a quantile of the accepted errors
            self.eps_hist.append(np.quantile(self.epses, eps_quantile))
            logger.info("setting new eps: %.3f => %.3f", self.eps_hist[-2], self.eps_hist[-1])
            # compute transition kernels
            if kernel_mode == "cm":
                self.kernels = np.cov(self.particles, rowvar=False, aweights=self.weights)
                # due to aweights sometimes very small numbers are generated
                self.kernels[np.where(self.kernels < 1e-14)] = 0
            elif kernel_mode == "lcm":
                kernels_cm = np.cov(self.particles, rowvar=False, aweights=self.weights)
                # due to aweights sometimes very small numbers are generated
                kernels_cm[np.where(kernels_cm < 1e-14)] = 0
                kernels_cm_inv = np.linalg.pinv(kernels_cm)
                logger.info("generating local kernels")
                self.kernels = np.array(pool.starmap(generate_kernels_lcm, [(i, self.particles, kernels_cm_inv) for i in range(particles)]))
            self.acc_rej_hist.append(acc_rej)
            self.timer_hist.append(time.time() - timer_iter)
            logger.info("step %i done, %i particles, %.2fM runs in %.2f seconds, (total: %s)",
                        iter_i, particles, total_runs / 1e6, time.time() - timer_iter,
                        time.strftime("%Hh %Mm %Ss", time.gmtime(np.sum(self.timer_hist))))
            self.iter_i = iter_i
            if output:
                self.save(output)
        pool.close()

    def save(self, path):
        """Pickle the complete fitter state to ``path`` (a directory gets a per-iteration file name).

        Parameters
        ----------
        path : str
            Output file, or output directory (created if missing).
        """
        logger = logging.getLogger(__name__)
        if not os.path.exists(path):
            os.makedirs(path)
        if os.path.isdir(path):
            path = os.path.join(path, "{0:02d}".format(self.iter_i))
        data = {
            "particles": self.particles,
            "weights": self.weights,
            "kernels": self.kernels,
            "epses": self.epses,
            "profiles": self.profiles,
            "acc_rej_hist": self.acc_rej_hist,
            "eps_hist": self.eps_hist,
            "timer_hist": self.timer_hist,
            "t_launch": self.t_launch,
            "model": self.model.__name__,
            "parameters": self.parameters,
            "seed": self.seed,
            "t_data": self.t_data,
            "b_data": self.b_data,
            "o_data": self.o_data,
            "b_fft": self.b_fft,
            "mask": self.mask
        }
        logger.info("saving file \"%s\"", path)
        with open(path, "wb") as fh:
            pickle.dump(data, fh)
def abcsmc_worker(iter_i, model, t_launch, t_data, b_data, o_data, b_fft, mask,
                  parameters, eps, seed, particles, weights, kernels_lower, runs, boost, logger):
    """Run one batch of 2**(runs + boost) model evaluations and return the accepted particles.

    On iteration 0 initial parameters are drawn from the prior; afterwards they
    are perturbed from the previous particle population using the transition
    kernels. PSD-shaped noise (derived from b_fft) is added to the simulated
    profiles before the RMSE acceptance test against eps.

    Returns a tuple of (accepted parameter rows, their errors, their profiles,
    [accepted count, error-rejected count, mask-rejected count]).
    """
    model_obj = model(t_launch, int(2**(runs + boost)),
                      parameters=parameters, use_gpu=False)
    #logger.info("starting worker")
    if iter_i == 0:
        # first iteration: sample initial parameters directly
        model_obj.generate_iparams(seed=seed)
    else:
        # later iterations: perturb particles from the previous population
        set_random_seed(seed)
        model_obj.perturb_iparams(particles, weights, kernels_lower)
    profiles = np.array(model_obj.sim_fields(t_data, o_data))
    #logger.info("generated profiles")
    # generate PSD fluctuations for each component and observation
    obsc = len(b_fft)
    for i in range(3):
        dloffset = 0
        for j in range(obsc):
            datalen = len(b_fft[j])
            # white noise shaped by the observation's spectrum (colored noise)
            wni = np.fft.fft(np.random.normal(0, 1, size=(int(2**(runs + boost)), datalen)))
            noise = np.real(np.fft.ifft(wni * b_fft[j]) / np.sqrt(datalen)).T
            # only perturb time steps where the simulated profile is non-zero;
            # each observation occupies datalen + 2 rows (padded by one row on each side)
            nullf = (profiles[1 + dloffset:dloffset + (datalen + 2) - 1, :, i] != 0)
            profiles[1 + dloffset:dloffset + (datalen + 2) - 1, :, i][nullf] += noise[nullf]
            dloffset += datalen + 2
    if obsc > 1:
        # compute max error for each observation
        # NOTE(review): datalen here is the length of the *last* observation, so this
        # slicing assumes all observations have equal length -- TODO confirm.
        errors = []
        for i in range(obsc):
            obslc = slice(i * (datalen + 2), (i + 1) * (datalen + 2))
            errors.append(rmse(profiles[obslc], b_data[obslc], mask=mask[obslc]))
        error = np.max(errors, axis=0)
    else:
        error = rmse(profiles, b_data, mask=mask)
    # classify runs: accepted, rejected by error, rejected by mask (inf error)
    accept_mask = error < eps
    rej_mask = np.sum(error == np.inf)
    rej_error = np.sum((error != np.inf) & (error >= eps))
    acc_count = np.sum(accept_mask)
    return model_obj.iparams_arr[accept_mask], error[accept_mask], \
           np.swapaxes(profiles, 0, 1)[accept_mask], \
           np.array([acc_count, rej_error, rej_mask])
def generate_kernels_lcm(i, particles, kernel_cm_inv):
    """Local covariance kernel for particle i: covariance of its nearest half of the particles."""
    count = len(particles)
    distances = np.array([
        _numba_calculate_weights_reduce(particles[i], particles[j], kernel_cm_inv)
        for j in range(count)
    ])
    cutoff = np.median(distances)
    neighbours = particles[np.where(distances < cutoff)]
    return np.cov(neighbours, rowvar=False)
def rmse(values, reference, mask=None, use_gpu=False):
    """Compute RMSE between numerous generated 3DCORE profiles and a reference profile.

    If a mask is given, profiles are additionally invalidated (set to inf) when
    their values are not non-zero exactly where the mask is non-zero.

    Parameters
    ----------
    values : Union[list[np.ndarray], list[numba.cuda.cudadrv.devicearray.DeviceNDArray]]
        List of magnetic field outputs.
    reference : Union[np.ndarray, numba.cuda.cudadrv.devicearray.DeviceNDArray]
        Reference magnetic field measurements.
    mask : np.ndarray, optional
        Mask array, by default None
    use_gpu : bool, optional
        GPU flag, by default False
    """
    if use_gpu:
        raise NotImplementedError
    # accumulator for the squared errors of each profile (avoids shadowing this function's name)
    acc = np.zeros(len(values[0]))
    if mask is None:
        for i in range(len(reference)):
            _error_rmse(values[i], reference[i], 1, acc)
        return np.sqrt(acc / len(values))
    for i in range(len(reference)):
        _error_rmse(values[i], reference[i], mask[i], acc)
    masked = np.copy(np.sqrt(acc / len(values)))
    for i in range(len(reference)):
        _error_mask(values[i], mask[i], masked)
    return masked
@numba.njit
def _error_mask(values_t, mask, rmse):
    # Invalidate (set to inf) every profile whose signal presence at this time
    # step disagrees with the mask: signal where mask == 0, or no signal where
    # mask != 0. Modifies rmse in place.
    for i in numba.prange(len(values_t)):
        # squared magnitude of the field vector; zero means "no signal"
        _v = np.sum(values_t[i]**2)
        if (_v > 0 and mask == 0) or (_v == 0 and mask != 0):
            rmse[i] = np.inf
@numba.njit
def _error_rmse(values_t, ref_t, mask, rmse):
    # Accumulate each profile's squared error against the reference at this time
    # step into rmse (in place); masked-out steps (mask != 1) contribute nothing.
    for i in numba.prange(len(values_t)):
        if mask == 1:
            rmse[i] += np.sum((values_t[i] - ref_t)**2)
/Cessa-1.2.5rc1.tar.gz/Cessa-1.2.5rc1/pyke/pattern.py |
import types
import itertools
class pattern(object):
    """Base class for pyke match patterns."""

    def __ne__(self, b):
        # Inequality is the negation of whatever equality the subclass defines.
        return not (self == b)

    def simple_match_pattern(self, bindings, my_context, pattern_b, b_context):
        # Default behaviour: fall through to the full pattern-matching protocol.
        return self.match_pattern(bindings, my_context, pattern_b, b_context)

    def lookup(self, context, allow_variable_in_ans=False):
        # Patterns are not variables, so lookup resolves to the pattern itself.
        return self
class pattern_literal(pattern):
    """A pattern wrapping a constant Python value.

    Matches data (and other literal patterns) by plain equality; it never
    creates or consults any variable bindings.
    """

    def __init__(self, literal):
        self.literal = literal

    def __hash__(self):
        return hash(self.literal)

    def __eq__(self, b):
        # Compare against another literal's value, or directly against plain data.
        other = b.literal if isinstance(b, pattern_literal) else b
        return self.literal == other

    def match_data(self, bindings, my_context, data):
        return self.literal == data

    def match_pattern(self, bindings, my_context, pattern_b, b_context):
        if isinstance(pattern_b, pattern_literal):
            return self.literal == pattern_b.literal
        # Non-literal pattern: let it try to match our constant as data.
        return pattern_b.match_data(bindings, b_context, self.literal)

    def as_data(self, my_context, allow_vars=False, final=None):
        return self.literal

    def is_data(self, my_context):
        # A literal is always fully ground data.
        return True
class pattern_tuple(pattern):
    """Pattern matching a fixed sequence of sub-patterns with an optional rest variable.

    `elements` holds the positional sub-patterns; `rest_var`, if given, is a
    variable pattern bound to whatever data remains beyond `elements`.
    """
    def __init__(self, elements, rest_var = None):
        self.elements = tuple(elements)
        self.rest_var = rest_var
    def __hash__(self):
        return hash(self.elements) ^ hash(self.rest_var)
    def __eq__(self, b):
        return isinstance(b, pattern_tuple) and \
               self.elements == b.elements and self.rest_var == b.rest_var
    def match_data(self, bindings, my_context, data):
        # Match concrete data against this tuple pattern.
        # Strings are iterable but must not be decomposed as tuples.
        if isinstance(data, str): return False
        try:
            data = tuple(data)
        except TypeError:
            return False
        # Reject: too little data, or too much with no rest_var to absorb the tail.
        if len(self.elements) > len(data) or \
           self.rest_var is None and len(self.elements) < len(data):
            return False
        for x, y in zip(self.elements, data):
            if not x.match_data(bindings, my_context, y): return False
        if self.rest_var is not None:
            # Bind the (possibly empty) remaining tail to the rest variable.
            return self.rest_var.match_data(bindings, my_context,
                                            tuple(data[len(self.elements):]))
        return True
    def simple_match_pattern(self, bindings, my_context, pattern_b, b_context):
        # Tuples cannot be resolved without full matching: hand back
        # (self, context) so the caller falls through to match_pattern.
        return self, my_context
    def match_pattern(self, bindings, my_context, pattern_b, b_context):
        # Match against another pattern (possibly another pattern_tuple).
        simple_ans = pattern_b.simple_match_pattern(bindings, b_context,
                                                    self, my_context)
        if isinstance(simple_ans, bool): return simple_ans
        pattern_b, b_context = simple_ans
        if not isinstance(pattern_b, pattern):
            # The other side resolved to plain data.
            return self.match_data(bindings, my_context, pattern_b)
        assert isinstance(pattern_b, pattern_tuple), "Internal logic error"
        my_len = len(self.elements)
        b_len = len(pattern_b.elements)
        # Length compatibility: a missing rest_var on the longer side is fatal.
        if pattern_b.rest_var is None and my_len > b_len or \
           self.rest_var is None and my_len < b_len:
            return False
        for x, y in zip(self.elements, pattern_b.elements):
            if not x.match_pattern(bindings, my_context, y, b_context):
                return False
        if my_len <= b_len and self.rest_var is not None:
            # This is where the two rest_vars are bound together if my_len ==
            # b_len.
            tail_val, tail_context = pattern_b._tail(my_len, b_context)
            if tail_context is None:
                # The tail resolved to plain data.
                if not self.rest_var.match_data(bindings, my_context, tail_val):
                    return False
            else:
                if not self.rest_var.match_pattern(bindings, my_context,
                                                   tail_val, tail_context):
                    return False
        elif pattern_b.rest_var is not None:
            tail_val, tail_context = self._tail(b_len, my_context)
            if tail_context is None:
                if not pattern_b.rest_var.match_data(bindings, b_context,
                                                     tail_val):
                    return False
            else:
                if not pattern_b.rest_var.match_pattern(bindings, b_context,
                                                        tail_val, tail_context):
                    return False
        return True
    def as_data(self, my_context, allow_vars = False, final = None):
        # Convert this pattern back into plain data under the current bindings.
        ans = tuple(x.as_data(my_context, allow_vars, final)
                    for x in self.elements)
        if self.rest_var is None:
            return ans
        rest = my_context.lookup_data(self.rest_var.name, allow_vars, final)
        if isinstance(rest, tuple): return ans + rest
        # Unbound rest variable: represent it with a '*'-prefixed marker element.
        return ans + ('*' + rest,)
    def _tail(self, n, my_context):
        """ Return a copy of myself with the first n elements removed.
        """
        if n == len(self.elements):
            # Nothing left but the rest variable (or nothing at all).
            if self.rest_var is None: return (), None
            return self.rest_var, my_context
        rest_elements = self.elements[n:]
        if self.rest_var is None and \
           all(isinstance(x, pattern_literal) for x in rest_elements):
            # Fully-literal tail: return it as plain data (context None signals data).
            return tuple(x.literal for x in rest_elements), None
        return pattern_tuple(self.elements[n:], self.rest_var), my_context
    def is_data(self, my_context):
        # True iff every element (and the rest variable, if any) is fully bound data.
        arg_test = all(arg_pat.is_data(my_context) for arg_pat in self.elements)
        if not arg_test or self.rest_var is None: return arg_test
        return self.rest_var.is_data(my_context)
/Gbtestapi-0.1a10-py3-none-any.whl/gailbot/plugins/loader/urlloader.py | import os
import re
import boto3
from botocore.exceptions import ParamValidationError
from cryptography.fernet import Fernet
from typing import Dict, List, Union, TypedDict, Tuple
from abc import ABC
from .directoryloader import PluginDirectoryLoader
from .pluginLoader import PluginLoader
from ..suite import PluginSuite
from src.gailbot.core.utils.logger import makelogger
from src.gailbot.core.utils.general import get_extension, read_toml, delete
from src.gailbot.configs import PLUGIN_CONFIG
from src.gailbot.core.utils.download import download_from_urls, is_internet_connected
from urllib.parse import urlparse
from urllib import request
# Module-level logger for URL-based plugin loading.
logger = makelogger("url_loader")
class UrlLoader(ABC):
    """Abstract base class for loaders that fetch plugin suites from a URL."""

    def __init__(self, download_dir, suites_dir) -> None:
        super().__init__()
        # Where archives are downloaded to, and where loaded suites are stored.
        self.download_dir = download_dir
        self.suites_dir = suites_dir
        self.dir_loader = PluginDirectoryLoader(suites_dir)

    def is_supported_url(self, url: str) -> bool:
        """Return True if this loader can handle the given url."""
        raise NotImplementedError

    def load(self, url: str) -> List[PluginSuite]:
        """Download and load the plugin suite(s) found at the url."""
        raise NotImplementedError
class PluginURLLoader(PluginLoader):
    """
    Plugin loader that downloads and loads plugin suites from a url.
    The loader can currently recognize and download plugin suites from
    github and amazon s3.
    """

    def __init__(self, download_dir: str, suites_dir: str) -> None:
        super().__init__()
        self.download_dir = download_dir
        # Candidate source-specific loaders, tried in order.
        self.url_loaders: List[UrlLoader] = [
            GitHubURLLoader(download_dir, suites_dir),
            S3ZipLoader(download_dir, suites_dir),
            S3BucketLoader(download_dir, suites_dir),
        ]

    @property
    def supported_url_source(self):
        """return a list of supported url downloading sources"""
        return ["github", "amazon s3"]

    def is_valid_url(self, url: str) -> bool:
        """
        check if the url string is supported by any of the available loaders

        Args:
            url (str): a string that represent the url

        Returns:
            bool: true if the string is a valid, supported url, false otherwise
        """
        # Bug fix: this method was declared @staticmethod while still taking and
        # using self, so any instance call raised TypeError. It is a regular
        # instance method now.
        return any(loader.is_supported_url(url) for loader in self.url_loaders)

    def load(self, url: str) -> Union[List[PluginSuite], bool]:
        """
        load the plugin suite from the url if the url is supported by the
        list of the loaders available

        Args:
            url (str): url string

        Returns:
            List[PluginSuite]: loaded plugin suites if the url is supported
            bool: false if the url is not supported or there is no connection
        """
        if not is_internet_connected():
            return False
        for loader in self.url_loaders:
            suites = loader.load(url)
            # isinstance against the builtin list (typing.List is deprecated for
            # isinstance checks); unsupported loaders return a non-list.
            if isinstance(suites, list):
                return suites
        return False
class GitHubURLLoader(UrlLoader):
    """Load a plugin suite from a GitHub url."""

    def __init__(self, download_dir, suites_dir) -> None:
        """initialize the plugin loader
        Args:
            download_dir (str): path to where the plugin suite will be downloaded
            suites_dir (str): path to where the plugin will be stored after
                              download
        """
        super().__init__(download_dir, suites_dir)

    def is_supported_url(self, url: str) -> bool:
        """given a url, returns true if the url is an https github.com url
        Args:
            url (str): the url string
        """
        parsed = urlparse(url)
        return parsed.scheme == "https" and parsed.netloc == "github.com"

    def load(self, url: str) -> List[PluginSuite]:
        """download the plugin from a given url and stored a copy of the
        plugins in the suites directory
        Args:
            url (str): url for downloading the suite
        Returns:
            the loaded plugin suites, or False when the url is not
            supported or the downloaded archive is not a valid suite
        """
        # check if the type is valid
        if not self.is_supported_url(url):
            return False
        download_path = download_from_urls(
            urls=[url], download_dir=self.download_dir, unzip=True
        )[0]
        # get the suite name from the toml file; guard against archives
        # that contain no config at all (previously raised NameError)
        suite_name = None
        for root, dirs, files in os.walk(download_path):
            if PLUGIN_CONFIG.CONFIG in files:
                config = os.path.join(root, PLUGIN_CONFIG.CONFIG)
                suite_name = read_toml(config)["suite_name"]
                break
        if suite_name is None:
            logger.error(f"no {PLUGIN_CONFIG.CONFIG} found in {download_path}")
            delete(download_path)
            return False
        # get the suite directory
        suite_path = None
        for dirpath, dirnames, filename in os.walk(download_path):
            if suite_name in dirnames:
                suite_path = os.path.join(dirpath, suite_name)
                logger.info(f"the directory path is {suite_path}")
                break
        if suite_path is None:
            logger.error(f"suite directory {suite_name} not found in download")
            delete(download_path)
            return False
        # Move to the suites dir, then clean up the raw download.
        suites = self.dir_loader.load(suite_path)
        delete(download_path)
        return suites
class S3ZipLoader(UrlLoader):
    """Load a plugin suite from a zip archive hosted on Amazon S3."""

    def __init__(self, download_dir, suites_dir) -> None:
        """initialize the plugin loader
        Args:
            download_dir (str): path to where the plugin suite will be downloaded
            suites_dir (str): path to where the plugin will be stored after
                              download
        """
        super().__init__(download_dir, suites_dir)

    def is_supported_url(self, url: str) -> bool:
        """given a url, returns true if the url points at a ``.zip``
        object on an S3 regional endpoint
        (``<bucket>.s3.<region>.amazonaws.com``)
        Args:
            url (str): the url string
        """
        regex = r"^https?://[a-zA-Z0-9.-]+\.s3\.[a-z]{2}-[a-z]+-\d{1,2}\.amazonaws\.com/.*\.zip$"
        match = re.match(regex, url)
        return bool(match)

    def load(self, url: str) -> PluginSuite:
        """download the plugin from a given url and stored a copy of the
        plugins in the suites directory
        Args:
            url (str): url for downloading the suite
        Returns:
            the loaded plugin suites, or False when the url is not
            supported or the downloaded archive is not a valid suite
        """
        # check if the type is valid
        if not self.is_supported_url(url):
            logger.info(f"url is not a supported url")
            return False
        download_path = download_from_urls(
            urls=[url], download_dir=self.download_dir, unzip=True
        )[0]
        logger.info(f"file is downloaded to {download_path}")
        # get the suite name from the toml file; guard against archives
        # that contain no config at all (previously raised NameError)
        suite_name = None
        for root, dirs, files in os.walk(download_path):
            if PLUGIN_CONFIG.CONFIG in files:
                config = os.path.join(root, PLUGIN_CONFIG.CONFIG)
                suite_name = read_toml(config)["suite_name"]
                break
        if suite_name is None:
            logger.error(f"no {PLUGIN_CONFIG.CONFIG} found in {download_path}")
            delete(download_path)
            return False
        # get the suite directory
        suite_path = None
        for dirpath, dirnames, filename in os.walk(download_path):
            if suite_name in dirnames:
                suite_path = os.path.join(dirpath, suite_name)
                logger.info(f"the directory path is {suite_path}")
                break
        if suite_path is None:
            logger.error(f"suite directory {suite_name} not found in download")
            delete(download_path)
            return False
        # Move to the suites dir, then clean up the raw download.
        suites = self.dir_loader.load(suite_path)
        delete(download_path)
        return suites
class S3BucketLoader(UrlLoader):
    """Load every zipped plugin suite stored in an Amazon S3 bucket."""

    def __init__(self, download_dir, suites_dir) -> None:
        """initialize the plugin loader
        Args:
            download_dir (str): path to where the plugin suite will be downloaded
            suites_dir (str): path to where the plugin will be stored after
                              download
        """
        # NOTE: deliberately does not call super().__init__(): this loader
        # delegates the actual loading to S3ZipLoader instead of creating
        # its own PluginDirectoryLoader.
        self.download_dir = download_dir
        self.suites_dir = suites_dir
        try:
            # AWS credentials ship encrypted in the config; decrypt here.
            self.fernet = Fernet(PLUGIN_CONFIG.EN_KEY)
            self.aws_api_key = self.fernet.decrypt(
                PLUGIN_CONFIG.ENCRYPTED_API_KEY
            ).decode()
            self.aws_id = self.fernet.decrypt(PLUGIN_CONFIG.ENCRYPTED_API_ID).decode()
        except Exception as e:
            # Without credentials the loader is inert: head_bucket will
            # fail and is_supported_url / load will return False.
            logger.error(e, exc_info=e)
            self.fernet = None
            self.aws_api_key = None
            self.aws_id = None

    def is_supported_url(self, bucket: str) -> bool:
        """given a bucket name, returns true when the bucket is reachable
        with the configured credentials
        Args:
            bucket (str): the S3 bucket name
        """
        r3 = boto3.resource(
            "s3", aws_access_key_id=self.aws_id, aws_secret_access_key=self.aws_api_key
        )
        try:
            r3.meta.client.head_bucket(Bucket=bucket)
            return True
        except ParamValidationError:
            # Not even a syntactically valid bucket name.
            return False
        except Exception as e:
            logger.error(e, exc_info=e)
            return False

    def load(self, bucket: str) -> List[PluginSuite]:
        """download every zip object in the bucket and load each one as a
        plugin suite
        Args:
            bucket (str): the S3 bucket name
        Returns:
            List[PluginSuite]: all suites loaded from the bucket, or False
            when the bucket is not reachable
        """
        if not self.is_supported_url(bucket):
            return False
        s3 = boto3.client(
            "s3", aws_access_key_id=self.aws_id, aws_secret_access_key=self.aws_api_key
        )
        pluginsuites = []
        # get all objects from the bucket; an empty bucket response has no
        # "Contents" key, so default to an empty list instead of raising
        objects = s3.list_objects_v2(Bucket=bucket).get("Contents", [])
        for obj in objects:
            key = obj["Key"]
            # NOTE(review): substring match also hits keys that merely
            # contain "zip" anywhere; confirm whether endswith(".zip")
            # was intended before changing.
            if "zip" in key:
                # Generate a presigned URL for the object, then strip the
                # query string so S3ZipLoader sees a plain https url.
                url = s3.generate_presigned_url(
                    "get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=3600
                )
                url = url[0 : url.index("?")]
                logger.info(f"loading plugin from url {url}")
                zip_loader = S3ZipLoader(self.download_dir, self.suites_dir)
                pluginsuites.extend(zip_loader.load(url))
        return pluginsuites
Abe setup for MySQL.
Run the Bitcoin client to ensure that your copy of the block chain is
up to date.
Install Python 2.7 and pycrypto. The Debian/Ubuntu packages are
python2.7 and python-crypto.
Install MySQL 5.x server and MySQL-Python. On Debian/Ubuntu:
mysql-server-5.1 and python-mysqldb.
Configure the MySQL instance with InnoDB engine support. Often,
InnoDB is enabled by default. To check for InnoDB support, issue
"SHOW ENGINES" and look in the output for "InnoDB" with "YES" next to
it. If "skip-innodb" appears in the server configuration (my.cnf or
my.ini) then remove it and restart the server.
Log into MySQL as root (e.g.: mysql -u root) and issue the following,
replacing "PASSWORD" with a password you choose:
CREATE DATABASE abe;
CREATE USER abe IDENTIFIED BY 'PASSWORD';
GRANT ALL ON abe.* TO abe;
Create file abe-my.conf with the following contents, replacing
"PASSWORD" as above:
dbtype MySQLdb
connect-args {"user":"abe","db":"abe","passwd":"PASSWORD"}
upgrade
port 2750
Perform the initial data load:
python -m Abe.abe --config abe-my.conf --commit-bytes 100000 --no-serve
Look for output such as:
block_tx 1 1
block_tx 2 2
...
This step may take several days depending on chain size and hardware.
Then run the web server as:
python -m Abe.abe --config abe-my.conf
You should see:
Listening on http://localhost:2750
Verify the installation by browsing the URL shown.
| PypiClean |
from inspect import isclass
import json
import logging
from datetime import datetime, date
from flask import Blueprint, session, flash, render_template, url_for, abort
from ._compat import as_unicode
from .forms import GeneralModelConverter
from .widgets import FormWidget, ShowWidget, ListWidget, SearchWidget
from .actions import ActionItem
from .urltools import *
log = logging.getLogger(__name__)
def expose(url='/', methods=('GET',)):
    """
    Use this decorator to expose views on your view classes.

    :param url:
        Relative URL for the view
    :param methods:
        Allowed HTTP methods. By default only GET is allowed.
    """
    def wrap(f):
        # Accumulate (url, methods) pairs on the function itself; stacked
        # decorators keep appending to the same list.
        urls = getattr(f, '_urls', None)
        if urls is None:
            urls = []
            f._urls = urls
        urls.append((url, methods))
        return f
    return wrap
def expose_api(name='', url='', methods=('GET',), description=''):
    """
    Use this decorator to expose API endpoints on your view classes.

    :param name:
        Name for the API endpoint; defaults to the decorated function's name
    :param url:
        Relative URL for the view; defaults to ``/api/<name>``
    :param methods:
        Allowed HTTP methods. By default only GET is allowed.
    :param description:
        Free-text description stored with the endpoint metadata
    """
    def wrap(f):
        api_name = name or f.__name__
        # BUGFIX: the default url previously interpolated the raw ``name``
        # argument (often ''), producing '/api/' for every unnamed API
        # instead of '/api/<function name>'.
        api_url = url or "/api/{0}".format(api_name)
        if not hasattr(f, '_urls'):
            f._urls = []
            f._extra = {}
        f._urls.append((api_url, methods))
        f._extra[api_name] = (api_url, f.__name__, description)
        return f
    return wrap
class BaseView(object):
    """
    All views inherit from this class.
    Its constructor will register your exposed urls on flask as a Blueprint.
    This class does not expose any urls, but provides a common base for all views.
    Extend this class if you want to expose methods for your own templates
    """
    appbuilder = None
    blueprint = None
    endpoint = None
    route_base = None
    """ Override this if you want to define your own relative url """
    template_folder = 'templates'
    """ The template folder relative location """
    static_folder = 'static'
    """ The static folder relative location """
    base_permissions = None
    """
    List with allowed base permission.
    Use it like this if you want to restrict your view to readonly::
        class MyView(ModelView):
            base_permissions = ['can_list','can_show']
    """
    default_view = 'list'
    """ the default view for this BaseView, to be used with url_for (method name) """
    extra_args = None
    """ dictionary for injecting extra arguments into template """
    _apis = None
    def __init__(self):
        """
        Initialization of base permissions
        based on exposed methods and actions
        Initialization of extra args
        """
        if self.base_permissions is None:
            # Derive the permission set from every attribute that carries a
            # _permission_name marker (set elsewhere by a permission decorator).
            self.base_permissions = set()
            for attr_name in dir(self):
                if hasattr(getattr(self, attr_name), '_permission_name'):
                    permission_name = getattr(getattr(self, attr_name), '_permission_name')
                    self.base_permissions.add('can_' + permission_name)
            self.base_permissions = list(self.base_permissions)
        if not self.extra_args:
            self.extra_args = dict()
        self._apis = dict()
        # Gather API metadata recorded by the @expose_api decorator (_extra
        # maps api name -> (url, function name, description)).
        for attr_name in dir(self):
            if hasattr(getattr(self, attr_name), '_extra'):
                _extra = getattr(getattr(self, attr_name), '_extra')
                for key in _extra: self._apis[key] = _extra[key]
    def create_blueprint(self, appbuilder,
                         endpoint=None,
                         static_folder=None):
        """
        Create Flask blueprint. You will generally not use it
        :param appbuilder:
            the AppBuilder object
        :param endpoint:
            endpoint override for this blueprint, will assume class name if not provided
        :param static_folder:
            the relative override for static folder, if omitted application will use the appbuilder static
        """
        # Store appbuilder instance
        self.appbuilder = appbuilder
        # If endpoint name is not provided, get it from the class name
        self.endpoint = endpoint or self.__class__.__name__
        if self.route_base is None:
            self.route_base = '/' + self.__class__.__name__.lower()
        self.static_folder = static_folder
        if not static_folder:
            # Create blueprint and register rules
            self.blueprint = Blueprint(self.endpoint, __name__,
                                       url_prefix=self.route_base,
                                       template_folder=self.template_folder)
        else:
            self.blueprint = Blueprint(self.endpoint, __name__,
                                       url_prefix=self.route_base,
                                       template_folder=self.template_folder,
                                       static_folder=static_folder)
        self._register_urls()
        return self.blueprint
    def _register_urls(self):
        # Register every method decorated with @expose / @expose_api:
        # each carries a _urls list of (url, methods) pairs.
        for attr_name in dir(self):
            attr = getattr(self, attr_name)
            if hasattr(attr, '_urls'):
                for url, methods in attr._urls:
                    self.blueprint.add_url_rule(url,
                                                attr_name,
                                                attr,
                                                methods=methods)
    def render_template(self, template, **kwargs):
        """
        Use this method on your own endpoints, will pass the extra_args
        to the templates.
        :param template: The template relative path
        :param kwargs: arguments to be passed to the template
        """
        kwargs['base_template'] = self.appbuilder.base_template
        kwargs['appbuilder'] = self.appbuilder
        # extra_args entries are merged after kwargs, so they win on key clashes.
        return render_template(template, **dict(list(kwargs.items()) + list(self.extra_args.items())))
    def _prettify_name(self, name):
        """
        Prettify pythonic variable name.
        For example, 'HelloWorld' will be converted to 'Hello World'
        :param name:
            Name to prettify.
        """
        return re.sub(r'(?<=.)([A-Z])', r' \1', name)
    def _prettify_column(self, name):
        """
        Prettify pythonic variable name.
        For example, 'hello_world' will be converted to 'Hello World'
        :param name:
            Name to prettify.
        """
        return re.sub('[._]', ' ', name).title()
    def update_redirect(self):
        """
        Call it on your own endpoint's to update the back history navigation.
        If you bypass it, the next submit or back will go over it.
        """
        # The navigation history is kept as a stack in the flask session.
        page_history = Stack(session.get('page_history', []))
        page_history.push(request.url)
        session['page_history'] = page_history.to_json()
    def get_redirect(self):
        """
        Returns the previous url.
        """
        index_url = self.appbuilder.get_url_for_index
        page_history = Stack(session.get('page_history', []))
        # Top of the stack is the current page; discard it first.
        if page_history.pop() is None:
            return index_url
        session['page_history'] = page_history.to_json()
        # The next entry (if any) is the page to go back to.
        url = page_history.pop() or index_url
        return url
    @classmethod
    def get_default_url(cls, **kwargs):
        """
        Returns the url for this class default endpoint
        """
        return url_for(cls.__name__ + '.' + cls.default_view, **kwargs)
    def get_uninit_inner_views(self):
        """
        Will return a list with views that need to be initialized.
        Normally related_views from ModelView
        """
        return []
    def get_init_inner_views(self, views):
        """
        Sets initialized inner views
        """
        pass
class BaseFormView(BaseView):
    """
    Base class for views that render a single WTForm.
    """
    form_template = 'appbuilder/general/model/edit.html'
    edit_widget = FormWidget
    """ Form widget to override """
    form_title = ''
    """ The form title to be displayed """
    form_columns = None
    """ The form columns to include, if empty will include all"""
    form = None
    """ The WTF form to render """
    form_fieldsets = None
    """ Form field sets """
    default_view = 'this_form_get'
    """ The form view default entry endpoint """
    def _init_vars(self):
        self.form_columns = self.form_columns or []
        self.form_fieldsets = self.form_fieldsets or []
        # Refresh the form to learn the full set of field names.
        all_cols = [field.name for field in self.form.refresh()]
        if self.form_fieldsets:
            # Fieldsets, when present, define the column list.
            columns = []
            for fieldset_item in self.form_fieldsets:
                columns.extend(list(fieldset_item[1].get('fields')))
            self.form_columns = columns
        elif not self.form_columns:
            self.form_columns = all_cols
    def form_get(self, form):
        """
        Override this method to implement your form processing
        """
        pass
    def form_post(self, form):
        """
        Override this method to implement your form processing
        :param form: WTForm form
        Return None or a flask response to render
        a custom template or redirect the user
        """
        pass
    def _get_edit_widget(self, form=None, exclude_cols=None, widgets=None):
        widgets = widgets or {}
        widgets['edit'] = self.edit_widget(
            route_base=self.route_base,
            form=form,
            include_cols=self.form_columns,
            exclude_cols=exclude_cols or [],
            fieldsets=self.form_fieldsets
        )
        return widgets
class BaseModelView(BaseView):
    """
    The base class of ModelView and ChartView, all properties are inherited
    Customize ModelView and ChartView overriding this properties
    This class supports all the basics for query
    """
    datamodel = None
    """
    Your sqla model you must initialize it like::
        class MyView(ModelView):
            datamodel = SQLAInterface(MyTable)
    """
    title = 'Title'
    search_columns = None
    """
    List with allowed search columns, if not provided all possible search columns will be used
    If you want to limit the search (*filter*) columns possibilities, define it with a list of column names from your model::
        class MyView(ModelView):
            datamodel = SQLAInterface(MyTable)
            search_columns = ['name','address']
    """
    search_exclude_columns = None
    """
    List with columns to exclude from search. Search includes all possible columns by default
    """
    search_form_extra_fields = None
    """
    A dictionary containing column names and a WTForm
    Form fields to be added to the Add form, these fields do not
    exist on the model itself ex::
        search_form_extra_fields = {'some_col':BooleanField('Some Col', default=False)}
    """
    search_form_query_rel_fields = None
    """
    Add Customized query for related fields on search form.
    Assign a dictionary where the keys are the column names of
    the related models to filter, the value for each key, is a list of lists with the
    same format as base_filter
    {'relation col name':[['Related model col',FilterClass,'Filter Value'],...],...}
    Add a custom filter to form related fields::
        class ContactModelView(ModelView):
            datamodel = SQLAModel(Contact, db.session)
            search_form_query_rel_fields = [('group':[['name',FilterStartsWith,'W']]}
    """
    label_columns = None
    """
    Dictionary of labels for your columns, override this if you want different pretify labels
    example (will just override the label for name column)::
        class MyView(ModelView):
            datamodel = SQLAInterface(MyTable)
            label_columns = {'name':'My Name Label Override'}
    """
    search_form = None
    """ To implement your own add WTF form for Search """
    base_filters = None
    """
    Filter the view use: [['column_name',BaseFilter,'value'],]
    example::
        def get_user():
            return g.user
        class MyView(ModelView):
            datamodel = SQLAInterface(MyTable)
            base_filters = [['created_by', FilterEqualFunction, get_user],
                            ['name', FilterStartsWith, 'a']]
    """
    base_order = None
    """
    Use this property to set default ordering for lists ('col_name','asc|desc')::
        class MyView(ModelView):
            datamodel = SQLAInterface(MyTable)
            base_order = ('my_column_name','asc')
    """
    search_widget = SearchWidget
    """ Search widget you can override with your own """
    _base_filters = None
    """ Internal base Filter from class Filters will always filter view """
    _filters = None
    """ Filters object will calculate all possible filter types based on search_columns """
    def __init__(self, **kwargs):
        """
        Constructor
        """
        datamodel = kwargs.get('datamodel', None)
        if datamodel:
            self.datamodel = datamodel
        self._init_properties()
        self._init_forms()
        self._init_titles()
        # NOTE(review): BaseView.__init__ accepts no arguments, so any
        # kwargs (e.g. datamodel=...) forwarded here would raise TypeError
        # -- confirm the intended construction pattern.
        super(BaseModelView, self).__init__(**kwargs)
    def _gen_labels_columns(self, list_columns):
        """
        Auto generates pretty label_columns from list of columns
        """
        for col in list_columns:
            if not self.label_columns.get(col):
                self.label_columns[col] = self._prettify_column(col)
    def _init_titles(self):
        # Overridden by subclasses (e.g. BaseCRUDView) to fill in titles.
        pass
    def _init_properties(self):
        # Apply defaults for all optional collections.
        self.label_columns = self.label_columns or {}
        self.base_filters = self.base_filters or []
        self.search_exclude_columns = self.search_exclude_columns or []
        self.search_columns = self.search_columns or []
        self._base_filters = self.datamodel.get_filters().add_filter_list(self.base_filters)
        list_cols = self.datamodel.get_columns_list()
        search_columns = self.datamodel.get_search_columns_list()
        # Default search columns: everything searchable minus the exclusions.
        if not self.search_columns:
            self.search_columns = [x for x in search_columns if x not in self.search_exclude_columns]
        self._gen_labels_columns(list_cols)
        self._filters = self.datamodel.get_filters(self.search_columns)
    def _init_forms(self):
        # Build the search form from the model unless one was supplied.
        conv = GeneralModelConverter(self.datamodel)
        if not self.search_form:
            self.search_form = conv.create_form(self.label_columns,
                                                self.search_columns,
                                                extra_fields=self.search_form_extra_fields,
                                                filter_rel_fields=self.search_form_query_rel_fields)
    def _get_search_widget(self, form=None, exclude_cols=None, widgets=None):
        # Add the search widget (under the 'search' key) to *widgets*.
        exclude_cols = exclude_cols or []
        widgets = widgets or {}
        widgets['search'] = self.search_widget(route_base=self.route_base,
                                               form=form,
                                               include_cols=self.search_columns,
                                               exclude_cols=exclude_cols,
                                               filters=self._filters
                                               )
        return widgets
    def _label_columns_json(self):
        """
        Prepares dict with labels to be JSON serializable
        """
        ret = {}
        for key, value in list(self.label_columns.items()):
            # Round-trip through UTF-8 for py2/py3 compatible unicode labels.
            ret[key] = as_unicode(value.encode('UTF-8'))
        return ret
class BaseCRUDView(BaseModelView):
"""
The base class for ModelView, all properties are inherited
Customize ModelView overriding this properties
"""
related_views = None
"""
List with ModelView classes
Will be displayed related with this one using relationship sqlalchemy property::
class MyView(ModelView):
datamodel = SQLAModel(Group, db.session)
related_views = [MyOtherRelatedView]
"""
_related_views = None
""" internal list with ref to instantiated view classes """
list_title = ""
""" List Title, if not configured the default is 'List ' with pretty model name """
show_title = ""
""" Show Title , if not configured the default is 'Show ' with pretty model name """
add_title = ""
""" Add Title , if not configured the default is 'Add ' with pretty model name """
edit_title = ""
""" Edit Title , if not configured the default is 'Edit ' with pretty model name """
list_columns = None
"""
A list of columns (or model's methods) to be displayed on the list view.
Use it to control the order of the display
"""
show_columns = None
"""
A list of columns (or model's methods) to be displayed on the show view.
Use it to control the order of the display
"""
add_columns = None
"""
A list of columns (or model's methods) to be displayed on the add form view.
Use it to control the order of the display
"""
edit_columns = None
"""
A list of columns (or model's methods) to be displayed on the edit form view.
Use it to control the order of the display
"""
show_exclude_columns = None
"""
A list of columns to exclude from the show view. By default all columns are included.
"""
add_exclude_columns = None
"""
A list of columns to exclude from the add form. By default all columns are included.
"""
edit_exclude_columns = None
"""
A list of columns to exclude from the edit form. By default all columns are included.
"""
order_columns = None
""" Allowed order columns """
page_size = 10
"""
Use this property to change default page size
"""
show_fieldsets = None
"""
show fieldsets django style [(<'TITLE'|None>, {'fields':[<F1>,<F2>,...]}),....]
::
class MyView(ModelView):
datamodel = SQLAModel(MyTable, db.session)
show_fieldsets = [
('Summary',{'fields':['name','address','group']}),
('Personal Info',{'fields':['birthday','personal_phone'],'expanded':False}),
]
"""
add_fieldsets = None
"""
add fieldsets django style (look at show_fieldsets for an example)
"""
edit_fieldsets = None
"""
edit fieldsets django style (look at show_fieldsets for an example)
"""
description_columns = None
"""
Dictionary with column descriptions that will be shown on the forms::
class MyView(ModelView):
datamodel = SQLAModel(MyTable, db.session)
description_columns = {'name':'your models name column','address':'the address column'}
"""
validators_columns = None
""" Dictionary to add your own validators for forms """
formatters_columns = None
""" Dictionary of formatter used to format the display of columns
formatters_columns = {'some_date_col': lambda x: x.isoformat() }
"""
add_form_extra_fields = None
"""
A dictionary containing column names and a WTForm
Form fields to be added to the Add form, these fields do not
exist on the model itself ex::
add_form_extra_fields = {'some_col':BooleanField('Some Col', default=False)}
"""
edit_form_extra_fields = None
""" Dictionary to add extra fields to the Edit form using this property """
add_form_query_rel_fields = None
"""
Add Customized query for related fields to add form.
Assign a dictionary where the keys are the column names of
the related models to filter, the value for each key, is a list of lists with the
same format as base_filter
{'relation col name':[['Related model col',FilterClass,'Filter Value'],...],...}
Add a custom filter to form related fields::
class ContactModelView(ModelView):
datamodel = SQLAModel(Contact, db.session)
add_form_query_rel_fields = {'group':[['name',FilterStartsWith,'W']]}
"""
edit_form_query_rel_fields = None
"""
Add Customized query for related fields to edit form.
Assign a dictionary where the keys are the column names of
the related models to filter, the value for each key, is a list of lists with the
same format as base_filter
{'relation col name':[['Related model col',FilterClass,'Filter Value'],...],...}
Add a custom filter to form related fields::
class ContactModelView(ModelView):
datamodel = SQLAModel(Contact, db.session)
edit_form_query_rel_fields = {'group':[['name',FilterStartsWith,'W']]}
"""
add_form = None
""" To implement your own, assign WTF form for Add """
edit_form = None
""" To implement your own, assign WTF form for Edit """
list_template = 'appbuilder/general/model/list.html'
""" Your own add jinja2 template for list """
edit_template = 'appbuilder/general/model/edit.html'
""" Your own add jinja2 template for edit """
add_template = 'appbuilder/general/model/add.html'
""" Your own add jinja2 template for add """
show_template = 'appbuilder/general/model/show.html'
""" Your own add jinja2 template for show """
list_widget = ListWidget
""" List widget override """
edit_widget = FormWidget
""" Edit widget override """
add_widget = FormWidget
""" Add widget override """
show_widget = ShowWidget
""" Show widget override """
actions = None
    def __init__(self, **kwargs):
        """
        Constructor; collects every method decorated with @action into
        ``self.actions`` and registers the matching permission.
        """
        super(BaseCRUDView, self).__init__(**kwargs)
        # collect and setup actions
        self.actions = {}
        for attr_name in dir(self):
            func = getattr(self, attr_name)
            if hasattr(func, '_action'):
                action = ActionItem(*func._action, func=func)
                # Each action contributes its own permission entry.
                self.base_permissions.append(action.name)
                self.actions[action.name] = action
def _init_forms(self):
"""
Init forms for Add and Edit
"""
super(BaseCRUDView, self)._init_forms()
conv = GeneralModelConverter(self.datamodel)
if not self.add_form:
self.add_form = conv.create_form(self.label_columns,
self.add_columns,
self.description_columns,
self.validators_columns,
self.add_form_extra_fields,
self.add_form_query_rel_fields)
if not self.edit_form:
self.edit_form = conv.create_form(self.label_columns,
self.edit_columns,
self.description_columns,
self.validators_columns,
self.edit_form_extra_fields,
self.edit_form_query_rel_fields)
def _init_titles(self):
"""
Init Titles if not defined
"""
super(BaseCRUDView, self)._init_titles()
class_name = self.datamodel.model_name
if not self.list_title:
self.list_title = 'List ' + self._prettify_name(class_name)
if not self.add_title:
self.add_title = 'Add ' + self._prettify_name(class_name)
if not self.edit_title:
self.edit_title = 'Edit ' + self._prettify_name(class_name)
if not self.show_title:
self.show_title = 'Show ' + self._prettify_name(class_name)
self.title = self.list_title
def _init_properties(self):
"""
Init Properties
"""
super(BaseCRUDView, self)._init_properties()
# Reset init props
self.related_views = self.related_views or []
self._related_views = self._related_views or []
self.description_columns = self.description_columns or {}
self.validators_columns = self.validators_columns or {}
self.formatters_columns = self.formatters_columns or {}
self.add_form_extra_fields = self.add_form_extra_fields or {}
self.edit_form_extra_fields = self.edit_form_extra_fields or {}
self.show_exclude_columns = self.show_exclude_columns or []
self.add_exclude_columns = self.add_exclude_columns or []
self.edit_exclude_columns = self.edit_exclude_columns or []
# Generate base props
list_cols = self.datamodel.get_user_columns_list()
self.list_columns = self.list_columns or [list_cols[0]]
self._gen_labels_columns(self.list_columns)
self.order_columns = self.order_columns or self.datamodel.get_order_columns_list(list_columns=self.list_columns)
if self.show_fieldsets:
self.show_columns = []
for fieldset_item in self.show_fieldsets:
self.show_columns = self.show_columns + list(fieldset_item[1].get('fields'))
else:
if not self.show_columns:
self.show_columns = [x for x in list_cols if x not in self.show_exclude_columns]
if self.add_fieldsets:
self.add_columns = []
for fieldset_item in self.add_fieldsets:
self.add_columns = self.add_columns + list(fieldset_item[1].get('fields'))
else:
if not self.add_columns:
self.add_columns = [x for x in list_cols if x not in self.add_exclude_columns]
if self.edit_fieldsets:
self.edit_columns = []
for fieldset_item in self.edit_fieldsets:
self.edit_columns = self.edit_columns + list(fieldset_item[1].get('fields'))
else:
if not self.edit_columns:
self.edit_columns = [x for x in list_cols if x not in self.edit_exclude_columns]
"""
-----------------------------------------------------
GET WIDGETS SECTION
-----------------------------------------------------
"""
    def _get_related_view_widget(self, item, related_view,
                                 order_column='', order_direction='',
                                 page=None, page_size=None):
        """
        Build the list widget of *related_view* filtered down to the rows
        related to *item* (the record currently being shown).

        Returns the widget, or None when no supported relation exists
        between the two models.
        """
        fk = related_view.datamodel.get_related_fk(self.datamodel.obj)
        filters = related_view.datamodel.get_filters()
        # Check if it's a many to one model relation
        if related_view.datamodel.is_relation_many_to_one(fk):
            filters.add_filter_related_view(fk, self.datamodel.FilterRelationOneToManyEqual,
                                            self.datamodel.get_pk_value(item))
        # Check if it's a many to many model relation
        elif related_view.datamodel.is_relation_many_to_many(fk):
            filters.add_filter_related_view(fk, self.datamodel.FilterRelationManyToManyEqual,
                                            self.datamodel.get_pk_value(item))
        else:
            # No usable relation: log using the view's name, whether we
            # were handed a class or an instance.
            if isclass(related_view) and issubclass(related_view, BaseView):
                name = related_view.__name__
            else:
                name = related_view.__class__.__name__
            log.error("Can't find relation on related view {0}".format(name))
            return None
        return related_view._get_view_widget(filters=filters,
                                             order_column=order_column,
                                             order_direction=order_direction,
                                             page=page, page_size=page_size)
def _get_related_views_widgets(self, item, orders=None,
pages=None, page_sizes=None,
widgets=None, **args):
"""
:return:
Returns a dict with 'related_views' key with a list of
Model View widgets
"""
widgets = widgets or {}
widgets['related_views'] = []
for view in self._related_views:
if orders.get(view.__class__.__name__):
order_column, order_direction = orders.get(view.__class__.__name__)
else:
order_column, order_direction = '', ''
widgets['related_views'].append(self._get_related_view_widget(item, view,
order_column, order_direction,
page=pages.get(view.__class__.__name__),
page_size=page_sizes.get(
view.__class__.__name__)))
return widgets
def _get_view_widget(self, **kwargs):
"""
:return:
Returns a Model View widget
"""
return self._get_list_widget(**kwargs).get('list')
    def _get_list_widget(self, filters,
                         actions=None,
                         order_column='',
                         order_direction='',
                         page=None,
                         page_size=None,
                         widgets=None,
                         **args):
        """
        Query the datamodel with the joined (base + currently active)
        filters and build the list widget from the resulting page.

        :param filters: the active Filters object for this request
        :return: dict with the list widget stored under the 'list' key
        """
        widgets = widgets or {}
        actions = actions or self.actions
        page_size = page_size or self.page_size
        if not order_column and self.base_order:
            # Fall back to the view's configured default ordering.
            order_column, order_direction = self.base_order
        joined_filters = filters.get_joined_filters(self._base_filters)
        count, lst = self.datamodel.query(joined_filters, order_column, order_direction, page=page, page_size=page_size)
        pks = self.datamodel.get_keys(lst)
        # serialize composite pks
        pks = [self._serialize_pk_if_composite(pk) for pk in pks]
        widgets['list'] = self.list_widget(label_columns=self.label_columns,
                                           include_columns=self.list_columns,
                                           value_columns=self.datamodel.get_values(lst, self.list_columns),
                                           order_columns=self.order_columns,
                                           formatters_columns=self.formatters_columns,
                                           page=page,
                                           page_size=page_size,
                                           count=count,
                                           pks=pks,
                                           actions=actions,
                                           filters=filters,
                                           modelview_name=self.__class__.__name__)
        return widgets
def _get_show_widget(self, pk, item, widgets=None, actions=None, show_fieldsets=None):
widgets = widgets or {}
actions = actions or self.actions
show_fieldsets = show_fieldsets or self.show_fieldsets
widgets['show'] = self.show_widget(pk=pk,
label_columns=self.label_columns,
include_columns=self.show_columns,
value_columns=self.datamodel.get_values_item(item, self.show_columns),
formatters_columns=self.formatters_columns,
actions=actions,
fieldsets=show_fieldsets,
modelview_name=self.__class__.__name__
)
return widgets
def _get_add_widget(self, form, exclude_cols=None, widgets=None):
exclude_cols = exclude_cols or []
widgets = widgets or {}
widgets['add'] = self.add_widget(form=form,
include_cols=self.add_columns,
exclude_cols=exclude_cols,
fieldsets=self.add_fieldsets
)
return widgets
def _get_edit_widget(self, form, exclude_cols=None, widgets=None):
    """
    Build the "edit" (update) form widget.

    :param form: the edit form to render
    :param exclude_cols: column names suppressed from the form
    :param widgets: optional dict to add the widget to
    :return: ``widgets`` with the new widget stored under the ``'edit'`` key
    """
    if not exclude_cols:
        exclude_cols = []
    if not widgets:
        widgets = {}
    widgets['edit'] = self.edit_widget(
        form=form,
        include_cols=self.edit_columns,
        exclude_cols=exclude_cols,
        fieldsets=self.edit_fieldsets,
    )
    return widgets
def get_uninit_inner_views(self):
    """
    Return the inner views that still need to be initialized —
    normally the ``related_views`` declared on a ModelView.
    """
    return self.related_views
def get_init_inner_views(self):
    """
    Return the related ModelViews after they have been initialized
    (the internal ``_related_views`` list).
    """
    return self._related_views
"""
-----------------------------------------------------
CRUD functions behaviour
-----------------------------------------------------
"""
def _list(self):
    """
    List view logic; override to implement different behavior.

    Reads ordering, pagination and filter state from the request
    helpers, builds the list widget, and wraps it with the search
    widget.

    :return: the widgets dict containing list and search widgets
    """
    # Per-view ordering/pagination state is keyed by the view class name.
    if get_order_args().get(self.__class__.__name__):
        order_column, order_direction = get_order_args().get(self.__class__.__name__)
    else:
        order_column, order_direction = '', ''
    page = get_page_args().get(self.__class__.__name__)
    page_size = get_page_size_args().get(self.__class__.__name__)
    # Presumably mutates self._filters in place from the request args
    # (return value unused) — confirm against get_filter_args.
    get_filter_args(self._filters)
    widgets = self._get_list_widget(filters=self._filters,
                                    order_column=order_column,
                                    order_direction=order_direction,
                                    page=page,
                                    page_size=page_size)
    form = self.search_form.refresh()
    self.update_redirect()
    return self._get_search_widget(form=form, widgets=widgets)
def _show(self, pk):
    """
    Show view logic; override to implement different behavior.

    Fetches the record (404 if not visible through the base filters),
    builds the show widget plus the related-views list widgets.

    :param pk: primary key of the record to display
    :return: the widgets dict with show and related list widgets
    """
    pages = get_page_args()
    page_sizes = get_page_size_args()
    orders = get_order_args()
    # Base filters apply here too, so hidden records 404 instead of leaking.
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    widgets = self._get_show_widget(pk, item)
    self.update_redirect()
    return self._get_related_views_widgets(item, orders=orders,
                                           pages=pages, page_sizes=page_sizes, widgets=widgets)
def _add(self):
    """
    Add (create) view logic; override to implement different behavior.

    On POST with a valid form, creates the item and returns ``None``;
    otherwise returns the add-form widget for rendering.

    :return: ``None`` after a processed POST, else the add widgets dict
    """
    is_valid_form = True
    get_filter_args(self._filters)
    # Relation columns driven by active filters are removed from the form
    # and filled in automatically below.
    exclude_cols = self._filters.get_relation_cols()
    form = self.add_form.refresh()
    if request.method == 'POST':
        self._fill_form_exclude_cols(exclude_cols, form)
        if form.validate():
            self.process_form(form, True)
            item = self.datamodel.obj()
            form.populate_obj(item)
            try:
                # pre_add may veto the operation by raising.
                self.pre_add(item)
            except Exception as e:
                flash(str(e), "danger")
            else:
                if self.datamodel.add(item):
                    self.post_add(item)
                flash(*self.datamodel.message)
            finally:
                # NOTE(review): ``return`` inside ``finally`` silently
                # swallows any exception raised by add/post_add/flash
                # (flake8 B012) — confirm this is intentional.
                return None
        else:
            is_valid_form = False
    if is_valid_form:
        self.update_redirect()
    return self._get_add_widget(form=form, exclude_cols=exclude_cols)
def _edit(self, pk):
    """
    Edit (update) view logic; override to implement different behavior.

    On POST with a valid form, updates the item and returns ``None``;
    otherwise returns the edit widget plus related list widgets.

    :param pk: primary key of the record to edit
    :return: ``None`` after a processed POST, else the widgets dict
    """
    is_valid_form = True
    pages = get_page_args()
    page_sizes = get_page_size_args()
    orders = get_order_args()
    get_filter_args(self._filters)
    exclude_cols = self._filters.get_relation_cols()
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    # convert pk to correct type, if pk is non string type.
    pk = self.datamodel.get_pk_value(item)
    if request.method == 'POST':
        form = self.edit_form.refresh(request.form)
        # fill the form with the suppressed cols, generated from exclude_cols
        self._fill_form_exclude_cols(exclude_cols, form)
        # trick to pass unique validation
        form._id = pk
        if form.validate():
            self.process_form(form, False)
            form.populate_obj(item)
            try:
                # pre_update may veto the operation by raising.
                self.pre_update(item)
            except Exception as e:
                flash(str(e), "danger")
            else:
                if self.datamodel.edit(item):
                    self.post_update(item)
                flash(*self.datamodel.message)
            finally:
                # NOTE(review): ``return`` inside ``finally`` silently
                # swallows any exception raised by edit/post_update/flash
                # (flake8 B012) — confirm this is intentional.
                return None
        else:
            is_valid_form = False
    else:
        # Only force form refresh for select cascade events
        form = self.edit_form.refresh(obj=item)
        # Perform additional actions to pre-fill the edit form.
        self.prefill_form(form, pk)
    widgets = self._get_edit_widget(form=form, exclude_cols=exclude_cols)
    widgets = self._get_related_views_widgets(item, filters={},
                                              orders=orders, pages=pages, page_sizes=page_sizes, widgets=widgets)
    if is_valid_form:
        self.update_redirect()
    return widgets
def _delete(self, pk):
    """
    Delete function logic, override to implement different logic
    deletes the record with primary_key = pk

    ``pre_delete`` may veto the deletion by raising; the exception
    message is flashed to the user and the record is kept.

    :param pk:
        record primary key to delete
    """
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    try:
        self.pre_delete(item)
    except Exception as e:
        flash(str(e), "danger")
    else:
        if self.datamodel.delete(item):
            self.post_delete(item)
        flash(*self.datamodel.message)
        self.update_redirect()
"""
------------------------------------------------
HELPER FUNCTIONS
------------------------------------------------
"""
def _serialize_pk_if_composite(self, pk):
    """
    Serialize ``pk`` to a JSON string when the model uses a composite
    primary key; otherwise return it unchanged.

    ``datetime``/``date`` members are encoded as tagged dicts so that
    ``_deserialize_pk_if_composite`` can restore them.

    :param pk: the primary key value (possibly a composite structure)
    :return: a JSON string for composite keys, else ``pk`` unchanged
    """
    def date_serializer(obj):
        # Tag temporal values so deserialization can restore the type.
        # datetime must be checked first: it is a subclass of date.
        if isinstance(obj, datetime):
            return {
                "_type": "datetime",
                "value": obj.isoformat()
            }
        elif isinstance(obj, date):
            return {
                "_type": "date",
                "value": obj.isoformat()
            }

    if self.datamodel.is_pk_composite():
        try:
            pk = json.dumps(pk, default=date_serializer)
        except Exception:
            # Best effort: fall back to the raw pk when it is not
            # JSON-serializable. The previous bare ``except:`` also
            # swallowed SystemExit/KeyboardInterrupt.
            pass
    return pk
def _deserialize_pk_if_composite(self, pk):
    """
    Inverse of ``_serialize_pk_if_composite``: parse a JSON-encoded
    composite primary key back into a Python structure, restoring
    tagged datetime/date values.

    :param pk: the (possibly JSON string) primary key
    :return: the decoded structure for composite keys, else ``pk`` unchanged
    """
    def date_deserializer(obj):
        # Only dicts produced by date_serializer carry a ``_type`` tag.
        if '_type' not in obj:
            return obj
        from dateutil import parser
        if obj['_type'] == 'datetime':
            return parser.parse(obj['value'])
        elif obj['_type'] == 'date':
            return parser.parse(obj['value']).date()
        return obj

    if self.datamodel.is_pk_composite():
        try:
            pk = json.loads(pk, object_hook=date_deserializer)
        except Exception:
            # Best effort: leave pk untouched when it is not valid JSON.
            # The previous bare ``except:`` also swallowed
            # SystemExit/KeyboardInterrupt.
            pass
    return pk
def _fill_form_exclude_cols(self, exclude_cols, form):
    """
    Fill the suppressed (excluded) relation fields of ``form`` with the
    related objects resolved from the currently active filters.

    :param exclude_cols: relation column names removed from the form
    :param form: the form whose matching fields receive the related object
    """
    for col_name in exclude_cols:
        filter_value = self._filters.get_filter_value(col_name)
        related = self.datamodel.get_related_obj(col_name, filter_value)
        if hasattr(form, col_name):
            getattr(form, col_name).data = related
def prefill_form(self, form, pk):
    """
    Hook called only when rendering an edit form (a GET request).

    Override to perform extra work to pre-fill the form, e.g. for custom
    fields that depend on database contents; fields named after a normal
    column or relationship are filled automatically.

    example::

        def prefill_form(self, form, pk):
            if form.email.data:
                form.email_confirmation.data = form.email.data
    """
def process_form(self, form, is_created):
    """
    Hook called only when a create/edit form is submitted (a POST
    request), before the form is used to populate the item. Does
    nothing by default.

    :param form: the submitted form
    :param is_created: ``True`` on create, ``False`` on update

    example::

        def process_form(self, form, is_created):
            if not form.owner:
                form.owner.data = 'n/a'
    """
def pre_update(self, item):
    """
    Hook called just before an update is persisted.

    Raising any exception here aborts the update and shows the
    exception message to the user — useful for per-record rules,
    e.g. only letting the original creator update the object.
    """
def post_update(self, item):
    """Hook called after a successful update. Does nothing by default."""
def pre_add(self, item):
    """
    Hook called just before a record is added.

    Raising any exception here aborts the add and shows the exception
    message to the user.
    """
def post_add(self, item):
    """
    Override this, will be called after a successful add.
    """
    pass
def pre_delete(self, item):
    """
    Hook called just before a delete.

    Raising any exception here aborts the delete and shows the
    exception message to the user — useful for per-record rules,
    e.g. only letting the original creator delete the object.
    """
def post_delete(self, item):
    """Hook called after a successful delete. Does nothing by default."""
/KeyboardPaster-0.1.6-py3-none-any.whl/keyboardpaster/keyboard_layout_detector.py | import platform
import subprocess
import ctypes
from ctypes import wintypes
from kivy.logger import Logger
def get_operating_system():
    """
    Return a normalized OS name: ``"Linux"``, ``"Windows"`` or ``"MacOS"``.

    Raises:
        Exception: if ``platform.system()`` reports anything else.
    """
    normalized = {
        "Linux": "Linux",
        "Windows": "Windows",
        "Darwin": "MacOS",
    }
    os_name = platform.system()
    if os_name in normalized:
        return normalized[os_name]
    raise Exception(f"Unsupported operating system: {os_name}")
def get_keyboard_layout_windows():
    """
    Gets the keyboard language in use by the current active window
    process (Windows only; uses the Win32 user32 API via ctypes).

    :return: a human-readable language name, or the hex language id
        string when the id is not in the lookup table
    """
    languages = {'0x436' : "Afrikaans - South Africa", '0x041c' : "Albanian - Albania", '0x045e' : "Amharic - Ethiopia", '0x401' : "Arabic - Saudi Arabia",
                 '0x1401' : "Arabic - Algeria", '0x3c01' : "Arabic - Bahrain", '0x0c01' : "Arabic - Egypt", '0x801' : "Arabic - Iraq", '0x2c01' : "Arabic - Jordan",
                 '0x3401' : "Arabic - Kuwait", '0x3001' : "Arabic - Lebanon", '0x1001' : "Arabic - Libya", '0x1801' : "Arabic - Morocco", '0x2001' : "Arabic - Oman",
                 '0x4001' : "Arabic - Qatar", '0x2801' : "Arabic - Syria", '0x1c01' : "Arabic - Tunisia", '0x3801' : "Arabic - U.A.E.", '0x2401' : "Arabic - Yemen",
                 '0x042b' : "Armenian - Armenia", '0x044d' : "Assamese", '0x082c' : "Azeri (Cyrillic)", '0x042c' : "Azeri (Latin)", '0x042d' : "Basque",
                 '0x423' : "Belarusian", '0x445' : "Bengali (India)", '0x845' : "Bengali (Bangladesh)", '0x141A' : "Bosnian (Bosnia/Herzegovina)", '0x402' : "Bulgarian",
                 '0x455' : "Burmese", '0x403' : "Catalan", '0x045c' : "Cherokee - United States", '0x804' : "Chinese - People's Republic of China",
                 '0x1004' : "Chinese - Singapore", '0x404' : "Chinese - Taiwan", '0x0c04' : "Chinese - Hong Kong SAR", '0x1404' : "Chinese - Macao SAR", '0x041a' : "Croatian",
                 '0x101a' : "Croatian (Bosnia/Herzegovina)", '0x405' : "Czech", '0x406' : "Danish", '0x465' : "Divehi", '0x413' : "Dutch - Netherlands", '0x813' : "Dutch - Belgium",
                 '0x466' : "Edo", '0x409' : "English - United States", '0x809' : "English - United Kingdom", '0x0c09' : "English - Australia", '0x2809' : "English - Belize",
                 '0x1009' : "English - Canada", '0x2409' : "English - Caribbean", '0x3c09' : "English - Hong Kong SAR", '0x4009' : "English - India", '0x3809' : "English - Indonesia",
                 '0x1809' : "English - Ireland", '0x2009' : "English - Jamaica", '0x4409' : "English - Malaysia", '0x1409' : "English - New Zealand", '0x3409' : "English - Philippines",
                 '0x4809' : "English - Singapore", '0x1c09' : "English - South Africa", '0x2c09' : "English - Trinidad", '0x3009' : "English - Zimbabwe", '0x425' : "Estonian",
                 '0x438' : "Faroese", '0x429' : "Farsi", '0x464' : "Filipino", '0x040b' : "Finnish", '0x040c' : "French - France", '0x080c' : "French - Belgium",
                 '0x2c0c' : "French - Cameroon", '0x0c0c' : "French - Canada", '0x240c' : "French - Democratic Rep. of Congo", '0x300c' : "French - Cote d'Ivoire",
                 '0x3c0c' : "French - Haiti", '0x140c' : "French - Luxembourg", '0x340c' : "French - Mali", '0x180c' : "French - Monaco", '0x380c' : "French - Morocco",
                 '0xe40c' : "French - North Africa", '0x200c' : "French - Reunion", '0x280c' : "French - Senegal", '0x100c' : "French - Switzerland",
                 '0x1c0c' : "French - West Indies", '0x462' : "Frisian - Netherlands", '0x467' : "Fulfulde - Nigeria", '0x042f' : "FYRO Macedonian", '0x083c' : "Gaelic (Ireland)",
                 '0x043c' : "Gaelic (Scotland)", '0x456' : "Galician", '0x437' : "Georgian", '0x407' : "German - Germany", '0x0c07' : "German - Austria", '0x1407' : "German - Liechtenstein",
                 '0x1007' : "German - Luxembourg", '0x807' : "German - Switzerland", '0x408' : "Greek", '0x474' : "Guarani - Paraguay", '0x447' : "Gujarati", '0x468' : "Hausa - Nigeria",
                 '0x475' : "Hawaiian - United States", '0x040d' : "Hebrew", '0x439' : "Hindi", '0x040e' : "Hungarian", '0x469' : "Ibibio - Nigeria", '0x040f' : "Icelandic",
                 '0x470' : "Igbo - Nigeria", '0x421' : "Indonesian", '0x045d' : "Inuktitut", '0x410' : "Italian - Italy", '0x810' : "Italian - Switzerland", '0x411' : "Japanese",
                 '0x044b' : "Kannada", '0x471' : "Kanuri - Nigeria", '0x860' : "Kashmiri", '0x460' : "Kashmiri (Arabic)", '0x043f' : "Kazakh", '0x453' : "Khmer", '0x457' : "Konkani",
                 '0x412' : "Korean", '0x440' : "Kyrgyz (Cyrillic)", '0x454' : "Lao", '0x476' : "Latin", '0x426' : "Latvian", '0x427' : "Lithuanian", '0x043e' : "Malay - Malaysia",
                 '0x083e' : "Malay - Brunei Darussalam", '0x044c' : "Malayalam", '0x043a' : "Maltese", '0x458' : "Manipuri", '0x481' : "Maori - New Zealand", '0x044e' : "Marathi",
                 '0x450' : "Mongolian (Cyrillic)", '0x850' : "Mongolian (Mongolian)", '0x461' : "Nepali", '0x861' : "Nepali - India", '0x414' : "Norwegian (Bokmål)",
                 '0x814' : "Norwegian (Nynorsk)", '0x448' : "Oriya", '0x472' : "Oromo", '0x479' : "Papiamentu", '0x463' : "Pashto", '0x415' : "Polish", '0x416' : "Portuguese - Brazil",
                 '0x816' : "Portuguese - Portugal", '0x446' : "Punjabi", '0x846' : "Punjabi (Pakistan)", '0x046B' : "Quecha - Bolivia", '0x086B' : "Quecha - Ecuador",
                 '0x0C6B' : "Quecha - Peru", '0x417' : "Rhaeto-Romanic", '0x418' : "Romanian", '0x818' : "Romanian - Moldava", '0x419' : "Russian", '0x819' : "Russian - Moldava",
                 '0x043b' : "Sami (Lappish)", '0x044f' : "Sanskrit", '0x046c' : "Sepedi", '0x0c1a' : "Serbian (Cyrillic)", '0x081a' : "Serbian (Latin)", '0x459' : "Sindhi - India",
                 '0x859' : "Sindhi - Pakistan", '0x045b' : "Sinhalese - Sri Lanka", '0x041b' : "Slovak", '0x424' : "Slovenian", '0x477' : "Somali", '0x042e' : "Sorbian",
                 '0x0c0a' : "Spanish - Spain (Modern Sort)", '0x040a' : "Spanish - Spain (Traditional Sort)", '0x2c0a' : "Spanish - Argentina", '0x400a' : "Spanish - Bolivia",
                 '0x340a' : "Spanish - Chile", '0x240a' : "Spanish - Colombia", '0x140a' : "Spanish - Costa Rica", '0x1c0a' : "Spanish - Dominican Republic",
                 '0x300a' : "Spanish - Ecuador", '0x440a' : "Spanish - El Salvador", '0x100a' : "Spanish - Guatemala", '0x480a' : "Spanish - Honduras", '0xe40a' : "Spanish - Latin America",
                 '0x080a' : "Spanish - Mexico", '0x4c0a' : "Spanish - Nicaragua", '0x180a' : "Spanish - Panama", '0x3c0a' : "Spanish - Paraguay", '0x280a' : "Spanish - Peru",
                 '0x500a' : "Spanish - Puerto Rico", '0x540a' : "Spanish - United States", '0x380a' : "Spanish - Uruguay", '0x200a' : "Spanish - Venezuela", '0x430' : "Sutu",
                 '0x441' : "Swahili", '0x041d' : "Swedish", '0x081d' : "Swedish - Finland", '0x045a' : "Syriac", '0x428' : "Tajik", '0x045f' : "Tamazight (Arabic)",
                 '0x085f' : "Tamazight (Latin)", '0x449' : "Tamil", '0x444' : "Tatar", '0x044a' : "Telugu", '0x041e' : "Thai", '0x851' : "Tibetan - Bhutan",
                 '0x451' : "Tibetan - People's Republic of China", '0x873' : "Tigrigna - Eritrea", '0x473' : "Tigrigna - Ethiopia", '0x431' : "Tsonga", '0x432' : "Tswana",
                 '0x041f' : "Turkish", '0x442' : "Turkmen", '0x480' : "Uighur - China", '0x422' : "Ukrainian", '0x420' : "Urdu", '0x820' : "Urdu - India", '0x843' : "Uzbek (Cyrillic)",
                 '0x443' : "Uzbek (Latin)", '0x433' : "Venda", '0x042a' : "Vietnamese", '0x452' : "Welsh", '0x434' : "Xhosa", '0x478' : "Yi", '0x043d' : "Yiddish", '0x046a' : "Yoruba",
                 '0x435' : "Zulu", '0x04ff' : "HID (Human Interface Device)"
                 }
    user32 = ctypes.WinDLL('user32', use_last_error=True)
    # Get the current active window handle
    handle = user32.GetForegroundWindow()
    # Get the thread id from that window handle
    threadid = user32.GetWindowThreadProcessId(handle, 0)
    # Get the keyboard layout id from the threadid
    layout_id = user32.GetKeyboardLayout(threadid)
    # Extract the keyboard language id from the keyboard layout id
    language_id = layout_id & (2 ** 16 - 1)
    # Convert the keyboard language id from decimal to hexadecimal
    language_id_hex = hex(language_id)
    # BUG FIX: ``hex()`` never emits leading zeros (hex(0x041c) == '0x41c'),
    # so zero-padded table keys such as '0x041c' were unreachable. Look up
    # both the plain and the 4-digit zero-padded spellings.
    language_id_hex_padded = '0x{:04x}'.format(language_id)
    if language_id_hex in languages:
        return languages[language_id_hex]
    if language_id_hex_padded in languages:
        return languages[language_id_hex_padded]
    # Return language id hexadecimal value if not found.
    return str(language_id_hex)
def get_keyboard_layout_linux():
    """
    Detect the active keyboard layout on Linux via ``setxkbmap -query``.

    :return: the layout code (e.g. ``"us"``), or ``"EN_US"`` on failure
    """
    try:
        query = subprocess.check_output(["setxkbmap", "-query"]).decode("utf-8")
    except Exception as e:
        Logger.error(f"Error detecting keyboard layout on Linux: {e}. Defaulting to English (US).")
        return "EN_US"
    for line in query.splitlines():
        if line.startswith("layout:"):
            return line.split(" ")[-1]
    return "EN_US"
def get_keyboard_layout_macos():
    """
    Detect the selected input source on macOS by reading the HIToolbox
    preferences plist with ``defaults read``.

    :return: the keyboard layout name, or ``"EN_US"`` on failure
    """
    try:
        output = subprocess.check_output(
            ["defaults", "read", "/Library/Preferences/com.apple.HIToolbox.plist", "AppleSelectedInputSources"]
        ).decode("utf-8")
        # Pull the quoted value that follows the "KeyboardLayout Name" key.
        key_pos = output.find('"KeyboardLayout Name"')
        if key_pos != -1:
            value_start = output.find('"', key_pos + 22) + 1
            value_end = output.find('"', value_start)
            return output[value_start:value_end]
    except Exception as e:
        Logger.error(f"Error detecting keyboard layout on MacOS: {e}. Defaulting to English (US).")
    return "EN_US"
def get_keyboard_layout():
    """
    Detect the current keyboard layout for the running operating system.

    Falls back to ``"EN_US"`` whenever the OS or the layout cannot be
    determined.

    :return: a layout identifier string
    """
    try:
        os_name = get_operating_system()
        Logger.info(f"Detected operating system: {os_name}")
    except Exception as e:
        Logger.error(f"Error detecting operating system: {e}. Defaulting to English (US) keyboard layout.")
        return "EN_US"

    detectors = {
        "Linux": get_keyboard_layout_linux,
        "Windows": get_keyboard_layout_windows,
        "MacOS": get_keyboard_layout_macos,
    }
    try:
        detector = detectors.get(os_name)
        if detector is None:
            raise Exception(f"Unsupported operating system: {os_name}")
        layout = detector()
    except Exception as e:
        Logger.error(f"Error detecting keyboard layout: {e}. Defaulting to English (US).")
        layout = "EN_US"

    Logger.info(f"Detected keyboard layout: {layout}")
    return layout
/DFHypercode-0.0.1-py3-none-any.whl/Hypercode/enums/targets.py | from .enum_util import AutoSnakeToPascalCaseNameEnum
from ..utils import remove_u200b_from_doc
from enum import auto, unique
class Target:
    """Represents any target of any action.

    Marker base class; concrete targets subclass this (see
    :class:`SelectionTarget`, :class:`PlayerTarget` and
    :class:`EntityTarget`).
    """
    pass
class SelectionTarget(Target):
    """Represents the current selection.

    Attributes
    ----------\u200b
    value : :class:`str`
        Equal to ``"Selection"``, always.
    """
    value: str = "Selection"

    def __repr__(self):
        return f"<{type(self).__name__}>"

    def __str__(self):
        return self.value
@unique
class PlayerTarget(Target, AutoSnakeToPascalCaseNameEnum):
    """Contains the different targets a Player Action can have.

    Member values are auto-generated PascalCase names (via
    ``AutoSnakeToPascalCaseNameEnum``); ``@unique`` forbids aliases.
    """
    DEFAULT = auto()  #: The main player involved in the current Player Event.
    SELECTION = auto()  #: The current selection (selected player/s).
    ALL_PLAYERS = auto()  #: All players in the plot.
    DAMAGER = auto()  #: The damager in a damage-related event.
    SHOOTER = auto()  #: The shooter in a projectile-related event.
    KILLER = auto()  #: The killer in a kill-related event.
    VICTIM = auto()  #: The victim in a kill-related or damage-related event.
@unique
class EntityTarget(Target, AutoSnakeToPascalCaseNameEnum):
    """Contains the different targets an Entity Action can have.

    Member values are auto-generated PascalCase names (via
    ``AutoSnakeToPascalCaseNameEnum``); ``@unique`` forbids aliases.
    """
    DEFAULT = auto()  #: The main entity involved in the current Player/Entity Event, or the last spawned entity if none.
    SELECTION = auto()  #: The current selection (selected entity/entities or mob/s).
    ALL_ENTITIES = auto()  #: All entities on the plot.
    ALL_MOBS = auto()  #: All mobs on the plot.
    LAST_ENTITY = auto()  #: The most recently spawned entity.
    LAST_MOB = auto()  #: The most recently spawned mob.
    ENTITY_NAME = auto()  #: All entities whose names are equal to the text in the first parameter.
    MOB_NAME = auto()  #: All mob whose names are equal to the text in the first parameter.
    # TODO: Check if Entity_Name and Mob_Name are the actual targets on entity actions, or if just "Name"
    PROJECTILE = auto()  #: The projectile in a projectile-related event.
    KILLER = auto()  #: Selects the killer in a kill-related event.
    VICTIM = auto()  #: Selects the victim in a kill-related or damage-related event.
remove_u200b_from_doc(SelectionTarget) | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/webhook_create_request_py3.py |
from msrest.serialization import Model
class WebhookCreateRequest(Model):
    """WebhookCreateRequest.

    All required parameters must be populated in order to send to Azure.

    :param webhook_name: Required. The name of the webhook <span
     class='property-internal'>Required</span> <span
     class='property-internal'>Must be between 0 and 64 characters</span>
    :type webhook_name: str
    :param webhook_description: The description for the webhook
    :type webhook_description: str
    :param webhook_event_type_id: Required. The type of event that will
     trigger the webhook <span class='property-internal'>Required</span>
    :type webhook_event_type_id: int
    :param mailing_list: Required. List of email addresses. Each recipient in
     this list will receive an email when a webhook fails or gets disabled
     <span class='property-internal'>Required</span> <span
     class='property-internal'>Cannot be Empty</span>
    :type mailing_list: list[str]
    :param url: Required. The URL to be invoked by the webhook <span
     class='property-internal'>Required</span>
    :type url: str
    :param secret: Required. The encryption secret. <span
     class='property-internal'>Required</span>
    :type secret: str
    """

    # msrest validation constraints, enforced when the model is serialized.
    _validation = {
        'webhook_name': {'required': True, 'max_length': 64, 'min_length': 0},
        'webhook_event_type_id': {'required': True},
        'mailing_list': {'required': True},
        'url': {'required': True},
        'secret': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'webhook_name': {'key': 'webhookName', 'type': 'str'},
        'webhook_description': {'key': 'webhookDescription', 'type': 'str'},
        'webhook_event_type_id': {'key': 'webhookEventTypeId', 'type': 'int'},
        'mailing_list': {'key': 'mailingList', 'type': '[str]'},
        'url': {'key': 'url', 'type': 'str'},
        'secret': {'key': 'secret', 'type': 'str'},
    }

    def __init__(self, *, webhook_name: str, webhook_event_type_id: int, mailing_list, url: str, secret: str, webhook_description: str=None, **kwargs) -> None:
        super(WebhookCreateRequest, self).__init__(**kwargs)
        self.webhook_name = webhook_name
        self.webhook_description = webhook_description
        self.webhook_event_type_id = webhook_event_type_id
        self.mailing_list = mailing_list
        self.url = url
        self.secret = secret
/Carpenter-1.0.2.zip/Carpenter-1.0.2/carpenter/carpenter.py | import sys
import itertools
from blocks.cellanalyzer import is_empty_cell
def append_column(table, col_name, default_value=None):
    '''
    Appends a column to the raw data without any integrity checks.

    Args:
        default_value: The value which will assigned, not copied into each row
    '''
    header = table[0]
    header.append(col_name.strip())
    for data_row in table[1:]:
        data_row.append(default_value)
def remove_column(table, remove_index):
    '''
    Removes the specified column from the table and returns the
    (mutated) table. Each row is replaced by a new list without the
    removed column.
    '''
    for row_index, old_row in enumerate(table):
        table[row_index] = [cell for col, cell in enumerate(old_row) if col != remove_index]
    return table
def insert_column(table, insert_column, col_name=None, default_value=None):
    '''
    Inserts a new column before another specified column (by name or index).

    Args:
        insert_column: The column index or first row name where the insertion should occur
        col_name: The name to insert into the first row of the column. Leaving this argument
            to the default of None will apply the default_value to that row's cell.
        default_value: Can be a value or function which takes (row, index, value) as
            arguments to return a value.
    '''
    column_labels = table[0]
    following_index = 0

    def set_cell(row, column_index, value):
        # Allow function calls
        if hasattr(value, '__call__'):
            row[column_index] = value(column_labels, row, column_index)
        else:
            row[column_index] = value

    # NOTE(review): ``basestring`` makes this module Python 2 only.
    if isinstance(insert_column, basestring):
        insert_column = insert_column.strip()
        # Resolve the header label to a column index; stays 0 if not found.
        for column_index in range(len(column_labels)):
            if column_labels[column_index] == insert_column:
                following_index = column_index
                break
    else:
        following_index = insert_column

    # When a header name is given, row 0 gets the name and data starts at
    # row 1; otherwise the default value is applied to row 0 as well.
    col_data_start = 0
    if col_name != None:
        table[0].insert(following_index, col_name.strip())
        col_data_start = 1
    for row in table[col_data_start:]:
        row.insert(following_index, None)
        if default_value:
            # ``min`` guards against an insertion index past the row's end.
            set_cell(row, min(following_index, len(row)-1), default_value)
def stitch_block(block_list):
    '''
    Stitches blocks together into a single block columnwise. These blocks
    are 2D tables usually generated from tableproc. The final block will
    be of dimensions (max(num_rows), sum(num_cols)).
    '''
    result = [[]]
    for block in block_list:
        rows_in = len(block)
        width_in = len(block[0])
        # Grow the accumulated result until it is at least as tall as the
        # incoming block, padding new rows to the current width.
        while len(result) < rows_in:
            result.append([None] * len(result[0]))
        # Append the incoming rows column-wise.
        for dst_row, src_row in zip(result, block):
            dst_row.extend(src_row)
        # Rows taller than the incoming block get None padding instead.
        for dst_row in result[rows_in:]:
            dst_row.extend([None] * width_in)
    return result
def stitch_block_rows(block_list):
    '''
    Stitches blocks together into a single block rowwise. These blocks
    are 2D tables usually generated from tableproc. The final block will
    be of dimensions (sum(num_rows), max(num_cols)).
    '''
    stitched = [row for block in block_list for row in block]
    width = max(len(row) for row in stitched)
    # Pad every short row (in place) with None up to the widest row.
    for row in stitched:
        missing = width - len(row)
        if missing:
            row += [None] * missing
    return stitched
def row_content_length(row):
    '''
    Returns the length of non-empty content in a given row, i.e. the
    index just past the last non-empty cell. Returns 0 for a falsy row
    and len(row) when every cell is empty.
    '''
    if not row:
        return 0
    try:
        # The ``next`` builtin replaces the Python 2-only ``.next()``
        # generator method, keeping this working on Python 2.6+ and 3.x.
        return next(index + 1 for index, cell in reversed(list(enumerate(row))) if not is_empty_cell(cell))
    except StopIteration:
        # Every cell was empty; preserve the historical fallback.
        return len(row)
def split_block_by_row_length(block, split_row_length):
    '''
    Splits the block by finding all rows with less consequetive, non-empty
    rows than the min_row_length input. Short rows become single-row
    blocks; runs of longer rows stay together.
    '''
    pieces = []
    pending = []
    for row in block:
        if row_content_length(row) <= split_row_length:
            # Short rows act as separators and become single-row blocks.
            if pending:
                pieces.append(pending)
                pending = []
            pieces.append([row])
        else:
            pending.append(row)
    if pending:
        pieces.append(pending)
    return pieces
def fill_block_blanks(block, fill_value):
    '''
    Replaces every empty cell in the block with ``fill_value`` and
    returns the (mutated) block.
    '''
    for row in block:
        # Slice assignment keeps the original row objects alive.
        row[:] = [fill_value if is_empty_cell(cell) else cell for cell in row]
    return block
/Django-4.2.4.tar.gz/Django-4.2.4/django/core/checks/registry.py | from itertools import chain
from django.utils.inspect import func_accepts_kwargs
from django.utils.itercompat import is_iterable
class Tags:
    """
    Built-in tags for internal checks.

    Each value is the tag string used to select a subset of system
    checks (registered on a check via ``check.tags``).
    """

    admin = "admin"
    async_support = "async_support"
    caches = "caches"
    compatibility = "compatibility"
    database = "database"
    files = "files"
    models = "models"
    security = "security"
    signals = "signals"
    sites = "sites"
    staticfiles = "staticfiles"
    templates = "templates"
    translation = "translation"
    urls = "urls"
class CheckRegistry:
    """Registry of system check functions, split into regular and
    deployment checks."""

    def __init__(self):
        self.registered_checks = set()
        self.deployment_checks = set()

    def register(self, check=None, *tags, **kwargs):
        """
        Can be used as a function or a decorator. Register given function
        `f` labeled with given `tags`. The function should receive **kwargs
        and return list of Errors and Warnings.

        Example::

            registry = CheckRegistry()

            @registry.register('mytag', 'anothertag')
            def my_check(app_configs, **kwargs):
                # ... perform checks and collect `errors` ...
                return errors
            # or
            registry.register(my_check, 'mytag', 'anothertag')
        """

        def inner(check):
            if not func_accepts_kwargs(check):
                raise TypeError(
                    "Check functions must accept keyword arguments (**kwargs)."
                )
            check.tags = tags
            if kwargs.get("deploy"):
                self.deployment_checks.add(check)
            else:
                self.registered_checks.add(check)
            return check

        if callable(check):
            return inner(check)
        # Called with only tags: the first positional arg is itself a tag.
        if check:
            tags += (check,)
        return inner

    def run_checks(
        self,
        app_configs=None,
        tags=None,
        include_deployment_checks=False,
        databases=None,
    ):
        """
        Run all registered checks and return list of Errors and Warnings.
        """
        errors = []
        for check in self.get_checks(include_deployment_checks):
            # When tags are given, skip checks sharing no tag with them.
            if tags is not None and set(check.tags).isdisjoint(tags):
                continue
            new_errors = check(app_configs=app_configs, databases=databases)
            if not is_iterable(new_errors):
                raise TypeError(
                    "The function %r did not return a list. All functions "
                    "registered with the checks registry must return a list." % check,
                )
            errors.extend(new_errors)
        return errors

    def tag_exists(self, tag, include_deployment_checks=False):
        return tag in self.tags_available(include_deployment_checks)

    def tags_available(self, deployment_checks=False):
        tags = set()
        for check in self.get_checks(deployment_checks):
            tags.update(check.tags)
        return tags

    def get_checks(self, include_deployment_checks=False):
        checks = list(self.registered_checks)
        if include_deployment_checks:
            checks.extend(self.deployment_checks)
        return checks
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/dizoo/mujoco/config/walker2d_onppo_config.py | from easydict import EasyDict
# Number of parallel environments for collection and evaluation.
collector_env_num = 1
evaluator_env_num = 1
# On-policy PPO configuration for the MuJoCo Walker2d-v3 task.
walker2d_onppo_config = dict(
    exp_name='walker2d_onppo_seed0',
    env=dict(
        env_id='Walker2d-v3',
        norm_obs=dict(use_norm=False, ),
        norm_reward=dict(use_norm=False, ),
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        n_evaluator_episode=10,
        stop_value=6000,
    ),
    policy=dict(
        cuda=True,
        recompute_adv=True,
        action_space='continuous',
        model=dict(
            action_space='continuous',
            obs_shape=17,
            action_shape=6,
        ),
        learn=dict(
            epoch_per_collect=10,
            update_per_collect=1,
            batch_size=320,
            learning_rate=3e-4,
            value_weight=0.5,
            entropy_weight=0.001,
            clip_ratio=0.2,
            adv_norm=True,
            value_norm=True,
            # for onppo, when we recompute adv, we need the key done in data to split traj, so we must
            # use ignore_done=False here,
            # but when we add key traj_flag in data as the backup for key done, we could choose to use ignore_done=True
            # for halfcheetah, the length=1000
            # ignore_done=True,
            ignore_done=False,
            grad_clip_type='clip_norm',
            grad_clip_value=0.5,
        ),
        collect=dict(
            collector_env_num=collector_env_num,
            n_sample=3200,
            unroll_len=1,
            discount_factor=0.99,
            gae_lambda=0.95,
        ),
        eval=dict(evaluator=dict(eval_freq=500, )),
    ),
)
walker2d_onppo_config = EasyDict(walker2d_onppo_config)
main_config = walker2d_onppo_config
# Component types used by DI-engine to assemble env, env manager and policy.
walker2d_onppo_create_config = dict(
    env=dict(
        type='mujoco',
        import_names=['dizoo.mujoco.envs.mujoco_env'],
    ),
    env_manager=dict(type='base'),
    # env_manager=dict(type='subprocess'),
    policy=dict(type='ppo', ),
)
walker2d_onppo_create_config = EasyDict(walker2d_onppo_create_config)
create_config = walker2d_onppo_create_config

if __name__ == "__main__":
    # or you can enter `ding -m serial_onpolicy -c walker2d_onppo_config.py -s 0`
    from ding.entry import serial_pipeline_onpolicy
    serial_pipeline_onpolicy([main_config, create_config], seed=0)
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/us_tax_words.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def us_tax_words(path):
  """Number of Words in US Tax Law.

  Thousands of words in US tax law for 1995 to 2015 in 10 year intervals,
  split by income tax vs. other taxes and code (written by congress) vs.
  regulations (written by administrators). For 2015 only the combined
  `EntireTaxCodeAndRegs` figure is given.

  Columns: `year`, `IncomeTaxCode`, `otherTaxCode`, `EntireTaxCode`,
  `IncomeTaxRegulations`, `otherTaxRegulations`, `IncomeTaxCodeAndRegs`,
  `otherTaxCodeAndRegs`, `EntireTaxCodeAndRegs` — all word counts in
  thousands.

  Sources: Tax Foundation, "Number of Words in Internal Revenue Code and
  Federal Tax Regulations, 1955-2005"; Scott Greenberg, "Federal Tax Laws
  and Regulations are Now Over 10 Million Words Long", October 08, 2015.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `us_tax_words.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 7 rows and 10 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd

  path = os.path.expanduser(path)
  filename = 'us_tax_words.csv'
  filepath = os.path.join(path, filename)
  if not os.path.exists(filepath):
    url = 'http://dustintran.com/data/r/Ecdat/UStaxWords.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='us_tax_words.csv',
                               resume=False)

  data = pd.read_csv(filepath, index_col=0, parse_dates=True)
  return data.values, {'columns': data.columns}
'use strict';

// Registry of the error constructors created below, keyed by error code.
const codes = {};
// Define an Error subclass for the given code and register it in `codes`.
// `message` is either a fixed string or a factory receiving up to three
// arguments; `Base` defaults to Error.
function createErrorType(code, message, Base) {
  const Parent = Base || Error;

  function resolveMessage(a, b, c) {
    return typeof message === 'string' ? message : message(a, b, c);
  }

  class NodeError extends Parent {
    constructor(a, b, c) {
      super(resolveMessage(a, b, c));
    }
  }

  NodeError.prototype.name = Parent.name;
  NodeError.prototype.code = code;

  codes[code] = NodeError;
}
// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js
// Render a human-readable "of <thing> X" / "one of <thing> X, Y, or Z"
// phrase used when composing ERR_INVALID_ARG_TYPE messages.
function oneOf(expected, thing) {
  if (!Array.isArray(expected)) {
    return `of ${thing} ${String(expected)}`;
  }
  const names = expected.map((item) => String(item));
  const count = names.length;
  if (count > 2) {
    return `one of ${thing} ${names.slice(0, -1).join(', ')}, or ${names[count - 1]}`;
  }
  if (count === 2) {
    return `one of ${thing} ${names[0]} or ${names[1]}`;
  }
  return `of ${thing} ${names[0]}`;
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith
// Fallback for engines lacking String.prototype.startsWith.
function startsWith(str, search, pos) {
  const start = !pos || pos < 0 ? 0 : +pos;
  return str.substring(start, start + search.length) === search;
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith
// Fallback for engines lacking String.prototype.endsWith.
function endsWith(str, search, this_len) {
  const end =
    this_len === undefined || this_len > str.length ? str.length : this_len;
  return str.substring(end - search.length, end) === search;
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes
// Fallback for engines lacking String.prototype.includes.
function includes(str, search, start) {
  const from = typeof start === 'number' ? start : 0;
  // A match cannot exist when the remaining window is too short.
  if (from + search.length > str.length) {
    return false;
  }
  return str.indexOf(search, from) !== -1;
}
// Register the stream error types mirroring Node.js core's internal errors.
createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) {
  return 'The value "' + value + '" is invalid for option "' + name + '"'
}, TypeError);
createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) {
  // determiner: 'must be' or 'must not be'
  let determiner;
  if (typeof expected === 'string' && startsWith(expected, 'not ')) {
    determiner = 'must not be';
    expected = expected.replace(/^not /, '');
  } else {
    determiner = 'must be';
  }

  let msg;
  if (endsWith(name, ' argument')) {
    // For cases like 'first argument'
    msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`;
  } else {
    const type = includes(name, '.') ? 'property' : 'argument';
    msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`;
  }

  msg += `. Received type ${typeof actual}`;
  return msg;
}, TypeError);
createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF');
createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) {
  return 'The ' + name + ' method is not implemented'
});
createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close');
createErrorType('ERR_STREAM_DESTROYED', function (name) {
  return 'Cannot call ' + name + ' after a stream was destroyed';
});
createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times');
createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable');
createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end');
createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError);
createErrorType('ERR_UNKNOWN_ENCODING', function (arg) {
  return 'Unknown encoding: ' + arg
}, TypeError);
createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event');

module.exports.codes = codes;
# Module authorship/licensing metadata.
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import mimetypes
import os
import re
import sys
from urllib.parse import urlparse
import tornado
import tornado.escape
import tornado.gen
import tornado.http1connection
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.iostream
import tornado.tcpserver
import tornado.util
import tornado.web
from zipstream.ng import ZIP_DEFLATED, ZipStream
import octoprint.util
def fix_json_encode():
    """
    Replace ``tornado.escape.json_encode`` with a variant that falls back to
    ``octoprint.util.JsonEncoding.encode`` for values the stock encoder cannot
    serialize, allowing serialization of globally registered types like
    frozendict and others.
    """

    import json

    import tornado.escape

    from octoprint.util.json import JsonEncoding

    def _patched_json_encode(value):
        # Escape "</" as tornado's stock implementation does so the output
        # stays safe for embedding inside HTML <script> blocks.
        serialized = json.dumps(value, default=JsonEncoding.encode, allow_nan=False)
        return serialized.replace("</", "<\\/")

    tornado.escape.json_encode = _patched_json_encode
def enable_per_message_deflate_extension():
    """
    Patch ``tornado.websocket.WebSocketHandler.get_compression_options`` so
    the permessage-deflate websocket extension is offered, minimizing data
    bandwidth for clients that support the extension as well.
    """

    def _get_compression_options(self):
        # Lowest compression and memory settings: cheap on CPU/RAM while
        # still shrinking the payload.
        return {"compression_level": 1, "mem_level": 1}

    tornado.websocket.WebSocketHandler.get_compression_options = (
        _get_compression_options
    )
def fix_websocket_check_origin():
    """
    Patch ``tornado.websocket.WebSocketHandler.check_origin`` to perform the
    same-origin check against the Host header case-insensitively, as defined
    in RFC 6454, Section 4, item 5.
    """

    scheme_translation = {"wss": "https", "ws": "http"}

    def patched_check_origin(self, origin):
        def get_check_tuple(urlstring):
            # urlparse lowercases scheme and hostname, giving us the
            # case-insensitive comparison for free.
            parsed = urlparse(urlstring)
            scheme = scheme_translation.get(parsed.scheme, parsed.scheme)
            if parsed.port:
                port = parsed.port
            elif scheme == "http":
                port = 80
            elif scheme == "https":
                port = 443
            else:
                port = None
            return scheme, parsed.hostname, port

        return get_check_tuple(origin) == get_check_tuple(self.request.full_url())

    import tornado.websocket

    tornado.websocket.WebSocketHandler.check_origin = patched_check_origin
# ~~ More sensible logging
class RequestlessExceptionLoggingMixin(tornado.web.RequestHandler):
    """
    ``tornado.web.RequestHandler`` mixin that logs uncaught exceptions
    without dumping the full request object unless :attr:`LOG_REQUEST`
    is set, keeping logs free of potentially sensitive request data.
    """

    # Subclasses set this to True to include the full request repr in the
    # error log for uncaught exceptions.
    LOG_REQUEST = False

    def log_exception(self, typ, value, tb, *args, **kwargs):
        if isinstance(value, tornado.web.HTTPError):
            if value.log_message:
                # HTTPErrors with a log message are expected conditions;
                # log them as warnings without a traceback.
                # (renamed from `format`/`args` to avoid shadowing the
                # builtin and the method parameter)
                msg_format = "%d %s: " + value.log_message
                msg_args = [value.status_code, self._request_summary()] + list(
                    value.args
                )
                tornado.web.gen_log.warning(msg_format, *msg_args)
        else:
            if self.LOG_REQUEST:
                tornado.web.app_log.error(
                    "Uncaught exception %s\n%r",
                    self._request_summary(),
                    self.request,
                    exc_info=(typ, value, tb),
                )
            else:
                tornado.web.app_log.error(
                    "Uncaught exception %s",
                    self._request_summary(),
                    exc_info=(typ, value, tb),
                )
# ~~ CORS support
class CorsSupportMixin(tornado.web.RequestHandler):
    """
    `tornado.web.RequestHandler <http://tornado.readthedocs.org/en/branch4.0/web.html#request-handlers>`_ mixin that
    makes sure to set CORS headers similarly to the Flask backed API endpoints.
    """

    # Applications/subclasses flip this to True to enable CORS handling.
    ENABLE_CORS = False

    def set_default_headers(self):
        # Reflect the request's Origin back on regular (non-preflight)
        # requests when CORS support is enabled.
        origin = self.request.headers.get("Origin")
        if self.request.method != "OPTIONS" and origin and self.ENABLE_CORS:
            self.set_header("Access-Control-Allow-Origin", origin)

    @tornado.gen.coroutine
    def options(self, *args, **kwargs):
        # CORS preflight handler: reflect origin, requested method and
        # requested headers, then answer 204 No Content.
        if self.ENABLE_CORS:
            origin = self.request.headers.get("Origin")
            method = self.request.headers.get("Access-Control-Request-Method")

            # Allow the origin which made the XHR
            self.set_header("Access-Control-Allow-Origin", origin)

            # Allow the actual method
            self.set_header("Access-Control-Allow-Methods", method)

            # Allow for 10 seconds
            self.set_header("Access-Control-Max-Age", "10")

            # 'preflight' request contains the non-standard headers the real request will have (like X-Api-Key)
            custom_headers = self.request.headers.get("Access-Control-Request-Headers")
            if custom_headers is not None:
                self.set_header("Access-Control-Allow-Headers", custom_headers)

        self.set_status(204)
        self.finish()
# ~~ WSGI middleware
@tornado.web.stream_request_body
class UploadStorageFallbackHandler(RequestlessExceptionLoggingMixin, CorsSupportMixin):
    """
    A ``RequestHandler`` similar to ``tornado.web.FallbackHandler`` which fetches any files contained in the request bodies
    of content type ``multipart``, stores them in temporary files and supplies the ``fallback`` with the file's ``name``,
    ``content_type``, ``path`` and ``size`` instead via a rewritten body.

    Basically similar to what the nginx upload module does.

    Basic request body example:

    .. code-block:: none

        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="file"; filename="test.gcode"
        Content-Type: application/octet-stream

        ...
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="apikey"

        my_funny_apikey
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="select"

        true
        ------WebKitFormBoundarypYiSUx63abAmhT5C--

    That would get turned into:

    .. code-block:: none

        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="apikey"

        my_funny_apikey
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="select"

        true
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="file.path"
        Content-Type: text/plain; charset=utf-8

        /tmp/tmpzupkro
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="file.name"
        Content-Type: text/plain; charset=utf-8

        test.gcode
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="file.content_type"
        Content-Type: text/plain; charset=utf-8

        application/octet-stream
        ------WebKitFormBoundarypYiSUx63abAmhT5C
        Content-Disposition: form-data; name="file.size"
        Content-Type: text/plain; charset=utf-8

        349182
        ------WebKitFormBoundarypYiSUx63abAmhT5C--

    The underlying application can then access the contained files via their respective paths and just move them
    where necessary.
    """

    BODY_METHODS = ("POST", "PATCH", "PUT")
    """ The request methods that may contain a request body. """

    def initialize(
        self, fallback, file_prefix="tmp", file_suffix="", path=None, suffixes=None
    ):
        # fallback: callable invoked with (request, body) once the body has
        # been (re)written; file_prefix/file_suffix/path configure the
        # temporary files; suffixes optionally renames the generated
        # "<name>.<suffix>" form fields (name/path/content_type/size).
        if not suffixes:
            suffixes = {}

        self._fallback = fallback
        self._file_prefix = file_prefix
        self._file_suffix = file_suffix
        self._path = path

        # default suffixes map each key to itself, selectively overridden
        # by the provided ``suffixes`` dict
        self._suffixes = {key: key for key in ("name", "path", "content_type", "size")}
        for suffix_type, suffix in suffixes.items():
            if suffix_type in self._suffixes and suffix is not None:
                self._suffixes[suffix_type] = suffix

        # multipart boundary
        self._multipart_boundary = None

        # Parts, files and values will be stored here
        self._parts = {}
        self._files = []

        # Part currently being processed
        self._current_part = None

        # content type of request body
        self._content_type = None

        # bytes left to read according to content_length of request body
        # NOTE(review): this is taken from the header as-is (a string), it is
        # only ever used for truthiness checks below — confirm before using
        # it arithmetically.
        self._bytes_left = 0

        # buffer needed for identifying form data parts
        self._buffer = b""

        # buffer for new body
        self._new_body = b""

        # logger
        self._logger = logging.getLogger(__name__)

    def prepare(self):
        """
        Prepares the processing of the request. If it's a request that may contain a request body (as defined in
        :attr:`UploadStorageFallbackHandler.BODY_METHODS`) prepares the multipart parsing if content type fits. If it's a
        body-less request, just calls the ``fallback`` with an empty body and finishes the request.
        """
        if self.request.method in UploadStorageFallbackHandler.BODY_METHODS:
            self._bytes_left = self.request.headers.get("Content-Length", 0)
            self._content_type = self.request.headers.get("Content-Type", None)

            # request might contain a body
            if self.is_multipart():
                if not self._bytes_left:
                    # we don't support requests without a content-length
                    raise tornado.web.HTTPError(
                        411, log_message="No Content-Length supplied"
                    )

                # extract the multipart boundary
                fields = self._content_type.split(";")
                for field in fields:
                    k, sep, v = field.strip().partition("=")
                    if k == "boundary" and v:
                        # boundary value may optionally be double-quoted
                        if v.startswith('"') and v.endswith('"'):
                            self._multipart_boundary = tornado.escape.utf8(v[1:-1])
                        else:
                            self._multipart_boundary = tornado.escape.utf8(v)
                        break
                else:
                    # RFC2046 section 5.1 (as referred to from RFC 7578) defines the boundary
                    # parameter as mandatory for multipart requests:
                    #
                    #   The only mandatory global parameter for the "multipart" media type is
                    #   the boundary parameter, which consists of 1 to 70 characters [...]
                    #
                    # So no boundary? 400 Bad Request
                    raise tornado.web.HTTPError(
                        400, log_message="No multipart boundary supplied"
                    )
        else:
            self._fallback(self.request, b"")
            self._finished = True

    def data_received(self, chunk):
        """
        Called by Tornado on receiving a chunk of the request body. If request is a multipart request, takes care of
        processing the multipart data structure via :func:`_process_multipart_data`. If not, just adds the chunk to
        internal in-memory buffer.

        :param chunk: chunk of data received from Tornado
        """
        data = self._buffer + chunk
        if self.is_multipart():
            self._process_multipart_data(data)
        else:
            self._buffer = data

    def is_multipart(self):
        """Checks whether this request is a ``multipart`` request"""
        return self._content_type is not None and self._content_type.startswith(
            "multipart"
        )

    def _process_multipart_data(self, data):
        """
        Processes the given data, parsing it for multipart definitions and calling the appropriate methods.

        :param data: the data to process as a string
        """
        # check for boundary
        delimiter = b"--%s" % self._multipart_boundary
        delimiter_loc = data.find(delimiter)
        delimiter_len = len(delimiter)
        end_of_header = -1
        if delimiter_loc != -1:
            # found the delimiter in the currently available data
            # everything before the delimiter (minus the trailing \r\n) is
            # still payload of the current part, the rest goes to the buffer
            delimiter_data_end = 0 if delimiter_loc == 0 else delimiter_loc - 2
            data, self._buffer = data[0:delimiter_data_end], data[delimiter_loc:]
            end_of_header = self._buffer.find(b"\r\n\r\n")
        else:
            # make sure any boundary (with single or double ==) contained at the end of chunk does not get
            # truncated by this processing round => save it to the buffer for next round
            endlen = len(self._multipart_boundary) + 4
            data, self._buffer = data[0:-endlen], data[-endlen:]

        # stream data to part handler
        if data and self._current_part:
            self._on_part_data(self._current_part, data)

        if end_of_header >= 0:
            # a complete part header is available in the buffer
            self._on_part_header(self._buffer[delimiter_len + 2 : end_of_header])
            self._buffer = self._buffer[end_of_header + 4 :]

        if delimiter_loc != -1 and self._buffer.strip() == delimiter + b"--":
            # we saw the last boundary and are at the end of our request
            if self._current_part:
                self._on_part_finish(self._current_part)
                self._current_part = None
            self._buffer = b""
            self._on_request_body_finish()

    def _on_part_header(self, header):
        """
        Called for a new multipart header, takes care of parsing the header and calling :func:`_on_part` with the
        relevant data, setting the current part in the process.

        :param header: header to parse
        """
        # close any open parts
        if self._current_part:
            self._on_part_finish(self._current_part)
            self._current_part = None

        header_check = header.find(self._multipart_boundary)
        if header_check != -1:
            self._logger.warning(
                "Header still contained multipart boundary, stripping it..."
            )
            header = header[header_check:]

        # convert to dict
        try:
            header = tornado.httputil.HTTPHeaders.parse(header.decode("utf-8"))
        except UnicodeDecodeError:
            try:
                header = tornado.httputil.HTTPHeaders.parse(header.decode("iso-8859-1"))
            except Exception:
                # looks like we couldn't decode something here neither as UTF-8 nor ISO-8859-1
                self._logger.warning(
                    "Could not decode multipart headers in request, should be either UTF-8 or ISO-8859-1"
                )
                self.send_error(400)
                return

        disp_header = header.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header, strip_quotes=False)

        if disposition != "form-data":
            self._logger.warning(
                "Got a multipart header without form-data content disposition, ignoring that one"
            )
            return
        if not disp_params.get("name"):
            self._logger.warning("Got a multipart header without name, ignoring that one")
            return

        filename = disp_params.get("filename*", None)  # RFC 5987 header present?
        if filename is not None:
            try:
                filename = _extended_header_value(filename)
            except Exception:
                # parse error, this is not RFC 5987 compliant after all
                self._logger.warning(
                    "extended filename* value {!r} is not RFC 5987 compliant".format(
                        filename
                    )
                )
                self.send_error(400)
                return
        else:
            # no filename* header, just strip quotes from filename header then and be done
            filename = _strip_value_quotes(disp_params.get("filename", None))

        self._current_part = self._on_part_start(
            _strip_value_quotes(disp_params["name"]),
            header.get("Content-Type", None),
            filename=filename,
        )

    def _on_part_start(self, name, content_type, filename=None):
        """
        Called for new parts in the multipart stream. If ``filename`` is given creates new ``file`` part (which leads
        to storage of the data as temporary file on disk), if not creates a new ``data`` part (which stores
        incoming data in memory).

        Structure of ``file`` parts:

        * ``name``: name of the part
        * ``filename``: filename associated with the part
        * ``path``: path to the temporary file storing the file's data
        * ``content_type``: content type of the part
        * ``file``: file handle for the temporary file (mode "wb", not deleted on close, will be deleted however after
          handling of the request has finished in :func:`_handle_method`)

        Structure of ``data`` parts:

        * ``name``: name of the part
        * ``content_type``: content type of the part
        * ``data``: bytes of the part (initialized to an empty string)

        :param name: name of the part
        :param content_type: content type of the part
        :param filename: filename associated with the part.
        :return: dict describing the new part
        """
        if filename is not None:
            # this is a file
            import tempfile

            handle = tempfile.NamedTemporaryFile(
                mode="wb",
                prefix=self._file_prefix,
                suffix=self._file_suffix,
                dir=self._path,
                delete=False,
            )
            return {
                "name": tornado.escape.utf8(name),
                "filename": tornado.escape.utf8(filename),
                "path": tornado.escape.utf8(handle.name),
                "content_type": tornado.escape.utf8(content_type),
                "file": handle,
            }

        else:
            return {
                "name": tornado.escape.utf8(name),
                "content_type": tornado.escape.utf8(content_type),
                "data": b"",
            }

    def _on_part_data(self, part, data):
        """
        Called when new bytes are received for the given ``part``, takes care of writing them to their storage.

        :param part: part for which data was received
        :param data: data chunk which was received
        """
        if "file" in part:
            part["file"].write(data)
        else:
            part["data"] += data

    def _on_part_finish(self, part):
        """
        Called when a part gets closed, takes care of storing the finished part in the internal parts storage and for
        ``file`` parts closing the temporary file and storing the part in the internal files storage.

        :param part: part which was closed
        """
        name = part["name"]
        self._parts[name] = part
        if "file" in part:
            self._files.append(part["path"])
            part["file"].close()
            del part["file"]

    def _on_request_body_finish(self):
        """
        Called when the request body has been read completely. Takes care of creating the replacement body out of the
        logged parts, turning ``file`` parts into new ``data`` parts.
        """
        self._new_body = b""
        for name, part in self._parts.items():
            if "filename" in part:
                # add form fields for filename, path, size and content_type for all files contained in the request
                if "path" not in part:
                    continue

                parameters = {
                    "name": part["filename"],
                    "path": part["path"],
                    "size": str(os.stat(part["path"]).st_size),
                }
                if "content_type" in part:
                    parameters["content_type"] = part["content_type"]

                # rename the generated fields according to the configured suffixes
                fields = {
                    self._suffixes[key]: value for (key, value) in parameters.items()
                }
                for n, p in fields.items():
                    if n is None or p is None:
                        continue
                    key = name + b"." + octoprint.util.to_bytes(n)
                    self._new_body += b"--%s\r\n" % self._multipart_boundary
                    self._new_body += (
                        b'Content-Disposition: form-data; name="%s"\r\n' % key
                    )
                    self._new_body += b"Content-Type: text/plain; charset=utf-8\r\n"
                    self._new_body += b"\r\n"
                    self._new_body += octoprint.util.to_bytes(p) + b"\r\n"
            elif "data" in part:
                # plain data parts are passed through unchanged
                self._new_body += b"--%s\r\n" % self._multipart_boundary
                value = part["data"]
                self._new_body += b'Content-Disposition: form-data; name="%s"\r\n' % name
                if "content_type" in part and part["content_type"] is not None:
                    self._new_body += b"Content-Type: %s\r\n" % part["content_type"]
                self._new_body += b"\r\n"
                self._new_body += value + b"\r\n"
        self._new_body += b"--%s--\r\n" % self._multipart_boundary

    def _handle_method(self, *args, **kwargs):
        """
        Takes care of defining the new request body if necessary and forwarding
        the current request and changed body to the ``fallback``.
        """
        # determine which body to supply
        body = b""
        if self.is_multipart():
            # make sure we really processed all data in the buffer
            while len(self._buffer):
                self._process_multipart_data(self._buffer)

            # use rewritten body
            body = self._new_body
        elif self.request.method in UploadStorageFallbackHandler.BODY_METHODS:
            # directly use data from buffer
            body = self._buffer

        # rewrite content length
        self.request.headers["Content-Length"] = len(body)

        try:
            # call the configured fallback with request and body to use
            self._fallback(self.request, body)
            self._headers_written = True
        finally:
            # make sure the temporary files are removed again
            for f in self._files:
                octoprint.util.silent_remove(f)

    # make all http methods trigger _handle_method
    get = _handle_method
    post = _handle_method
    put = _handle_method
    patch = _handle_method
    delete = _handle_method
    head = _handle_method
    options = _handle_method
def _parse_header(line, strip_quotes=True):
    """
    Split a header value like ``form-data; name="file"; filename="x"`` into
    its main value and a dict of lower-cased parameter names to values,
    optionally stripping surrounding quotes from the values.
    """
    parts = tornado.httputil._parseparam(";" + line)
    key = next(parts)
    params = {}
    for part in parts:
        eq = part.find("=")
        if eq < 0:
            # parameter without '=' carries no value, skip it
            continue
        name = part[:eq].strip().lower()
        value = part[eq + 1 :].strip()
        params[name] = _strip_value_quotes(value) if strip_quotes else value
    return key, params
def _strip_value_quotes(value):
if not value:
return value
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace("\\\\", "\\").replace('\\"', '"')
return value
def _extended_header_value(value):
if not value:
return value
if value.lower().startswith("iso-8859-1'") or value.lower().startswith("utf-8'"):
# RFC 5987 section 3.2
from urllib.parse import unquote
encoding, _, value = value.split("'", 2)
return unquote(value, encoding=encoding)
else:
# no encoding provided, strip potentially present quotes and call it a day
return octoprint.util.to_unicode(_strip_value_quotes(value), encoding="utf-8")
class WsgiInputContainer:
    """
    A WSGI container for use with Tornado that allows supplying the request body to be used for ``wsgi.input`` in the
    generated WSGI environment upon call.

    A ``RequestHandler`` can thus provide the WSGI application with a stream for the request body, or a modified body.

    Example usage:

    .. code-block:: python

       wsgi_app = octoprint.server.util.WsgiInputContainer(octoprint_app)
       application = tornado.web.Application([
           (r".*", UploadStorageFallbackHandler, dict(fallback=wsgi_app),
       ])

    The implementation logic is basically the same as ``tornado.wsgi.WSGIContainer`` but the ``__call__`` and ``environ``
    methods have been adjusted to allow for an optionally supplied ``body`` argument which is then used for ``wsgi.input``.
    """

    def __init__(
        self, wsgi_application, headers=None, forced_headers=None, removed_headers=None
    ):
        # headers: added to the response only if not already set by the app;
        # forced_headers: always appended; removed_headers: names (lowercase)
        # filtered out of the final response.
        self.wsgi_application = wsgi_application

        if headers is None:
            headers = {}
        if forced_headers is None:
            forced_headers = {}
        if removed_headers is None:
            removed_headers = []

        self.headers = headers
        self.forced_headers = forced_headers
        self.removed_headers = removed_headers

    def __call__(self, request, body=None):
        """
        Wraps the call against the WSGI app, deriving the WSGI environment from the supplied Tornado ``HTTPServerRequest``.

        :param request: the ``tornado.httpserver.HTTPServerRequest`` to derive the WSGI environment from
        :param body: an optional body to use as ``wsgi.input`` instead of ``request.body``, can be a string or a stream
        """
        data = {}
        response = []

        def start_response(status, response_headers, exc_info=None):
            # standard WSGI start_response callable, capturing status and
            # headers for use after the app returns
            data["status"] = status
            data["headers"] = response_headers
            return response.append

        app_response = self.wsgi_application(
            WsgiInputContainer.environ(request, body), start_response
        )
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # close the WSGI response iterable if it supports it, per PEP 3333
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")

        status_code, reason = data["status"].split(" ", 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = {k.lower() for (k, v) in headers}
        body = tornado.escape.utf8(body)
        if status_code != 304:
            # 304 responses must not carry entity headers
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))

        # apply the configured default, forced and removed headers
        header_set = {k.lower() for (k, v) in headers}
        for header, value in self.headers.items():
            if header.lower() not in header_set:
                headers.append((header, value))
        for header, value in self.forced_headers.items():
            headers.append((header, value))
        headers = [
            (header, value)
            for header, value in headers
            if header.lower() not in self.removed_headers
        ]

        start_line = tornado.httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = tornado.httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)

    @staticmethod
    def environ(request, body=None):
        """
        Converts a ``tornado.httputil.HTTPServerRequest`` to a WSGI environment.

        An optional ``body`` to be used for populating ``wsgi.input`` can be supplied (either a string or a stream). If not
        supplied, ``request.body`` will be wrapped into a ``io.BytesIO`` stream and used instead.

        :param request: the ``tornado.httpserver.HTTPServerRequest`` to derive the WSGI environment from
        :param body: an optional body to use as ``wsgi.input`` instead of ``request.body``, can be a string or a stream
        """
        import io

        from tornado.wsgi import to_wsgi_str

        # determine the request_body to supply as wsgi.input
        if body is not None:
            if isinstance(body, (bytes, str)):
                request_body = io.BytesIO(tornado.escape.utf8(body))
            else:
                # assume a readable stream was supplied
                request_body = body
        else:
            request_body = io.BytesIO(tornado.escape.utf8(request.body))

        # split Host header into host and port, falling back to the
        # protocol's default port
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(
                tornado.escape.url_unescape(request.path, encoding=None, plus=False)
            ),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": request_body,
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        # remove transfer encoding header if chunked, otherwise flask wsgi entrypoint makes input empty
        if (
            "Transfer-Encoding" in request.headers
            and request.headers.get("Transfer-Encoding") == "chunked"
        ):
            request.headers.pop("Transfer-Encoding")
        for key, value in request.headers.items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ

    def _log(self, status_code, request):
        # mirror tornado's access log format: level by status class,
        # "<status> <method> <uri> (<ip>) <ms>ms"
        access_log = logging.getLogger("tornado.access")

        if status_code < 400:
            log_method = access_log.info
        elif status_code < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000 * request.request_time()
        summary = request.method + " " + request.uri + " (" + request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
# ~~ customized HTTP1Connection implementation
class CustomHTTPServer(tornado.httpserver.HTTPServer):
    """
    Custom implementation of ``tornado.httpserver.HTTPServer`` that allows defining max body sizes depending on path and
    method.

    The implementation is mostly taken from ``tornado.httpserver.HTTPServer``, the only difference is the creation
    of a ``CustomHTTP1ConnectionParameters`` instance instead of ``tornado.http1connection.HTTP1ConnectionParameters``
    which is supplied with the two new constructor arguments ``max_body_sizes`` and ``max_default_body_size`` and the
    creation of a ``CustomHTTP1ServerConnection`` instead of a ``tornado.http1connection.HTTP1ServerConnection`` upon
    connection by a client.

    ``max_body_sizes`` is expected to be an iterable containing tuples of the form (method, path regex, maximum body size),
    with method and path regex having to match in order for maximum body size to take affect.

    ``default_max_body_size`` is the default maximum body size to apply if no specific one from ``max_body_sizes`` matches.
    """

    def __init__(self, *args, **kwargs):
        # intentionally empty: tornado's HTTPServer routes construction
        # through initialize() (Configurable pattern), mirroring the stock
        # implementation's empty __init__
        pass

    def initialize(self, *args, **kwargs):
        # pop our two custom arguments before delegating to the stock
        # initialize, then wrap the resulting connection parameters
        default_max_body_size = kwargs.pop("default_max_body_size", None)
        max_body_sizes = kwargs.pop("max_body_sizes", None)

        tornado.httpserver.HTTPServer.initialize(self, *args, **kwargs)

        additional = {
            "default_max_body_size": default_max_body_size,
            "max_body_sizes": max_body_sizes,
        }
        self.conn_params = CustomHTTP1ConnectionParameters.from_stock_params(
            self.conn_params, **additional
        )

    def handle_stream(self, stream, address):
        # same as the stock implementation, but spawns our custom server
        # connection class
        context = tornado.httpserver._HTTPRequestContext(
            stream, address, self.protocol, self.trusted_downstream
        )
        conn = CustomHTTP1ServerConnection(stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)
class CustomHTTP1ServerConnection(tornado.http1connection.HTTP1ServerConnection):
    """
    A custom implementation of ``tornado.http1connection.HTTP1ServerConnection`` which utilizes a ``CustomHTTP1Connection``
    instead of a ``tornado.http1connection.HTTP1Connection`` in ``_server_request_loop``. The implementation logic is
    otherwise the same as ``tornado.http1connection.HTTP1ServerConnection``.
    """

    @tornado.gen.coroutine
    def _server_request_loop(self, delegate):
        # serve requests on this connection until it is closed or keep-alive
        # ends; mirrors the stock implementation except for the connection
        # class used
        try:
            while True:
                conn = CustomHTTP1Connection(
                    self.stream, False, self.params, self.context
                )
                request_delegate = delegate.start_request(self, conn)
                try:
                    ret = yield conn.read_response(request_delegate)
                except (
                    tornado.iostream.StreamClosedError,
                    tornado.iostream.UnsatisfiableReadError,
                ):
                    return
                except tornado.http1connection._QuietException:
                    # This exception was already logged.
                    conn.close()
                    return
                except Exception:
                    tornado.http1connection.gen_log.error(
                        "Uncaught exception", exc_info=True
                    )
                    conn.close()
                    return
                if not ret:
                    # no keep-alive: stop serving this connection
                    return
                yield tornado.gen.moment
        finally:
            delegate.on_close(self)
class CustomHTTP1Connection(tornado.http1connection.HTTP1Connection):
"""
A custom implementation of ``tornado.http1connection.HTTP1Connection`` which upon checking the ``Content-Length`` of
the request against the configured maximum utilizes ``max_body_sizes`` and ``default_max_body_size`` as a fallback.
"""
    def __init__(self, stream, is_client, params=None, context=None):
        # params: a CustomHTTP1ConnectionParameters instance carrying the
        # additional max_body_sizes / default_max_body_size settings
        if params is None:
            params = CustomHTTP1ConnectionParameters()
        tornado.http1connection.HTTP1Connection.__init__(
            self, stream, is_client, params=params, context=context
        )

        import re

        # precompile the (method, path regex, size) triples for lookup in
        # _get_max_content_length
        self._max_body_sizes = list(
            map(
                lambda x: (x[0], re.compile(x[1]), x[2]),
                self.params.max_body_sizes or list(),
            )
        )
        # fall back to the stream's max buffer size if no explicit default
        # max body size was configured
        self._default_max_body_size = (
            self.params.default_max_body_size or self.stream.max_buffer_size
        )
def _read_body(self, code, headers, delegate):
"""
Basically the same as ``tornado.http1connection.HTTP1Connection._read_body``, but determines the maximum
content length individually for the request (utilizing ``._get_max_content_length``).
If the individual max content length is 0 or smaller no content length is checked. If the content length of the
current request exceeds the individual max content length, the request processing is aborted and an
``HTTPInputError`` is raised.
"""
if "Content-Length" in headers:
if "Transfer-Encoding" in headers:
# Response cannot contain both Content-Length and
# Transfer-Encoding headers.
# http://tools.ietf.org/html/rfc7230#section-3.3.3
raise tornado.httputil.HTTPInputError(
"Response with both Transfer-Encoding and Content-Length"
)
if "," in headers["Content-Length"]:
# Proxies sometimes cause Content-Length headers to get
# duplicated. If all the values are identical then we can
# use them but if they differ it's an error.
pieces = re.split(r",\s*", headers["Content-Length"])
if any(i != pieces[0] for i in pieces):
raise tornado.httputil.HTTPInputError(
"Multiple unequal Content-Lengths: %r" % headers["Content-Length"]
)
headers["Content-Length"] = pieces[0]
try:
content_length = int(headers["Content-Length"])
except ValueError:
# Handles non-integer Content-Length value.
raise tornado.httputil.HTTPInputError(
"Only integer Content-Length is allowed: %s"
% headers["Content-Length"]
)
max_content_length = self._get_max_content_length(
self._request_start_line.method, self._request_start_line.path
)
if (
max_content_length is not None
and 0 <= max_content_length < content_length
):
raise tornado.httputil.HTTPInputError("Content-Length too long")
else:
content_length = None
if code == 204:
# This response code is not allowed to have a non-empty body,
# and has an implicit length of zero instead of read-until-close.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
if "Transfer-Encoding" in headers or content_length not in (None, 0):
raise tornado.httputil.HTTPInputError(
"Response with code %d should not have body" % code
)
content_length = 0
if content_length is not None:
return self._read_fixed_body(content_length, delegate)
if headers.get("Transfer-Encoding") == "chunked":
return self._read_chunked_body(delegate)
if self.is_client:
return self._read_body_until_close(delegate)
return None
def _get_max_content_length(self, method, path):
"""
Gets the max content length for the given method and path. Checks whether method and path match against any
of the specific maximum content lengths supplied in ``max_body_sizes`` and returns that as the maximum content
length if available, otherwise returns ``default_max_body_size``.
:param method: method of the request to match against
:param path: path of the request to match against
:return: determine maximum content length to apply to this request, max return 0 for unlimited allowed content
length
"""
for m, p, s in self._max_body_sizes:
if method == m and p.match(path):
return s
return self._default_max_body_size
class CustomHTTP1ConnectionParameters(tornado.http1connection.HTTP1ConnectionParameters):
    """
    An implementation of ``tornado.http1connection.HTTP1ConnectionParameters`` that adds two new parameters
    ``max_body_sizes`` and ``default_max_body_size``.

    For a description of these please see the documentation of ``CustomHTTPServer`` above.
    """

    def __init__(self, *args, **kwargs):
        # Pop our custom parameters first so the stock constructor does not
        # receive unexpected keyword arguments.
        max_body_sizes = kwargs.pop("max_body_sizes", list())
        default_max_body_size = kwargs.pop("default_max_body_size", None)

        tornado.http1connection.HTTP1ConnectionParameters.__init__(self, *args, **kwargs)

        self.max_body_sizes = max_body_sizes
        self.default_max_body_size = default_max_body_size

    @classmethod
    def from_stock_params(cls, other, **additional):
        """
        Build an instance from a stock ``HTTP1ConnectionParameters`` object,
        copying all of its attributes and overriding/adding ``additional`` kwargs.

        :param other: the stock parameters object to copy from
        :param additional: extra keyword arguments (e.g. ``max_body_sizes``)
        :return: a new :class:`CustomHTTP1ConnectionParameters`
        """
        kwargs = dict(other.__dict__)
        kwargs.update(additional)  # idiomatic replacement for the manual copy loop
        return cls(**kwargs)
# ~~ customized large response handler
class LargeResponseHandler(
    RequestlessExceptionLoggingMixin, CorsSupportMixin, tornado.web.StaticFileHandler
):
    """
    Customized `tornado.web.StaticFileHandler <http://tornado.readthedocs.org/en/branch4.0/web.html#tornado.web.StaticFileHandler>`_
    that allows delivery of the requested resource as attachment and access and request path validation through
    optional callbacks. Note that access validation takes place before path validation.

    Arguments:
       path (str): The system path from which to serve files (this will be forwarded to the ``initialize`` method of
           :class:``~tornado.web.StaticFileHandler``)
       default_filename (str): The default filename to serve if none is explicitly specified and the request references
           a subdirectory of the served path (this will be forwarded to the ``initialize`` method of
           :class:``~tornado.web.StaticFileHandler`` as the ``default_filename`` keyword parameter). Defaults to ``None``.
       as_attachment (bool): Whether to serve requested files with ``Content-Disposition: attachment`` header (``True``)
           or not. Defaults to ``False``.
       allow_client_caching (bool): Whether to allow the client to cache (by not setting any ``Cache-Control`` or
           ``Expires`` headers on the response) or not.
       access_validation (function): Callback to call in the ``get`` method to validate access to the resource. Will
           be called with ``self.request`` as parameter which contains the full tornado request object. Should raise
           a ``tornado.web.HTTPError`` if access is not allowed in which case the request will not be further processed.
           Defaults to ``None`` and hence no access validation being performed.
       path_validation (function): Callback to call in the ``get`` method to validate the requested path. Will be called
           with the requested path as parameter. Should raise a ``tornado.web.HTTPError`` (e.g. an 404) if the requested
           path does not pass validation in which case the request will not be further processed.
           Defaults to ``None`` and hence no path validation being performed.
       etag_generator (function): Callback to call for generating the value of the ETag response header. Will be
           called with the response handler as parameter. May return ``None`` to prevent the ETag response header
           from being set. If not provided the last modified time of the file in question will be used as returned
           by ``get_content_version``.
       name_generator (function): Callback to call for generating the value of the attachment file name header. Will be
           called with the requested path as parameter.
       mime_type_guesser (function): Callback to guess the mime type to use for the content type encoding of the
           response. Will be called with the requested path on disk as parameter.
       is_pre_compressed (bool): if the file is expected to be pre-compressed, i.e, if there is a file in the same
           directory with the same name, but with '.gz' appended and gzip-encoded
       stream_body (bool): whether to stream the file content chunk by chunk (``streamed_get``) instead of using the
           stock range/ETag aware delivery. Defaults to ``False``.
    """

    def initialize(
        self,
        path,
        default_filename=None,
        as_attachment=False,
        allow_client_caching=True,
        access_validation=None,
        path_validation=None,
        etag_generator=None,
        name_generator=None,
        mime_type_guesser=None,
        is_pre_compressed=False,
        stream_body=False,
    ):
        tornado.web.StaticFileHandler.initialize(
            self, os.path.abspath(path), default_filename
        )
        self._as_attachment = as_attachment
        self._allow_client_caching = allow_client_caching
        self._access_validation = access_validation
        self._path_validation = path_validation
        self._etag_generator = etag_generator
        self._name_generator = name_generator
        self._mime_type_guesser = mime_type_guesser
        self._is_pre_compressed = is_pre_compressed
        self._stream_body = stream_body

    def should_use_precompressed(self):
        """Serve the ``.gz`` variant only if configured AND the client accepts gzip."""
        return self._is_pre_compressed and "gzip" in self.request.headers.get(
            "Accept-Encoding", ""
        )

    def get(self, path, include_body=True):
        # access validation runs before path validation (see class docstring)
        if self._access_validation is not None:
            self._access_validation(self.request)
        if self._path_validation is not None:
            self._path_validation(path)

        if "cookie" in self.request.arguments:
            # NOTE(review): tornado request arguments are bytes - confirm the
            # cookie name is expected to arrive as bytes here
            self.set_cookie(self.request.arguments["cookie"][0], "true", path="/")

        if self.should_use_precompressed():
            if os.path.exists(os.path.join(self.root, path + ".gz")):
                self.set_header("Content-Encoding", "gzip")
                path = path + ".gz"
            else:
                logging.getLogger(__name__).warning(
                    "Precompressed assets expected but {}.gz does not exist "
                    "in {}, using plain file instead.".format(path, self.root)
                )

        if self._stream_body:
            return self.streamed_get(path, include_body=include_body)
        else:
            return tornado.web.StaticFileHandler.get(
                self, path, include_body=include_body
            )

    @tornado.gen.coroutine
    def streamed_get(self, path, include_body=True):
        """
        Version of StaticFileHandler.get that doesn't support ranges or ETag but streams the content. Helpful for files
        that might still change while being transmitted (e.g. log files)
        """
        # Set up our path instance variables.
        self.path = self.parse_url_path(path)
        del path  # make sure we don't refer to path instead of self.path again
        absolute_path = self.get_absolute_path(self.root, self.path)
        self.absolute_path = self.validate_absolute_path(self.root, absolute_path)
        if self.absolute_path is None:
            # validate_absolute_path already finished the request (e.g. 404)
            return

        content_type = self.get_content_type()
        if content_type:
            self.set_header("Content-Type", content_type)
        self.set_extra_headers(self.path)

        if include_body:
            content = self.get_content(self.absolute_path)
            if isinstance(content, bytes):
                content = [content]
            for chunk in content:
                try:
                    self.write(chunk)
                    # flush after every chunk so the client sees data immediately
                    yield self.flush()
                except tornado.iostream.StreamClosedError:
                    # client disconnected mid-transfer - stop streaming
                    return
        else:
            assert self.request.method == "HEAD"

    def set_extra_headers(self, path):
        if self._as_attachment:
            filename = None
            if callable(self._name_generator):
                filename = self._name_generator(path)
            if filename is None:
                filename = os.path.basename(path)

            # RFC 5987/6266 style: plain filename plus UTF-8 encoded filename*
            filename = tornado.escape.url_escape(filename, plus=False)
            self.set_header(
                "Content-Disposition",
                "attachment; filename=\"{}\"; filename*=UTF-8''{}".format(
                    filename, filename
                ),
            )

        if not self._allow_client_caching:
            self.set_header("Cache-Control", "max-age=0, must-revalidate, private")
            self.set_header("Expires", "-1")

        self.set_header("X-Original-Content-Length", str(self.get_content_size()))

    @property
    def original_absolute_path(self):
        """The path of the uncompressed file corresponding to the compressed file"""
        if self._is_pre_compressed and self.absolute_path.endswith(".gz"):
            # Fix: the previous ``rstrip(".gz")`` stripped *characters* from the
            # set {".", "g", "z"}, mangling names like "catalog.gz" -> "catalo";
            # strip the literal suffix instead.
            return self.absolute_path[: -len(".gz")]
        return self.absolute_path

    def compute_etag(self):
        if self._etag_generator is not None:
            etag = self._etag_generator(self)
        else:
            etag = str(self.get_content_version(self.absolute_path))

        # ETag values must be quoted per RFC 7232
        if not etag.endswith('"'):
            etag = f'"{etag}"'

        return etag

    # noinspection PyAttributeOutsideInit
    def get_content_type(self):
        if self._mime_type_guesser is not None:
            # renamed local from ``type`` to avoid shadowing the builtin
            guessed = self._mime_type_guesser(self.original_absolute_path)
            if guessed is not None:
                return guessed

        correct_absolute_path = None
        try:
            # temporarily point absolute_path at the uncompressed file so the
            # stock implementation guesses the type of the actual content
            if self.should_use_precompressed():
                correct_absolute_path = self.absolute_path
                self.absolute_path = self.original_absolute_path
            return tornado.web.StaticFileHandler.get_content_type(self)
        finally:
            # restore self.absolute_path
            if self.should_use_precompressed() and correct_absolute_path is not None:
                self.absolute_path = correct_absolute_path

    @classmethod
    def get_content_version(cls, abspath):
        """Return the file's mtime, used as the default content version / ETag source."""
        import os
        import stat

        return os.stat(abspath)[stat.ST_MTIME]
##~~ URL Forward Handler for forwarding requests to a preconfigured static URL
class UrlProxyHandler(
    RequestlessExceptionLoggingMixin, CorsSupportMixin, tornado.web.RequestHandler
):
    """
    `tornado.web.RequestHandler <http://tornado.readthedocs.org/en/branch4.0/web.html#request-handlers>`_ that proxies
    requests to a preconfigured url and returns the response. Allows delivery of the requested content as attachment
    and access validation through an optional callback.

    This will use `tornado.httpclient.AsyncHTTPClient <http://tornado.readthedocs.org/en/branch4.0/httpclient.html#tornado.httpclient.AsyncHTTPClient>`_
    for making the request to the configured endpoint and return the body of the client response with the status code
    from the client response and the following headers:

      * ``Date``, ``Cache-Control``, ``Expires``, ``ETag``, ``Server``, ``Content-Type`` and ``Location`` will be copied over.
      * If ``as_attachment`` is set to True, ``Content-Disposition`` will be set to ``attachment``. If ``basename`` is
        set including the attachment's ``filename`` attribute will be set to the base name followed by the extension
        guessed based on the MIME type from the ``Content-Type`` header of the response. If no extension can be guessed
        no ``filename`` attribute will be set.

    Arguments:
       url (str): URL to forward any requests to. A 404 response will be returned if this is not set. Defaults to ``None``.
       as_attachment (bool): Whether to serve files with ``Content-Disposition: attachment`` header (``True``)
           or not. Defaults to ``False``.
       basename (str): base name of file names to return as part of the attachment header, see above. Defaults to ``None``.
       access_validation (function): Callback to call in the ``get`` method to validate access to the resource. Will
           be called with ``self.request`` as parameter which contains the full tornado request object. Should raise
           a ``tornado.web.HTTPError`` if access is not allowed in which case the request will not be further processed.
           Defaults to ``None`` and hence no access validation being performed.
    """

    def initialize(
        self, url=None, as_attachment=False, basename=None, access_validation=None
    ):
        tornado.web.RequestHandler.initialize(self)
        self._url = url
        self._as_attachment = as_attachment
        self._basename = basename
        self._access_validation = access_validation

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        if self._access_validation is not None:
            self._access_validation(self.request)

        if self._url is None:
            # no proxy target configured
            raise tornado.web.HTTPError(404)

        client = tornado.httpclient.AsyncHTTPClient()
        # forward method, body and headers verbatim; redirects are passed
        # through to the client instead of being followed here
        r = tornado.httpclient.HTTPRequest(
            url=self._url,
            method=self.request.method,
            body=self.request.body,
            headers=self.request.headers,
            follow_redirects=False,
            allow_nonstandard_methods=True,
        )

        try:
            # NOTE(review): passing a callback to fetch() is legacy Tornado
            # behaviour that was removed in Tornado 6 - confirm the Tornado
            # version pinned by this project still supports it.
            return client.fetch(r, self.handle_response)
        except tornado.web.HTTPError as e:
            if hasattr(e, "response") and e.response:
                # error responses with a body are proxied through as-is
                self.handle_response(e.response)
            else:
                raise tornado.web.HTTPError(500)

    def handle_response(self, response):
        """Copy status, selected headers and body of the upstream response to the client."""
        if response.error and not isinstance(response.error, tornado.web.HTTPError):
            raise tornado.web.HTTPError(500)

        filename = None

        self.set_status(response.code)
        for name in (
            "Date",
            "Cache-Control",
            "Server",
            "Content-Type",
            "Location",
            "Expires",
            "ETag",
        ):
            value = response.headers.get(name)
            if value:
                self.set_header(name, value)

                if name == "Content-Type":
                    # derive the attachment file name from the response MIME type
                    filename = self.get_filename(value)

        if self._as_attachment:
            if filename is not None:
                self.set_header(
                    "Content-Disposition", "attachment; filename=%s" % filename
                )
            else:
                self.set_header("Content-Disposition", "attachment")

        if response.body:
            self.write(response.body)
        self.finish()

    def get_filename(self, content_type):
        """
        Build ``<basename><ext>`` where ``ext`` is guessed from ``content_type``;
        returns ``None`` if no basename is configured or no extension can be guessed.
        """
        if not self._basename:
            return None

        typeValue = list(x.strip() for x in content_type.split(";"))
        # NOTE(review): str.split always yields at least one element, so this
        # guard is effectively dead code
        if len(typeValue) == 0:
            return None

        extension = mimetypes.guess_extension(typeValue[0])
        if not extension:
            return None

        return f"{self._basename}{extension}"
class StaticDataHandler(
    RequestlessExceptionLoggingMixin, CorsSupportMixin, tornado.web.RequestHandler
):
    """
    Request handler replying to every GET with a fixed, preconfigured payload.

    Arguments:
        data (str): the static body to send back, empty by default
        content_type (str): value for the ``Content-Type`` response header,
            ``text/plain`` by default
    """

    def initialize(self, data="", content_type="text/plain"):
        # stash the canned payload and its content type for get()
        self.data, self.content_type = data, content_type

    def get(self, *args, **kwargs):
        # always a 200 with the configured payload, regardless of path/arguments
        self.set_status(200)
        self.set_header("Content-Type", self.content_type)
        self.write(self.data)
        self.flush()
        self.finish()
class DeprecatedEndpointHandler(CorsSupportMixin, tornado.web.RequestHandler):
    """
    Request handler that permanently redirects any HTTP method to ``url`` and
    logs the access so usages of deprecated endpoints can be tracked down.

    Arguments:
        url (str): redirect target, may contain ``{}`` placeholders which get
            filled from the captured URL path groups
    """

    def initialize(self, url):
        self._url = url
        self._logger = logging.getLogger(__name__)

    def _handle_method(self, *args, **kwargs):
        # build the concrete target from the captured path groups
        to_url = self._url.format(*args)
        self._logger.info(
            f"Redirecting deprecated endpoint {self.request.path} to {to_url}"
        )
        self.redirect(to_url, permanent=True)

    # every HTTP verb funnels into the same permanent-redirect logic
    get = post = put = patch = delete = head = options = _handle_method
class StaticZipBundleHandler(CorsSupportMixin, tornado.web.RequestHandler):
    """
    Request handler that streams a preconfigured list of files to the client
    as a ZIP bundle.

    Arguments:
        files (list): entries to include; plain path strings or dicts carrying
            ``path``, ``name``, ``iter`` or ``content`` keys (see ``normalize_files``)
        as_attachment (bool): whether to send a ``Content-Disposition: attachment``
            header; requires ``attachment_name``
        attachment_name (str): file name to use for the attachment
        access_validation (function): optional request validator, called with the
            tornado request; should raise an ``HTTPError`` to deny access
        compress (bool): whether to attempt DEFLATE compression of the bundle
    """

    def initialize(
        self,
        files=None,
        as_attachment=True,
        attachment_name=None,
        access_validation=None,
        compress=False,
    ):
        if files is None:
            files = []
        if as_attachment and not attachment_name:
            raise ValueError("attachment name must be set if as_attachment is True")

        self._files = files
        self._as_attachment = as_attachment
        self._attachment_name = attachment_name
        self._access_validator = access_validation
        self._compress = compress

    def get(self, *args, **kwargs):
        if self._access_validator is not None:
            self._access_validator(self.request)
        return self.stream_zip(self._files)

    def get_attachment_name(self):
        # overridable hook for the download file name
        return self._attachment_name

    def normalize_files(self, files):
        """
        Normalize entries to dicts; plain strings become ``{"path": ...}``.

        NOTE(review): entries that are neither strings nor dicts with a
        path/iter/content key are silently dropped here - confirm intended.
        """
        result = []
        for f in files:
            if isinstance(f, str):
                result.append({"path": f})
            elif isinstance(f, dict) and ("path" in f or "iter" in f or "content" in f):
                result.append(f)
        return result

    @tornado.gen.coroutine
    def stream_zip(self, files):
        """Stream the given files to the client as a (possibly compressed) ZIP."""
        self.set_header("Content-Type", "application/zip")
        if self._as_attachment:
            self.set_header(
                "Content-Disposition",
                f'attachment; filename="{self.get_attachment_name()}"',
            )

        z = ZipStream(sized=True)
        if self._compress:
            try:
                # a compressed stream cannot be pre-sized; the z.sized check
                # below then skips the Content-Length header accordingly
                z = ZipStream(compress_type=ZIP_DEFLATED)
            except RuntimeError:
                # no zlib support
                pass

        for f in self.normalize_files(files):
            name = f.get("name")
            path = f.get("path")
            data = f.get("iter") or f.get("content")
            if path:
                # on-disk file; name (if given) overrides the archive name
                z.add_path(path, arcname=name)
            elif data and name:
                # in-memory content/iterator; requires an explicit archive name
                z.add(data, arcname=name)

        if z.sized:
            self.set_header("Content-Length", len(z))
        self.set_header("Last-Modified", z.last_modified)

        for chunk in z:
            try:
                self.write(chunk)
                yield self.flush()
            except tornado.iostream.StreamClosedError:
                # client disconnected mid-download - stop streaming
                return
class DynamicZipBundleHandler(StaticZipBundleHandler):
    """
    A ZIP bundle handler whose file list is supplied per request — via the
    ``files`` query parameter on GET or a JSON/form body on POST — instead of
    being configured up front.

    Arguments:
        path_validation (function): optional validator called with each requested
            path; should raise an ``HTTPError`` for paths that must not be served
        path_processor (function): optional mapper applied to each requested path;
            may return either the processed path or an ``(archive name, path)`` tuple
        as_attachment / attachment_name / access_validation / compress: see
            :class:`StaticZipBundleHandler`
    """

    # noinspection PyMethodOverriding
    def initialize(
        self,
        path_validation=None,
        path_processor=None,
        as_attachment=True,
        attachment_name=None,
        access_validation=None,
        compress=False,
    ):
        if as_attachment and not attachment_name:
            raise ValueError("attachment name must be set if as_attachment is True")

        self._path_validator = path_validation
        self._path_processor = path_processor
        self._as_attachment = as_attachment
        self._attachment_name = attachment_name
        self._access_validator = access_validation
        self._compress = compress

    def get(self, *args, **kwargs):
        if self._access_validator is not None:
            self._access_validator(self.request)

        files = list(
            map(octoprint.util.to_unicode, self.request.query_arguments.get("files", []))
        )
        return self._get_files_zip(files)

    def post(self, *args, **kwargs):
        if self._access_validator is not None:
            self._access_validator(self.request)

        import json

        content_type = self.request.headers.get("Content-Type", "")

        try:
            if "application/json" in content_type:
                data = json.loads(self.request.body)
            else:
                data = self.request.body_arguments
        except Exception:
            # malformed body -> client error
            raise tornado.web.HTTPError(400)

        return self._get_files_zip(
            list(map(octoprint.util.to_unicode, data.get("files", [])))
        )

    def _get_files_zip(self, files):
        """Process, validate and stream the requested files; 400 on an empty list."""
        files = self.normalize_files(files)
        if not files:
            raise tornado.web.HTTPError(400)

        for f in files:
            if "path" in f:
                if callable(self._path_processor):
                    path = self._path_processor(f["path"])
                    if isinstance(path, tuple):
                        f["name"], f["path"] = path
                    else:
                        f["path"] = path

                # Fix: mirror the callable check used for the path processor; the
                # previous code unconditionally called a possibly-None validator
                # (path_validation defaults to None in initialize), turning every
                # request into a 500. NOTE(review): without a validator configured,
                # requested paths are streamed unvalidated - ensure callers always
                # supply one where paths originate from untrusted input.
                if callable(self._path_validator):
                    self._path_validator(f["path"])

        return self.stream_zip(files)
class SystemInfoBundleHandler(CorsSupportMixin, tornado.web.RequestHandler):
    """
    Request handler that generates the OctoPrint systeminfo ZIP bundle on the
    fly and streams it to the client as an attachment.

    Arguments:
        access_validation (function): optional request validator, called with the
            tornado request; should raise an ``HTTPError`` to deny access
    """

    # noinspection PyMethodOverriding
    def initialize(self, access_validation=None):
        self._access_validator = access_validation

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        if self._access_validator is not None:
            self._access_validator(self.request)

        # imported lazily to avoid import cycles with the server package
        from octoprint.cli.systeminfo import (
            get_systeminfo,
            get_systeminfo_bundle,
            get_systeminfo_bundle_name,
        )
        from octoprint.server import (
            connectivityChecker,
            environmentDetector,
            pluginManager,
            printer,
            safe_mode,
        )
        from octoprint.settings import settings

        # collect the systeminfo dict, enriched with request specific fields
        systeminfo = get_systeminfo(
            environmentDetector,
            connectivityChecker,
            settings(),
            {
                "browser.user_agent": self.request.headers.get("User-Agent"),
                "octoprint.safe_mode": safe_mode is not None,
                "systeminfo.generator": "zipapi",
            },
        )

        z = get_systeminfo_bundle(
            systeminfo,
            settings().getBaseFolder("logs"),
            printer=printer,
            plugin_manager=pluginManager,
        )

        self.set_header("Content-Type", "application/zip")
        self.set_header(
            "Content-Disposition",
            f'attachment; filename="{get_systeminfo_bundle_name()}"',
        )
        # only emit Content-Length if the zip stream knows its final size
        if z.sized:
            self.set_header("Content-Length", len(z))
        self.set_header("Last-Modified", z.last_modified)

        for chunk in z:
            try:
                self.write(chunk)
                yield self.flush()
            except tornado.iostream.StreamClosedError:
                # client disconnected mid-download - stop streaming
                return

    def get_attachment_name(self):
        # NOTE(review): not used by get() above, which takes the name from
        # get_systeminfo_bundle_name() - confirm whether this hook is still needed
        import time

        return "octoprint-systeminfo-{}.zip".format(time.strftime("%Y%m%d%H%M%S"))
class GlobalHeaderTransform(tornado.web.OutputTransform):
    """
    Output transform applying a global header policy to every response:
    ``HEADERS`` entries are set only when absent, ``FORCED_HEADERS`` always
    overwrite, and ``REMOVED_HEADERS`` are dropped. Use :meth:`for_headers`
    to create a concretely configured subclass.
    """

    HEADERS = {}
    FORCED_HEADERS = {}
    REMOVED_HEADERS = []

    @classmethod
    def for_headers(cls, name, headers=None, forced_headers=None, removed_headers=None):
        """
        Dynamically create a :class:`GlobalHeaderTransform` subclass named
        ``name`` carrying the given header policy.
        """
        if headers is None:
            headers = {}
        if forced_headers is None:
            forced_headers = {}
        if removed_headers is None:
            removed_headers = []

        return type(
            name,
            (GlobalHeaderTransform,),
            {
                "HEADERS": headers,
                "FORCED_HEADERS": forced_headers,
                "REMOVED_HEADERS": removed_headers,
            },
        )

    def __init__(self, request):
        tornado.web.OutputTransform.__init__(self, request)

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        for header, value in self.HEADERS.items():
            # defaults: only set if the handler did not already set the header
            if header not in headers:
                headers[header] = value
        for header, value in self.FORCED_HEADERS.items():
            headers[header] = value
        for header in self.REMOVED_HEADERS:
            # Fix: deleting a header that is not present raised a KeyError and
            # broke the response; removal is best-effort.
            if header in headers:
                del headers[header]
        return status_code, headers, chunk
# ~~ Factory method for creating Flask access validation wrappers from the Tornado request context
def access_validation_factory(app, validator, *args):
    """
    Creates an access validation wrapper using the supplied validator.

    :param app: the Flask application whose request context and session
        handling should be used to resolve the current user
    :param validator: the access validator to use inside the validation wrapper;
        called with the Flask request and any additional ``args``
    :return: an access validator taking a request as parameter and performing the request validation
    """

    # noinspection PyProtectedMember
    def f(request):
        """
        Creates a custom wsgi and Flask request context in order to be able to process user information
        stored in the current session.

        :param request: The Tornado request for which to create the environment and context
        """
        import flask

        # translate the tornado request into a WSGI environ so Flask can open
        # the session cookie exactly as it would for a native Flask request
        wsgi_environ = WsgiInputContainer.environ(request)
        with app.request_context(wsgi_environ):
            session = app.session_interface.open_session(app, flask.request)
            user_id = session.get("_user_id")
            user = None

            # Yes, using protected methods is ugly. But these used to be publicly available in former versions
            # of flask-login, there are no replacements, and seeing them renamed & hidden in a minor version release
            # without any mention in the changelog means the public API ain't strictly stable either, so we might
            # as well make our life easier here and just use them...
            if user_id is not None and app.login_manager._user_callback is not None:
                user = app.login_manager._user_callback(user_id)
            app.login_manager._update_request_context_with_user(user)

            # the validator is expected to raise (e.g. HTTPError) to deny access
            validator(flask.request, *args)

    return f
def path_validation_factory(path_filter, status_code=404):
    """
    Creates a request path validation wrapper returning the defined status code if the supplied path_filter returns False.

    :param path_filter: predicate applied to the requested path; a falsy result
        means the request should be rejected
    :param status_code: HTTP status code to reject with, ``404`` by default
    :return: a request path validator taking a request path as parameter and performing the request validation
    """

    def validator(path):
        # accept silently, reject with the configured status code
        if path_filter(path):
            return
        raise tornado.web.HTTPError(status_code)

    return validator
def validation_chain(*validators):
    """
    Combine several request validators into a single one.

    :param validators: callables taking a request; each should raise to deny
        access, the combined validator invokes them in the given order
    :return: a validator calling every supplied validator in sequence
    """

    def chained(request):
        for validator in validators:
            validator(request)

    return chained
/Asciin.py-0.1.7.tar.gz/Asciin.py-0.1.7/Asciinpy/amath.py | from __future__ import division
def GRADIENT(P1, P2):
    """
    Return the slope of the segment ``P1`` -> ``P2``.

    :param P1: first point as an ``(x, y)`` pair
    :param P2: second point as an ``(x, y)`` pair
    :return: ``(y2 - y1) / (x2 - x1)``, or ``None`` for a vertical segment
    """
    run = P2[0] - P1[0]
    if run == 0:
        # vertical segment: slope is undefined
        return None
    return (P2[1] - P1[1]) / run
class Matrix:
    """
    A matrix class that supports up to 10x10 matrixes.

    Supports scalar multiplication, pretty printing, equality checks,
    matrix multiplication and alias references such as x for element 0 and
    y for element 1.

    NOTE(review): only 9 alias names exist in ``NAME_SPACE`` although the
    docstring claims support for up to 10x10 matrixes - confirm the limit.

    :param layers:
        The layers of the Matrix, a matrix can contain other matrixes.
    :type layers: Union[Tuple[:class:`int`], :class:`Matrix`]
    """

    # Attribute aliases for the successive layers (element 0 -> x, 1 -> y, ...).
    NAME_SPACE = ("x", "y", "z", "k", "a", "e", "i", "o", "u")

    def __init__(self, *layers):
        # Nested tuples/lists are recursively wrapped into Matrix instances,
        # scalars are kept as-is. zip() silently truncates any layers beyond
        # len(NAME_SPACE).
        self.dimension = dict(
            zip(
                self.NAME_SPACE,
                (
                    layer if not isinstance(layer, (tuple, list)) else Matrix(*layer)
                    for layer in layers
                ),
            )
        )
        # expose the layers as instance attributes (m.x, m.y, ...)
        self.__dict__.update(self.dimension)

    def __eq__(self, o):
        # Compare over the union of both alias sets so matrices of different
        # sizes compare unequal (missing aliases resolve to None on one side).
        # NOTE: defining __eq__ without __hash__ makes instances unhashable.
        to_cmpr = []
        to_cmpr.extend(list(self.dimension.keys()))
        to_cmpr.extend(list(o.dimension.keys()))
        to_cmpr = set(to_cmpr)
        return all(self.__dict__.get(attr) == o.__dict__.get(attr) for attr in to_cmpr)

    def __ne__(self, o):
        return not self.__eq__(o)

    def __repr__(self):
        # Nested matrixes (rows) are rendered on their own lines, scalars inline.
        return (
            "["
            + " ".join(
                (
                    val
                    if not isinstance(val, Matrix)
                    else ("\n " if i != 0 else "") + val.__repr__()
                )
                for i, val in enumerate(self.dimension.values())
            )
            + "]"
        )

    def __len__(self):
        # number of stored layers, not the total element count
        return len(self.dimension)

    def __mul__(self, other):
        # type: (Union[Matrix, int]) -> Matrix
        """
        The number of columns of the 1st matrix must equal the number of rows of the 2nd matrix in multiplicatioon.
        And the result will have the same number of rows as the 1st matrix, and the same number of columns as the 2nd matrix.

        Matrix Multiplication,
            Scalar multiplication just multiplies every component of a matrix with the multiplier

            In a matrix to matrix multiplication, consider their sizes,
            in format :: row x column

            Matrix A: MA = 1x2 [[1 1]   Matrix B: MB = 2x1 [[0 0]
                                [1 1]]                      [1 1]]

            Col of MA == Row of MB or is incompatible
            that means MA(1x2) MB(2x1)
                          \\  \\_EA__/ /
                           \\____EB_/

            expression A: EA = column(MA) == row(MB) represents the comparison expression needed to be true for compatibility
            expression B: EB = row(MA), column(MB) represents the dimension of the resultant matrix
        """
        if isinstance(other, Matrix):
            # self columns must equal other rows
            if len(self.dimension) != len(other.dimension["x"]):
                raise TypeError("uncompatible to multiple these matrixes")
            else:
                self_vals = list(self.dimension.values())
                other_vals = list(other.dimension.values())
                # TODO: matrix-matrix multiplication is unfinished - this branch
                # falls through and the method implicitly returns None.
                pass
        else:
            # scalar multiplication
            # relies on the module-level factory ``M`` defined at the bottom of the file
            return M(*[val * other for val in list(self.dimension.values())])
class Line:
    """
    A conceptual line class with simple properties.

    :param p1:
        Starting point
    :type p1: List[:class:`int`, :class:`int`]
    :param p2:
        Endpoint
    :type p2: List[:class:`int`, :class:`int`]
    """

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.gradient = GRADIENT(
            p1, p2
        )  # Union[:class:`int`, :class:`int`]: The gradient of the line; None for a vertical line
        self.equation = (
            self._get_equation()
        )  # Callable[[:class:`int`], Tuple[:class:`int`, :class:`int`]]: f(x) of the line that takes in x to return the (x,y) at that point
        self.inverse_equation = (
            self._get_inverse_equation()
        )  # Callable[[:class:`int`], Tuple[:class:`int`, :class:`int`]]: inverse f(x) of the line that takes in y to return the (x,y) at that point
        # lazily computed cache: (points set, snapshot of [p1, p2]) - see points
        self._points = None

    def __getitem__(self, x):
        # indexing a line evaluates its equation at x
        return self.equation(x)

    @property
    def points(self):
        """
        The points that join p1 to p2.

        :type: :class:`int`
        """
        # recompute when the cache is empty or the endpoints were mutated since
        # the last computation (compared against the stored snapshot copies)
        if self._points is None or self._points[1] != [self.p1, self.p2]:
            self._points = self._get_points(), [self.p1[:], self.p2[:]]
        return self._points[0]

    def _get_points(self):
        # Sample the line along both the x range (via equation) and the y range
        # (via inverse_equation) and merge into a set, so steep lines do not end
        # up with gaps between consecutive x samples.
        points_set = []
        if self.gradient is not None:
            points_set.extend(
                [
                    self.equation(x)
                    for x in range(
                        *(
                            (self.p1[0], self.p2[0] + 1)
                            if self.p1[0] - self.p2[0] < 0
                            else (self.p2[0], self.p1[0] + 1)
                        )
                    )
                ]
            )
        points_set.extend(
            [
                self.inverse_equation(y)
                for y in range(
                    *(
                        (self.p1[1], self.p2[1] + 1)
                        if self.p1[1] - self.p2[1] < 0
                        else (self.p2[1], self.p1[1] + 1)
                    )
                )
            ]
        )
        return set(points_set)

    def _get_equation(self):
        # horizontal line: y is constant
        if self.p1[1] - self.p2[1] == 0:
            return lambda x: (x, self.p1[1])
        # vertical (gradient None) or flat gradient: x is constant
        elif self.gradient is None or self.gradient == 0:
            return lambda y: (self.p1[0], y)
        else:
            # point-slope form: y = m*x - m*x1 + y1
            return lambda x: (
                x,
                (self.gradient * x) - (self.gradient * self.p1[0]) + self.p1[1],
            )

    def _get_inverse_equation(self):
        # vertical or zero-gradient line: x is constant for any y
        if self.gradient is None or self.gradient == 0:
            return lambda y: (self.p1[0], y)
        else:
            # solve the point-slope form for x: x = (y - y1)/m + x1
            return lambda y: (((y - self.p1[1]) / self.gradient) + self.p1[0], y)
class MatrixFactory:
    """
    Convenience factory exposing :class:`Matrix` construction through both
    subscription (``M[1, 2]``) and call syntax (``M(1, 2)``).
    """

    def __getitem__(self, layers):
        return Matrix(*layers)

    def __call__(self, *layers):
        # call syntax delegates to the subscription path; both unpack ``layers``
        return self[layers]


M = MatrixFactory()
/Helmholtz-0.2.0.tar.gz/Helmholtz-0.2.0/helmholtz/analysis/tools/core/pins.py | class Pin(object) :
"""Describe an Input or Output of a Component :
- name : name (alias) of the pin
- type : type constraint
- usecase : description of the pin,
- potential : an object of type Connection,
- component : the component that use this pin,
- connected_to : list of other pins connected to the current pin
"""
def __init__(self,constraint,usecase) :
super(Pin, self).__init__()
self.constraint = constraint
self.usecase = usecase
self.name = None
self.potential = None
self.component = None
self.connected_to = []
def _type(self):
return self.__class__.__name__
type = property(_type)
#These classes discriminate the different component pins
class Input(Pin) :
"""Input of a Component."""
def __init__(self,constraint=None, usecase=None) :
super(Input,self).__init__(constraint,usecase)
class DatabaseObject(Input):
"""Input that extract data from the database."""
def __init__(self,manager,entity,usecase=None) :
self.manager = manager
super(DatabaseObject,self).__init__(entity,usecase)
class DatabaseValue(Input):
"""Input that extract data from a field of the database."""
def __init__(self,manager,entity,field,usecase=None) :
assert hasattr(entity,field)
self.manager = manager
self.object = None
self.field = field
super(DatabaseObject,self).__init__(entity,usecase)
class Output(Pin) :
"""Output of a component."""
def __init__(self,constraint=None,usecase=None) :
super(Output,self).__init__(constraint,usecase)
class FileOutput(Output) :
"""Output of a component stored in a file."""
def __init__(self,root=None,usecase=None) :
self.root = None
super(Output,self).__init__(str,usecase)
class Parameter(Input) :
"""Input considered as a parameter of a component."""
def __init__(self,constraint=None,usecase=None) :
super(Parameter,self).__init__(constraint,usecase)
class DatabaseObjectParameter(Input):
"""Input that extract data from the database."""
def __init__(self,manager,entity,usecase=None) :
self.manager = manager
super(DatabaseObject,self).__init__(entity,usecase)
class DatabaseValueParameter(Input):
"""Input that extract data from a field of the database."""
def __init__(self,manager,entity,field,usecase=None) :
assert hasattr(entity,field)
self.manager = manager
self.object = None
self.field = field
super(DatabaseObject,self).__init__(entity,usecase) | PypiClean |
/Editobj3-0.2.tar.gz/Editobj3-0.2/undoredo.py |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
editobj3.undoredo: Multiple undo/redo framework
-----------------------------------------------
This module contains a multiple undo/redo framework. It is used by Editobj3 dialog boxes,
and it automatically call :func:`editobj3.observe.scan()` when doing or undoing an operation.
.. data:: stack
:annotation: the default undo/redo stack.
"""
__all__ = ["Stack", "UndoableOperation"]
import editobj3.observe as observe
class Stack(object):
    """An undo/redo stack.

    :param limit: the maximum number of undo/redo, defaults to 20."""

    def __init__(self, limit=20):
        self.limit = limit
        self.undoables = []
        self.redoables = []

    def can_undo(self):
        """Returns True if it is possible to undo an operation."""
        return self.undoables[-1] if self.undoables else False

    def can_redo(self):
        """Returns True if it is possible to redo an operation."""
        return self.redoables[-1] if self.redoables else False

    def do_operation(self, operation):
        """Does the operation. Can be overriden, e.g. to save data after the changes performed by the operation.

        :param operation: the operation.
        :type operation: :class:`Operation`"""
        operation.do_func()

    def undo(self):
        """Undoes the last operation available."""
        if not self.undoables:
            raise ValueError("No operation to undo!")
        # Constructing the opposite operation performs the undo as a side
        # effect and queues it on the redo list.
        self.undoables.pop().opposite()

    def redo(self):
        """Redoes the last operation available."""
        if not self.redoables:
            raise ValueError("No operation to redo!")
        self.redoables.pop().opposite()

    def clear(self):
        """Clears all undo/redo operations."""
        self.undoables = []
        self.redoables = []

    def merge_last_operations(self, name="", nb=2):
        """Merges the NB last operations. They will now be undone / redone as a single operation, with the given NAME."""
        merged = self.undoables[-nb:]
        if not name:
            name = ", ".join(op.name for op in merged)
        doers = [op.do_func for op in merged]
        undoers = [op.undo_func for op in merged]

        def do_it():
            for doer in doers:
                doer()

        def undo_it():
            # Undo in reverse order of execution.
            for undoer in reversed(undoers):
                undoer()

        # Keep only the oldest of the merged operations and retarget it.
        del self.undoables[-nb + 1:]
        target = self.undoables[-1]
        target.do_func = do_it
        target.undo_func = undo_it
        target.name = name
        return target

    def __repr__(self):
        undo_lines = "\n".join(" %s" % repr(op) for op in self.undoables)
        redo_lines = "\n".join(" %s" % repr(op) for op in self.redoables)
        return "<%s, undoables:\n%s\n redoables:\n%s\n>" % (
            self.__class__.__name__, undo_lines, redo_lines)
stack = Stack()
class _Operation(object):
    """Base class for undo/redo operations.

    Constructing an operation immediately executes it through its stack's
    :meth:`Stack.do_operation`.

    :param do_func: callable performing the operation.
    :param undo_func: callable reverting the operation.
    :param name: display name of the operation.
    :param stack_: stack to attach to; falsy values fall back to the
        module-level default stack.
    """
    def __init__(self, do_func, undo_func, name = "", stack_ = stack):
        self.do_func = do_func
        self.undo_func = undo_func
        self.name = name
        self.stack = stack_ or stack
        # Bug fix: run the operation through the stack it belongs to, not the
        # module-level default — otherwise a custom Stack subclass with an
        # overridden do_operation() (as its docstring invites) was bypassed.
        self.stack.do_operation(self)

    def __repr__(self):
        return "<%s '%s' do_func='%s' undo_func='%s'>" % (self.__class__.__name__, self.name, self.do_func, self.undo_func)
class UndoableOperation(_Operation):
    """UndoableOperation(do_func, undo_func, name = "", stack = undoredo.stack)

    An operation that can be undone.

    :param do_func: a callable that do the operation when called.
    :param undo_func: a callable that undo the operation when called.
    :param name: the name of the operation.
    :param stack: the undo/redo stack to add the operation to, defaults to undoredo.stack.
    """
    def __init__(self, do_func, undo_func, name = "", stack = None):
        _Operation.__init__(self, do_func, undo_func, name, stack)
        # Bug fix: the original appended to ``stack`` (the raw argument),
        # which is None by default and crashed with AttributeError; use the
        # resolved self.stack set by _Operation.__init__ instead.
        self.stack.undoables.append(self)
        # Enforce the stack's history limit by dropping the oldest entry.
        if len(self.stack.undoables) > self.stack.limit: del self.stack.undoables[0]
        observe.scan()

    def opposite(self):
        """Return (and execute, via _Operation.__init__) the redo operation
        reverting this one."""
        return _RedoableOperation(self.undo_func, self.do_func, self.name, self.stack)

    def coalesce_with(self, previous_undoable_operation):
        """Merge with the previous operation: undoing this one now reverts
        both, and the previous operation leaves the stack."""
        self.undo_func = previous_undoable_operation.undo_func
        previous_undoable_operation.stack.undoables.remove(previous_undoable_operation)
class _RedoableOperation(_Operation):
    """An operation that can be redone (the opposite of an undoable one)."""
    def __init__(self, do_func, undo_func, name = "", stack = stack):
        _Operation.__init__(self, do_func, undo_func, name, stack)
        # Consistency fix: append to the resolved self.stack (as
        # UndoableOperation does) rather than to the raw argument, which
        # crashed when a falsy placeholder was passed.
        self.stack.redoables.append(self)
        observe.scan()

    def opposite(self):
        """Return (and execute, via _Operation.__init__) the undo operation
        reverting this one."""
        return UndoableOperation(self.undo_func, self.do_func, self.name, self.stack)
/FastCNN2-1.23.425.1716.tar.gz/FastCNN2-1.23.425.1716/FastCNN/prx/YoloV5TrainProxy.py | import argparse
import math
import os
import random
import sys
import time
from copy import deepcopy
from datetime import datetime
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import yaml
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD, Adam, AdamW, lr_scheduler
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from FastCNN.utils.Logs import tlog
import FastCNN.prx.YoloV5ValidProxy as val # for end-of-epoch mAP
from FastCNN.models.experimental import attempt_load
from FastCNN.models.yolo import Model
from FastCNN.utils.autoanchor import check_anchors
from FastCNN.utils.autobatch import check_train_batch_size
from FastCNN.utils.CallBacks import Callbacks
from FastCNN.utils.dataloaders import create_dataloader
from FastCNN.utils.downloads import attempt_download
from FastCNN.utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size,
check_requirements, check_suffix, check_version, check_yaml, colorstr, get_latest_run,
increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer)
from FastCNN.utils.loggers import Loggers
from FastCNN.utils.loggers.wandb.wandb_utils import check_wandb_resume
from FastCNN.utils.loss import ComputeLoss
from FastCNN.utils.metrics import fitness
from FastCNN.utils.plots import plot_evolve, plot_labels
from FastCNN.utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
from FastCNN.prx.PathProxy import PathProxy3 as PathProxy
from IutyLib.commonutil.config import JConfig
from IutyLib.file.files import CsvFile
def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    """Run a full YOLOv5 training loop.

    Supports two modes: standard YOLOv5 (dataset described by a YAML file)
    and "fcnn" mode, where dataset paths, label list and checkpoint locations
    are resolved through the FastCNN PathProxy/JConfig project layout.

    :param hyp: path to a hyperparameter YAML file, or an already-loaded dict.
    :param opt: argparse.Namespace with the training options (see parse_opt).
    :param device: torch device to train on.
    :param callbacks: Callbacks instance receiving training lifecycle events.
    :return: the last validation ``results`` tuple.
    """
    save_dir,data, epochs, batch_size, weights, single_cls, evolve, cfg, resume, noval, nosave, workers, freeze,fcnn,projectid,modelid = \
        Path(opt.save_dir),opt.data, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze,opt.fcnn,opt.projectid,opt.modelid
    callbacks.run('on_pretrain_routine_start')

    # Directories
    if fcnn:
        # fcnn mode: checkpoint paths come from the FastCNN project layout.
        # NOTE(review): [:-1] strips the last character of the returned path —
        # presumably a trailing separator; TODO confirm against PathProxy.
        last = PathProxy.getTrainCKPT(projectid,modelid)[:-1]
        best = PathProxy.getValidCKPT(projectid,modelid)[:-1]
        trainlog = PathProxy.getTrainProcessValues(projectid,modelid)
    else:
        w = save_dir / 'weights'  # weights dir
        (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
        last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

    # Save run settings
    if not evolve and not fcnn:
        with open(save_dir / 'hyp.yaml', 'w') as f:
            yaml.safe_dump(hyp, f, sort_keys=False)
        with open(save_dir / 'opt.yaml', 'w') as f:
            yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
        if loggers.wandb:
            data_dict = loggers.wandb.data_dict
            if resume:
                weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

    # Config
    plots = not evolve and not opt.noplots  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + RANK)
    if not fcnn:
        with torch_distributed_zero_first(LOCAL_RANK):
            data_dict = data_dict or check_dataset(data)  # check if None
        train_path, val_path = data_dict['train'], data_dict['val']
        nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
        names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
        assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
        is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset
    else:
        # fcnn mode: classes and dataset descriptors come from the project config.
        config = getConfig(projectid,modelid)
        names = config["LabelList"]
        nc = len(names)
        train_path = os.path.join(PathProxy.getModelDir(projectid,modelid),"Dataset1.json")
        val_path = os.path.join(PathProxy.getModelDir(projectid,modelid),"Dataset2.json")
        os.makedirs(os.path.dirname(last),exist_ok=True)
        os.makedirs(os.path.dirname(best),exist_ok=True)

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    g = [], [], []  # optimizer parameter groups
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
            g[2].append(v.bias)
        if isinstance(v, bn):  # weight (no decay)
            g[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g[0].append(v.weight)

    if opt.optimizer == 'Adam':
        optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    elif opt.optimizer == 'AdamW':
        optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']})  # add g0 with weight_decay
    optimizer.add_param_group({'params': g[1]})  # add g1 (BatchNorm2d weights)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
                f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias")
    del g

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if resume:
            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
        if epochs < start_epoch:
            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.")
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, csd

    if fcnn:
        # fcnn mode: epoch count comes from the project's super-parameter file.
        # NOTE(review): the key is spelled "Epcho" in the config — TODO confirm.
        sp = JConfig(PathProxy.getSuperParamConfigPath(projectid,modelid)).get()
        epochs = int(sp["Epcho"])

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              batch_size // WORLD_SIZE,
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect,
                                              rank=LOCAL_RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '),
                                              shuffle=True,
                                              fcnn = fcnn)
    mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max())  # max label class
    nb = len(train_loader)  # number of batches
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       prefix=colorstr('val: '),
                                       fcnn=fcnn)[0]

        if not resume:
            labels = np.concatenate(dataset.labels, 0)
            # c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

        #callbacks.run('on_pretrain_routine_end')

    # DDP mode
    if cuda and RANK != -1:
        if check_version(torch.__version__, '1.11.0'):
            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
        else:
            model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    hyp['box'] *= 3 / nl  # scale to layers
    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper = EarlyStopping(patience=opt.patience)
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni - last_opt_step >= accumulate:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in {-1, 0}:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                #callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots)
                if callbacks.stop_training:
                    return
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in {-1, 0}:
            # mAP
            callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            if not noval or final_epoch:  # Calculate mAP
                if fcnn:
                    results, maps, _ = val.run(config,
                                               batch_size=batch_size // WORLD_SIZE * 2,
                                               imgsz=imgsz,
                                               model=ema.ema,
                                               single_cls=single_cls,
                                               dataloader=val_loader,
                                               save_dir=save_dir,
                                               plots=False,
                                               callbacks=callbacks,
                                               compute_loss=compute_loss,
                                               fcnn=True)
                else:
                    results, maps, _ = val.run(data_dict,
                                               batch_size=batch_size // WORLD_SIZE * 2,
                                               imgsz=imgsz,
                                               model=ema.ema,
                                               single_cls=single_cls,
                                               dataloader=val_loader,
                                               save_dir=save_dir,
                                               plots=False,
                                               callbacks=callbacks,
                                               compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            #tpv
            # NOTE(review): ``trainlog`` is only bound in fcnn mode — this line
            # presumably assumes fcnn is always True here; TODO confirm.
            tlog.add(trainlog,(log_vals[-9],log_vals[-8],0.0,log_vals[-9],log_vals[-8],0.0))
            #callback.run('on_fit_epoch_end',list(results))
            #callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    #'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None,
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                saveModelEpoch(last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                    saveModelEpoch(best)
                if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0):
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                del ckpt
                callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

            # Stop Single-GPU
            if RANK == -1 and stopper(epoch=epoch, fitness=fi):
                break

            # Stop DDP TODO: known issues shttps://github.com/ultralytics/yolov5/pull/4576
            # stop = stopper(epoch=epoch, fitness=fi)
            # if RANK == 0:
            #    dist.broadcast_object_list([stop], 0)  # broadcast 'stop' to all ranks

        # Stop DPP
        # with torch_distributed_zero_first(RANK):
        # if stop:
        #    break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in {-1, 0}:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        """
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = val.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        iou_thres=0.65 if is_coco else 0.60,  # best pycocotools results at 0.65
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss)  # val best model with plots
                    if is_coco:
                        callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
        """
        #callbacks.run('on_train_end', last, best, plots, epoch, results)

    torch.cuda.empty_cache()
    return results
def saveModelEpoch(path):
    """Record the completed-epoch count next to a checkpoint file.

    Reads the 'tpv' CSV log two directories above *path* — its row count is
    taken as the number of completed epochs (TODO confirm) — and writes that
    number to a 'model.epoch' file in the checkpoint's directory.

    :param path: path of the checkpoint file that was just saved.
    """
    dirpath = os.path.dirname(path)
    csv = CsvFile("tpv", os.path.dirname(dirpath))
    epoch = len(csv.getData())
    # Context manager guarantees the file is closed even if write() raises
    # (the original leaked the handle on error).
    with open(os.path.join(dirpath, "model.epoch"), 'w') as f:
        f.write(str(epoch))
def parse_opt(known=False):
    """Build the YOLOv5/FastCNN training argument parser and parse the CLI.

    :param known: when True, ignore unrecognized arguments
        (``parse_known_args``) instead of erroring out.
    :return: the parsed ``argparse.Namespace``.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--fcnn', action='store_true', help='not a fcnn project')
    p.add_argument('--weights', type=str, default=ROOT / 'yolov5n.pt', help='initial weights path')
    p.add_argument('--cfg', type=str, default='', help='model.yaml path')
    p.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    p.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    p.add_argument('--epochs', type=int, default=300)
    p.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    p.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    p.add_argument('--rect', action='store_true', help='rectangular training')
    p.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    p.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    p.add_argument('--noval', action='store_true', help='only validate final epoch')
    p.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    p.add_argument('--noplots', action='store_true', help='save no plot files')
    p.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    p.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    p.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    p.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    p.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    p.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    p.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    p.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    p.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    p.add_argument('--workers', type=int, default=0, help='max dataloader workers (per RANK in DDP mode)')
    p.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
    p.add_argument('--name', default='exp', help='save to project/name')
    p.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    p.add_argument('--quad', action='store_true', help='quad dataloader')
    p.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    p.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    p.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    p.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    p.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    p.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')

    # Weights & Biases arguments
    p.add_argument('--entity', default=None, help='W&B: Entity')
    p.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
    p.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
    p.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')

    if known:
        opt = p.parse_known_args()[0]
    else:
        opt = p.parse_args()
    return opt
def main(opt, callbacks=Callbacks()):
    """Entry point: validate options, handle resume / fcnn path resolution,
    set up (optional) DDP, then either train once or run hyperparameter
    evolution for ``opt.evolve`` generations.

    :param opt: parsed options namespace (see parse_opt).
    :param callbacks: Callbacks instance; NOTE(review): the mutable default
        is shared across calls — presumably intentional, TODO confirm.
    """
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        check_git_status()
        check_requirements(exclude=['thop'])

    # Resume
    if opt.resume and not check_wandb_resume(opt) and not opt.evolve:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f:
            opt = argparse.Namespace(**yaml.safe_load(f))  # replace
        opt.cfg, opt.weights, opt.resume = '', ckpt, True  # reinstate
        LOGGER.info(f'Resuming training from {ckpt}')
    elif opt.fcnn:
        # fcnn mode: output directory comes from the FastCNN project layout.
        opt.save_dir = PathProxy.getModelDir(opt.projectid,opt.modelid)
        opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)
        if WORLD_SIZE > 1 and RANK == 0:
            LOGGER.info('Destroying process group... ')
            dist.destroy_process_group()

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            print_mutation(results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
def cbtest(*args, **kwargs):
    """Debug callback: dump the positional arguments between '*' banners,
    then print the ninth-from-last character of the first argument.
    """
    banner = "*" * 10
    print(banner)
    for value in args:
        print(value)
    print(banner)
    # Raises IndexError when args is empty or args[0] has fewer than 9 items.
    print(args[0][-9])
def run(**kwargs):
    """Programmatic training entry point mirroring the CLI.

    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')

    Each keyword argument overrides the matching attribute on the parsed
    options namespace before training starts; the namespace is returned.
    """
    opt = parse_opt(True)
    for option, value in kwargs.items():
        setattr(opt, option, value)
    main(opt)
    return opt
def getConfig(projectid, modelid):
    """Load and return the stored configuration for the given project/model."""
    config_path = PathProxy.getConfigPath(projectid, modelid)
    return JConfig(config_path).get()
def doTrain(projectid,modelid):
    """Build a YOLOv5-style training options namespace for the given
    project/model and start training via main().

    The initial weights file is derived from the model "Type" entry of the
    stored configuration (e.g. "<Type>.pt" under the weights directory).
    """
    weightspath = PathProxy.getWeightsPath()
    config = getConfig(projectid,modelid)
    # NOTE(review): parse_args() below reads sys.argv, so real CLI arguments
    # leak into this programmatic call — consider parse_args([]); confirm intent.
    parser = argparse.ArgumentParser()
    parser.add_argument('--projectid', type=str, default=projectid, help='the id of project')
    parser.add_argument('--modelid', type=str, default=modelid, help='the id of model')
    parser.add_argument('--weights', type=str, default=os.path.join(weightspath,config["Type"]+".pt"), help='initial weights path')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    # store_false means --fcnn defaults to True and passing the flag disables it.
    parser.add_argument('--fcnn', action='store_false', help='not a fcnn project')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=0, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    # Weights & Biases arguments
    parser.add_argument('--entity', default=None, help='W&B: Entity')
    parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
    parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
    opt = parser.parse_args()
    print(opt)
    # NOTE(review): the Callbacks instance below is built and wired to
    # writeTPV but never passed to main() — confirm whether main() should
    # receive it.
    cb = Callbacks()
    cb.register_action("on_update_tpv",callback = writeTPV)
    main(opt)
    pass
def writeTPV(*args):
    # Callback registered for the "on_update_tpv" event in doTrain();
    # currently a no-op placeholder.
    pass
if __name__ == "__main__":
    # Direct CLI execution is currently disabled; the commented lines show
    # how a run with a registered "on_fit_epoch_end" callback would look.
    #opt = parse_opt()
    #callback = Callbacks()
    #callback.register_action("on_fit_epoch_end",callback = cbtest)
    #main(opt,callback)
    pass
/BulkWhois-0.2.1.tar.gz/BulkWhois-0.2.1/README.txt | ===========
BulkWhois
===========
BulkWhois provides a simple interface to several bulk whois servers. This
allows you to look up the ASNs, AS names, country codes, and other assorted
information very efficiently for a large number of IP addresses.
Currently implemented bulk whois servers are:
Cymru: BulkWhoisCymru
Shadowserver: BulkWhoisShadowserver
Note that these whois servers generally only accept IPv4 addresses, not
hostnames. IPv6 is not widely supported by bulk whois servers at present;
support will be added here once it becomes available.
So, it's up to the caller to convert hostnames to IP addresses first.
Anything which isn't an IPv4 address generates a warning and is dropped
before sending to the whois server.
Usage::
#!/usr/bin/env python
from bulkwhois.shadowserver import BulkWhoisShadowserver
bulk_whois = BulkWhoisShadowserver()
records = bulk_whois.lookup_ips(["212.58.246.91", "203.2.218.214"])
for record in records:
print "\t".join([records[record]["ip"], records[record]["asn"],
records[record]["as_name"], records[record]["cc"]])
Installation
============
python setup.py install
Implementation
==============
Current implementation assumes accessing a bulk whois server with a telnet-like
interface. Generally, input takes the form of:
begin
192.168.0.1
192.168.0.2
end
Note that different bulk whois servers return different data, so it is best
to choose one you are happy with at the outset and stick with it to keep
your results consistent.
For example, using different modules the sample code returns this:
BulkWhoisShadowServer
203.2.218.214 9342 ABCNET-AS AU
212.58.246.91 2818 BBC UK
BulkWhoisCymru
203.2.218.214 9342 ABCNET-AS-AP Australian Broadcasting Commission AU
212.58.246.91 2818 BBC BBC Internet Services, UK GB
Further information:
http://www.shadowserver.org/wiki/pmwiki.php/Services/IP-BGP
http://www.team-cymru.org/Services/ip-to-asn.html
| PypiClean |
/DZDutils-1.7.4.tar.gz/DZDutils-1.7.4/LICENSE.md | Copyright 2019 German Center for Diabetes Research (DZD)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | PypiClean |
/AWS_pypi_exercise-0.1.tar.gz/AWS_pypi_exercise-0.1/AWS_pypi_exercise/Gaussiandistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):

        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 when the data is a sample.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)

        mean = self.calculate_mean()

        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2

        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev

        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2,sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: the original re-labelled axes[0] here; the y-label belongs
        # on the lower (pdf) subplot.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not standard deviations) of independent Gaussians add.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/adapters/jquery.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
(function(a){if("undefined"==typeof a)throw Error("jQuery should be loaded before CKEditor jQuery adapter.");if("undefined"==typeof CKEDITOR)throw Error("CKEditor should be loaded before CKEditor jQuery adapter.");CKEDITOR.config.jqueryOverrideVal="undefined"==typeof CKEDITOR.config.jqueryOverrideVal?!0:CKEDITOR.config.jqueryOverrideVal;a.extend(a.fn,{ckeditorGet:function(){var a=this.eq(0).data("ckeditorInstance");if(!a)throw"CKEditor is not initialized yet, use ckeditor() with a callback.";return a},
ckeditor:function(g,e){if(!CKEDITOR.env.isCompatible)throw Error("The environment is incompatible.");if(!a.isFunction(g)){var m=e;e=g;g=m}var k=[];e=e||{};this.each(function(){var b=a(this),c=b.data("ckeditorInstance"),f=b.data("_ckeditorInstanceLock"),h=this,l=new a.Deferred;k.push(l.promise());if(c&&!f)g&&g.apply(c,[this]),l.resolve();else if(f)c.once("instanceReady",function(){setTimeout(function d(){c.element?(c.element.$==h&&g&&g.apply(c,[h]),l.resolve()):setTimeout(d,100)},0)},null,null,9999);
else{if(e.autoUpdateElement||"undefined"==typeof e.autoUpdateElement&&CKEDITOR.config.autoUpdateElement)e.autoUpdateElementJquery=!0;e.autoUpdateElement=!1;b.data("_ckeditorInstanceLock",!0);c=a(this).is("textarea")?CKEDITOR.replace(h,e):CKEDITOR.inline(h,e);b.data("ckeditorInstance",c);c.on("instanceReady",function(e){var d=e.editor;setTimeout(function n(){if(d.element){e.removeListener();d.on("dataReady",function(){b.trigger("dataReady.ckeditor",[d])});d.on("setData",function(a){b.trigger("setData.ckeditor",
[d,a.data])});d.on("getData",function(a){b.trigger("getData.ckeditor",[d,a.data])},999);d.on("destroy",function(){b.trigger("destroy.ckeditor",[d])});d.on("save",function(){a(h.form).submit();return!1},null,null,20);if(d.config.autoUpdateElementJquery&&b.is("textarea")&&a(h.form).length){var c=function(){b.ckeditor(function(){d.updateElement()})};a(h.form).submit(c);a(h.form).bind("form-pre-serialize",c);b.bind("destroy.ckeditor",function(){a(h.form).unbind("submit",c);a(h.form).unbind("form-pre-serialize",
c)})}d.on("destroy",function(){b.removeData("ckeditorInstance")});b.removeData("_ckeditorInstanceLock");b.trigger("instanceReady.ckeditor",[d]);g&&g.apply(d,[h]);l.resolve()}else setTimeout(n,100)},0)},null,null,9999)}});var f=new a.Deferred;this.promise=f.promise();a.when.apply(this,k).then(function(){f.resolve()});this.editor=this.eq(0).data("ckeditorInstance");return this}});CKEDITOR.config.jqueryOverrideVal&&(a.fn.val=CKEDITOR.tools.override(a.fn.val,function(g){return function(e){if(arguments.length){var m=
this,k=[],f=this.each(function(){var b=a(this),c=b.data("ckeditorInstance");if(b.is("textarea")&&c){var f=new a.Deferred;c.setData(e,function(){f.resolve()});k.push(f.promise());return!0}return g.call(b,e)});if(k.length){var b=new a.Deferred;a.when.apply(this,k).done(function(){b.resolveWith(m)});return b.promise()}return f}var f=a(this).eq(0),c=f.data("ckeditorInstance");return f.is("textarea")&&c?c.getData():g.call(f)}}))})(window.jQuery); | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_pt-pt.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"da manh\u00e3",
"da tarde"
],
"DAY": [
"domingo",
"segunda-feira",
"ter\u00e7a-feira",
"quarta-feira",
"quinta-feira",
"sexta-feira",
"s\u00e1bado"
],
"MONTH": [
"janeiro",
"fevereiro",
"mar\u00e7o",
"abril",
"maio",
"junho",
"julho",
"agosto",
"setembro",
"outubro",
"novembro",
"dezembro"
],
"SHORTDAY": [
"dom",
"seg",
"ter",
"qua",
"qui",
"sex",
"s\u00e1b"
],
"SHORTMONTH": [
"jan",
"fev",
"mar",
"abr",
"mai",
"jun",
"jul",
"ago",
"set",
"out",
"nov",
"dez"
],
"fullDate": "EEEE, d 'de' MMMM 'de' y",
"longDate": "d 'de' MMMM 'de' y",
"medium": "dd/MM/y HH:mm:ss",
"mediumDate": "dd/MM/y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/yy HH:mm",
"shortDate": "dd/MM/yy",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "pt-pt",
"pluralCat": function(n, opt_precision) { if (n >= 0 && n <= 2 && n != 2) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/DeepPhysX.Sofa-22.12.1.tar.gz/DeepPhysX.Sofa-22.12.1/examples/demos/Beam/FC/prediction.py | import os
import sys
# Sofa related imports
import Sofa.Gui
# DeepPhysX related imports
from DeepPhysX.Core.Database.BaseDatabaseConfig import BaseDatabaseConfig
from DeepPhysX.Core.Pipelines.BasePrediction import BasePrediction
from DeepPhysX.Sofa.Environment.SofaEnvironmentConfig import SofaEnvironmentConfig
from DeepPhysX.Sofa.Pipeline.SofaPrediction import SofaPrediction
from DeepPhysX.Torch.FC.FCConfig import FCConfig
# Session related imports
from download import BeamDownloader
from Environment.BeamPrediction import BeamPrediction, p_grid
def create_runner(visualizer=False):
    """Assemble the prediction pipeline for the trained beam network.

    With ``visualizer=True`` a generic ``BasePrediction`` runner is returned
    (predictions rendered in a Vedo window); otherwise a ``SofaPrediction``
    runner is returned for display in the SOFA GUI.
    """
    # Simulation environment configuration
    environment_config = SofaEnvironmentConfig(
        environment_class=BeamPrediction,
        visualizer='vedo' if visualizer else None,
        env_kwargs={'visualizer': visualizer})

    # Fully connected network: every layer is sized to 3 DOFs per grid node;
    # total depth is input + (nb_hidden_layers + 1) hidden + output layers.
    nb_hidden_layers = 2
    layer_width = p_grid.nb_nodes * 3
    network_config = FCConfig(dim_output=3,
                              dim_layers=[layer_width] * (nb_hidden_layers + 3),
                              biases=True)

    # Dataset configuration
    database_config = BaseDatabaseConfig(normalize=True)

    # Prefer a session trained by the user over the downloaded demo session.
    dpx_session = 'beam_dpx'
    user_session = 'beam_training_user'
    session_name = user_session if os.path.exists('sessions/' + user_session) else dpx_session

    if visualizer:
        return BasePrediction(network_config=network_config,
                              database_config=database_config,
                              environment_config=environment_config,
                              session_dir='sessions',
                              session_name=session_name,
                              step_nb=100)
    return SofaPrediction(network_config=network_config,
                          database_config=database_config,
                          environment_config=environment_config,
                          session_dir='sessions',
                          session_name=session_name,
                          step_nb=-1)
if __name__ == '__main__':

    # Check missing data
    BeamDownloader().get_session('predict')

    # Get option
    visualizer = False
    if len(sys.argv) > 1:

        # Check script option
        if sys.argv[1] != '-v':
            # NOTE(review): these two adjacent string literals concatenate
            # without a separating space between the sentences.
            print("Script option must be '-v' to visualize predictions in a Vedo window."
                  "By default, prediction are rendered in a SOFA GUI.")
            quit(0)
        visualizer = True

    if visualizer:
        # Create and launch runner
        runner = create_runner(visualizer)
        runner.execute()
    else:
        # Create SOFA runner
        runner = create_runner()
        # Launch SOFA GUI
        Sofa.Gui.GUIManager.Init("main", "qglviewer")
        Sofa.Gui.GUIManager.createGUI(runner.root, __file__)
        Sofa.Gui.GUIManager.SetDimension(1080, 1080)
        Sofa.Gui.GUIManager.MainLoop(runner.root)
        Sofa.Gui.GUIManager.closeGUI()
        # Manually close the runner (security if stuff like additional dataset need to be saved)
        runner.close()

    # Delete unwanted files (SOFA GUI drops .ini/.log files next to the script)
    for file in os.listdir(os.path.dirname(os.path.abspath(__file__))):
        if '.ini' in file or '.log' in file:
            os.remove(file)
/DjangoComponents-0.0.0.33.tar.gz/DjangoComponents-0.0.0.33/django_components/static/js/src/search.js | var SearchOption = React.createClass({
update: function(e) {
this.props.click(this.props.children);
},
render: function() {
return <div className='search-suggestion' onMouseDown={this.update}>{this.props.children}</div>;
}
});
var DropdownSearch = React.createClass({
getInitialState: function() {
return {update: this.props.click};
},
render: function() {
var items = [];
var click = this.props.click;
var search = this.props.search.split(' ');
var data = this.props.data;
for (var i = 0; i < this.props.data.length; i++) {
var pushable = false;
var amt = 0;
for (var j = 0; j < search.length; j++) {
if (data[i].includes(search[j]) || search[j] == '') {
amt++;
}
}
if (amt == search.length) pushable = true;
if (pushable) {
items.push(this.props.data[i]);
}
}
items = items.map(function(item) {return (<SearchOption click={click}>{ item }</SearchOption>)})
return (
<div className='dynamic-search-content' style={this.props.style}>
{ items }
</div>
);
}
});
var CustomSearch = React.createClass({
getInitialState: function() {
return {
search:'',
showSuggestions:false,
suggestionStyles:{display:'none'},
searchId:0,
data: JSON.parse(this.props.data),
idData: (JSON.parse(this.props.data)).map(function(item){return item[0]}),
nameData: (JSON.parse(this.props.data)).map(function(item){return item[1]})
};
},
change: function(e) {
this.setState({search: e.target.value});
},
focus: function() {
this.setState({suggestionStyles:{display:'block'}});
},
blur: function(e) {
this.setState({suggestionStyles:{display:'none'}});
},
clickSuggestion: function(item) {
this.setState({
search: item,
searchId: this.state.idData[this.state.nameData.indexOf(item)]
});
this.blur();
},
render: function() {
if (!(this.props.data)) {
throw "Please pass CustomSearch some data!";
}
console.log(this.state.nameData)
return (
<div className='lk-dynamic-search'>
<input type='text' onChange={this.change} onFocus={this.focus} onBlur={this.blur} value={this.state.search} />
<input type='text' name={this.props.name} value={this.state.searchId} style={{display:'none'}} />
<DropdownSearch data={this.state.nameData} search={this.state.search} click={this.clickSuggestion} style={this.state.suggestionStyles} />
</div>
);
}
}); | PypiClean |
/Blogofile-0.8.3.tar.gz/Blogofile-0.8.3/blogofile/config.py | __author__ = "Ryan McGuire (ryan@enigmacurry.com)"
import os
import logging
import sys
import re
from . import cache
from . import controller
from . import plugin
from . import filter as _filter
from .cache import HierarchicalCache as HC
# TODO: This import MUST come after cache is imported; that's too brittle!
import blogofile_bf as bf
logger = logging.getLogger("blogofile.config")
# Expose this module as ``bf.config`` so user config files and plugins can
# reach it through the shared ``bf`` namespace object.
bf.config = sys.modules['blogofile.config']
# Hierarchical caches holding the configuration namespaces; they are
# populated by _load_config() below.
site = cache.HierarchicalCache()
controllers = cache.HierarchicalCache()
filters = cache.HierarchicalCache()
plugins = cache.HierarchicalCache()
templates = cache.HierarchicalCache()
# The baseline configuration shipped with blogofile, exec'd before the
# user's _config.py so it provides the defaults.
default_config_path = os.path.join(
    os.path.dirname(__file__), "default_config.py")
def init_interactive(args=None):
    """Reset the blogofile cache objects, and load the configuration.

    The user's _config.py is always loaded from the current directory
    because we assume that the function/method that calls this has
    already changed to the directory specified by the --src-dir
    command line option.

    ``args`` is only read for its ``src_dir`` attribute when reporting a
    missing configuration; the process exits with status 1 in that case.
    """
    # TODO: What purpose does cache.reset_bf() serve? Looks like a
    # testing hook.
    cache.reset_bf()
    try:
        _load_config("_config.py")
    except IOError:
        # No _config.py in the working directory: report and abort.
        sys.stderr.write("No configuration found in source dir: {0}\n"
                         .format(args.src_dir))
        sys.stderr.write("Want to make a new site? Try `blogofile init`\n")
        sys.exit(1)
def _load_config(user_config_path):
    """Load the configuration.

    Strategy:

    1) Load the default config
    2) Load the plugins
    3) Load the site filters and controllers
    4) Load the user's config.
    5) Compile file ignore pattern regular expressions

    This establishes sane defaults that the user can override as they
    wish.

    config is exec-ed from Python modules into locals(), then updated
    into globals().
    """
    # exec the shipped default config into this function's locals() first,
    # so the user's _config.py (exec'd below) can override any default.
    with open(default_config_path) as f:
        exec(f.read())
    plugin.load_plugins()
    _filter.preload_filters()
    controller.load_controllers(namespace=bf.config.controllers)
    try:
        with open(user_config_path) as f:
            exec(f.read())
    except IOError:
        # Re-raise so the caller (init_interactive) can report the missing
        # _config.py to the user.
        raise
    _compile_file_ignore_patterns()
    # Publish every name defined by the exec'd configs as a module-level
    # attribute of blogofile.config.
    globals().update(locals())
def _compile_file_ignore_patterns():
    """Rebuild ``site.compiled_file_ignore_patterns`` from
    ``site.file_ignore_patterns``: plain strings are compiled as
    case-insensitive regexes, while entries that already look like compiled
    regexes (anything with a ``findall`` attribute) pass through unchanged.
    """
    site.compiled_file_ignore_patterns = [
        pattern if hasattr(pattern, "findall")
        else re.compile(pattern, re.IGNORECASE)
        for pattern in site.file_ignore_patterns
    ]
/Hebel-0.02.1.tar.gz/Hebel-0.02.1/hebel/layers/input_dropout.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
import cPickle
from pycuda import gpuarray
from .dummy_layer import DummyLayer
from ..pycuda_ops.elementwise import sample_dropout_mask, \
apply_dropout_mask
from ..pycuda_ops.matrix import add_vec_to_mat
from ..pycuda_ops.reductions import matrix_sum_out_axis
class InputDropout(DummyLayer):
r"""This layer performs dropout on the input data.
It does not have any learnable parameters of its own. It should be
used as the first layer and will perform dropout with any dropout
probability on the incoming data.
**Parameters:**
n_in : integer
Number of input units.
dropout_probability : float in [0, 1]
Probability of dropping out each unit.
compute_input_gradients : Bool
Whether to compute the gradients with respect to the input
data. This only necessary if you're training a model where the
input itself is learned.
"""
def __init__(self, n_in, dropout_probability=.2,
compute_input_gradients=False):
self.n_in = n_in
self.n_units = n_in
assert dropout_probability >= 0. and \
dropout_probability <= 1.
self.dropout_probability = dropout_probability
self.compute_input_gradients = compute_input_gradients
self.persistent_temp_objects_config = (
('dropout_input', ('batch_size', self.n_units), np.float32),
('dropout_prob_array', ('batch_size', self.n_units), np.float32),
('dropout_mask', ('batch_size', self.n_units), np.int8)
)
def feed_forward(self, input_data, prediction=False):
"""Propagate forward through the layer
**Parameters:**
input_data : ``GPUArray``
Inpute data to perform dropout on.
prediction : bool, optional
Whether to use prediction model. If true, then the data is
scaled by ``1 - dropout_probability`` uses dropout.
**Returns:**
dropout_data : ``GPUArray``
The data after performing dropout.
"""
assert input_data.shape[1] == self.n_in
if not prediction:
dropout_input = self.get_temp_object('dropout_input',
input_data.shape, input_data.dtype)
dropout_prob_array = self.get_temp_object('dropout_prob_array',
input_data.shape, input_data.dtype)
dropout_mask = self.get_temp_object('dropout_mask',
input_data.shape, np.int8)
sample_dropout_mask(input_data,
self.dropout_probability, target=dropout_input,
dropout_prob_array=dropout_prob_array, dropout_mask=dropout_mask)
return dropout_input, dropout_mask
else:
return (input_data * (1 - self.dropout_probability),)
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Inpute data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
if self.compute_input_gradients:
apply_dropout_mask(df_output, dropout_mask)
return tuple(), df_output | PypiClean |
/Djam-0.9.8.tar.gz/Djam-0.9.8/djam/utils.py | from __future__ import unicode_literals, division
import os, re, hashlib, threading, string
from binascii import hexlify
from django.conf import settings
from django.utils import six
class SharedStateBase(object):
    """Borg-pattern base class: every instance shares one ``__dict__``, so an
    attribute set on any instance is visible on all others (same trick as in
    django.db.models.loading).
    """

    # The single state dictionary shared by all instances. ``_local`` is a
    # threading.local() that instances may use for per-thread variables.
    __shared_state = {'_local': threading.local()}

    def __init__(self):
        """Rebind this instance's __dict__ to the class-wide shared state."""
        self.__dict__ = self.__shared_state
class SettingRename(object):
    """Callable that maps a name to its settings-based override.

    Constructed with *settingFmt*, a callable that turns the upper-cased
    name into a Django settings attribute name. Calling the instance returns
    the value of that setting when it is defined and truthy, and the
    original name otherwise.
    """

    def __init__(self, settingFmt):
        self.settingFmt = settingFmt

    def __call__(self, name):
        setting_name = self.settingFmt(name.upper())
        override = getattr(settings, setting_name, None)
        return override if override else name
def calculate_password_verifier(password, seed=None, hmethod='sha1'):
    """Return a password verifier of the form ``"method$seed$digest"``.

    The digest is ``hmethod(seed + password)``. When *seed* is omitted a
    random hex seed sized to the digest length is generated.

    :param password: clear-text password (``str`` or ``bytes``); surrounding
        whitespace is stripped.
    :param seed: optional salt (``str`` or ``bytes``); random when ``None``.
    :param hmethod: name of any ``hashlib`` constructor. NOTE(review): the
        ``'sha1'`` default is kept for backward compatibility — prefer a
        stronger hash for new call sites.
    :raises ValueError: if the stripped password is empty.
    """
    # make sure password is not empty
    password = password.strip()
    if not password:
        raise ValueError("can not use empty password")
    h = getattr(hashlib, hmethod)()
    if seed is None:
        # BUG FIX: hexlify() returns bytes; decode so the final "$".join
        # does not mix bytes with str on Python 3.
        seed = hexlify(os.urandom(h.digest_size)).decode('ascii')
    elif isinstance(seed, bytes):
        seed = seed.decode('ascii')
    # BUG FIX: hashlib update() requires bytes on Python 3.
    h.update(seed.encode('utf-8'))
    h.update(password if isinstance(password, bytes) else password.encode('utf-8'))
    return "$".join([hmethod, seed, h.hexdigest()])
class FolderLoader(object):
    """Provide a callable that loads file content from a folder.

    The folder is the one containing *refFileName* (typically a module's
    ``__file__``); :meth:`load` then reads files relative to it.
    """

    def __init__(self, refFileName):
        self.baseFolder = os.path.dirname(os.path.abspath(refFileName))
        if not os.path.exists(self.baseFolder):
            raise ValueError(
                "Unable to resolve containing folder for %s" % refFileName
            )

    def load(self, filename):
        """Return the text content of *filename* inside the base folder."""
        # BUG FIX: use a context manager so the handle is always closed
        # (the original opened the file and never closed it).
        with open(os.path.join(self.baseFolder, filename), 'r') as f:
            return f.read()
def get_cbv_object(viewfunc):
    """
    If *viewfunc* was obtained via the ``CBV.as_view(**initkwargs)`` factory,
    return an instance of CBV constructed the way viewfunc would construct it
    on each call; return ``None`` otherwise.
    """
    if getattr(viewfunc, '__closure__', None) is None:
        # viewfunc has not been constructed using CBV
        return None
    try:
        # We assume that viewfunc was returned by CBV.as_view(**initkwargs)
        # and try to retrieve CBV class & initkwargs.
        #
        # This approach is **fragile** as it relies on the closure variable
        # names ('cls', 'initkwargs') used in the stock as_view implementation.
        #
        # BUG FIX: the original read ``view.__code__``/``view.__closure__``
        # (an undefined name) instead of ``viewfunc``, so the bare except
        # swallowed the NameError and the function always returned None.
        ctx = dict(zip(viewfunc.__code__.co_freevars,
                       [c.cell_contents for c in (viewfunc.__closure__ or [])]
                       ))
        initkwargs = ctx.get('initkwargs') or {}
        CBV = ctx.get('cls')
        if callable(CBV):
            return CBV(**initkwargs)
    except Exception:
        # Narrowed from a bare except; any introspection failure means
        # viewfunc is not a recognizable as_view() product.
        return None
_STEPS = range(4, 0, -1)  # candidate group sizes, largest first
_SEPARATORS = string.whitespace + "_-"  # characters h2r() strips back out


def r2h(rawId, sep=" "):
    """
    Return a readability-enhanced identifier: *sep* is inserted every
    ``s`` characters, where ``s`` is the largest value in 4..2 that evenly
    divides the identifier length. Identifiers with no such divisor are
    returned unchanged.
    """
    rId = str(rawId).strip()
    lId = len(rId)
    for s in _STEPS:
        if lId % s == 0:
            break
    if s == 1:
        return rId
    # FIX: plain slicing replaces the old six.StringIO buffering (dropping
    # the third-party dependency); lId is a multiple of s, so the slices
    # partition the string exactly as the buffered reads did.
    return sep.join(rId[i:i + s] for i in range(0, lId, s))
if six.PY2:
    from django.utils.encoding import force_bytes, force_text
    translate = string.translate

    def h2r(humId):
        """
        remove formatting separators from readability enhanced identifier
        """
        return force_text(translate(force_bytes(humId), None, _SEPARATORS))
else:
    def h2r(humId):
        """
        Remove formatting separators from a readability-enhanced identifier
        (the inverse of :func:`r2h`).
        """
        # FIX: implements the Python 3 branch that previously raised
        # NotImplementedError. str.translate deletes every character
        # mapped to None.
        return humId.translate({ord(c): None for c in _SEPARATORS})
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/methods/stream/resume_stream.py | import asyncio
from typing import Union
from ...exceptions import NodeJSNotRunning
from ...exceptions import NoMtProtoClientSet
from ...exceptions import NotInGroupCallError
from ...mtproto import BridgedClient
from ...scaffold import Scaffold
from ...types import NotInGroupCall
from ...types.session import Session
class ResumeStream(Scaffold):
    async def resume_stream(
        self,
        chat_id: Union[int, str],
    ):
        """Resume the paused stream

        This method allow to resume the paused streaming file

        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier of the target chat.
                Can be a direct id (int) or a username (str)

        Raises:
            NoMtProtoClientSet: In case you try
                to call this method without any MtProto client
            NodeJSNotRunning: In case you try
                to call this method without do
                :meth:`~newcalls.NewCalls.start` before
            NotInGroupCallError: In case you try
                to leave a non-joined group call

        Returns:
            ``bool``:
            On success, true is returned if was resumed

        Example:
            .. code-block:: python
                :emphasize-lines: 10-12

                from newcalls import Client
                from newcalls import idle
                ...
                app = NewCalls(client)
                app.start()

                ...  # Call API methods

                app.resume_stream(
                    -1001185324811,
                )

                idle()
        """
        # Usernames (or any non-numeric id) are resolved to a numeric chat id
        # through the attached MtProto client.
        try:
            chat_id = int(chat_id)
        except ValueError:
            chat_id = BridgedClient.chat_id(
                await self._app.resolve_peer(chat_id),
            )
        if self._app is not None:
            if self._wait_until_run is not None:
                # Random token correlating this request with the reply that
                # arrives asynchronously from the Node.js side.
                solver_id = Session.generate_session_id(24)

                async def internal_sender():
                    # Defer sending until the Node.js bridge is running.
                    if not self._wait_until_run.done():
                        await self._wait_until_run
                    await self._binding.send({
                        'action': 'resume',
                        'chat_id': chat_id,
                        'solver_id': solver_id,
                    })
                # Snapshot the active call before firing the request; its
                # status is read again after the reply below.
                active_call = self._call_holder.get_active_call(chat_id)
                # Fire-and-forget the request, then block on the matching
                # reply keyed by solver_id.
                asyncio.ensure_future(internal_sender())
                result = await self._wait_result.wait_future_update(
                    solver_id,
                )
                if isinstance(result, NotInGroupCall):
                    raise NotInGroupCallError()
                return active_call.status == 'paused'
            else:
                raise NodeJSNotRunning()
        else:
            raise NoMtProtoClientSet()
/Nitrous-0.9.3-py3-none-any.whl/turbogears/i18n/data/zh.py |
# Chinese (zh) locale data for the TurboGears i18n framework.
# All values are Chinese display names encoded as unicode escape sequences.

# Maps ISO 639 language codes to their Chinese display names.
languages={'gv': u'\u9a6c\u6069\u5c9b\u6587', 'gu': u'\u53e4\u52a0\u62c9\u63d0\u6587', 'gd': u'\u82cf\u683c\u5170- \u76d6\u5c14\u6587', 'ga': u'\u7231\u5c14\u5170\u6587', 'gn': u'\u74dc\u62c9\u5c3c\u6587', 'gl': u'\u52a0\u5229\u897f\u4e9a\u6587', 'la': u'\u62c9\u4e01\u6587', 'ln': u'\u6797\u52a0\u62c9\u6587', 'lo': u'\u8001\u631d\u6587', 'tt': u'\u9791\u977c\u6587', 'tr': u'\u571f\u8033\u5176\u6587', 'ts': u'\u7279\u677e\u52a0\u6587', 'lv': u'\u62c9\u812b\u7dad\u4e9e\u6587', 'to': u'\u6c64\u52a0\u6587', 'lt': u'\u7acb\u9676\u5b9b\u6587', 'tk': u'\u571f\u5e93\u66fc\u6587', 'th': u'\u6cf0\u6587', 'ti': u'\u63d0\u683c\u91cc\u5c3c\u4e9a\u6587', 'tg': u'\u5854\u5409\u514b\u6587', 'te': u'\u6cf0\u5362\u56fa\u6587', 'haw': u'\u590f\u5a01\u5937\u6587', 'yi': u'\u4f9d\u5730\u6587', 'yo': u'\u7ea6\u9c81\u5df4\u6587', 'de': u'\u5fb7\u6587', 'da': u'\u4e39\u9ea6\u6587', 'dz': u'\u4e0d\u4e39\u6587', 'st': u'\u585e\u7d22\u6258\u6587', 'qu': u'\u76d6\u4e18\u4e9a\u6587', 'el': u'\u5e0c\u814a\u6587', 'eo': u'\u4e16\u754c\u6587', 'en': u'\u82f1\u6587', 'zh': u'\u4e2d\u6587', 'za': u'\u85cf\u6587', 'uk': u'\u4e4c\u514b\u5170\u6587', 'eu': u'\u5df4\u65af\u514b\u6587', 'et': u'\u7231\u6c99\u5c3c\u4e9a\u6587', 'es': u'\u897f\u73ed\u7259\u6587', 'ru': u'\u4fc4\u6587', 'rw': u'\u5362\u65fa\u8fbe\u6587', 'rm': u'\u91cc\u6258\u7f57\u66fc\u65af\u6587', 'rn': u'\u57fa\u9686\u8fea\u6587', 'ro': u'\u7f57\u9a6c\u5c3c\u4e9a\u6587', 'bn': u'\u5b5f\u52a0\u62c9\u6587', 'be': u'\u767d\u4fc4\u7f57\u65af\u6587', 'bg': u'\u4fdd\u52a0\u5229\u4e9a\u6587', 'ba': u'\u5df4\u4ec0\u5ba2\u5c14\u6587', 'wo': u'\u6c83\u5c14\u592b\u6587', 'jv': u'\u722a\u54c7\u6587', 'bo': u'\u897f\u85cf\u6587', 'bh': u'\u6bd4\u54c8\u5c14\u6587', 'bi': u'\u6bd4\u65af\u62c9\u9a6c\u6587', 'br': u'\u5e03\u91cc\u591a\u5c3c\u6587', 'ja': u'\u65e5\u6587', 'om': u'\u963f\u66fc\u6587', 'oc': u'\u5965\u897f\u5766\u6587', 'tw': u'\u53f0\u6e7e\u6587', 'or': u'\u6b27\u91cc\u4e9a\u6587', 'xh': u'\u73ed\u56fe\u6587', 'co': 
u'\u79d1\u897f\u5609\u6587', 'ca': u'\u52a0\u6cf0\u7f57\u5c3c\u4e9a\u6587', 'cy': u'\u5a01\u5c14\u58eb\u6587', 'cs': u'\u6377\u514b\u6587', 'ps': u'\u666e\u4ec0\u56fe\u6587', 'pt': u'\u8461\u8404\u7259\u6587', 'tl': u'\u5854\u52a0\u8def\u65cf\u6587', 'pa': u'\u65c1\u906e\u666e\u6587', 'vi': u'\u8d8a\u5357\u6587', 'pl': u'\u6ce2\u5170\u6587', 'hy': u'\u4e9a\u7f8e\u5c3c\u4e9a\u6587', 'hr': u'\u514b\u7f57\u5730\u4e9a\u6587', 'iu': u'\u7231\u65af\u57fa\u6469\u6587', 'hu': u'\u5308\u7259\u5229\u6587', 'hi': u'\u5370\u5730\u6587', 'ha': u'\u8c6a\u6492\u6587', 'he': u'\u5e0c\u4f2f\u6765\u6587', 'mg': u'\u9a6c\u5c14\u52a0\u4ec0\u6587', 'uz': u'\u4e4c\u5179\u522b\u514b\u6587', 'ml': u'\u9a6c\u6765\u4e9a\u62c9\u59c6\u6587', 'mo': u'\u6469\u5c14\u591a\u74e6\u6587', 'mn': u'\u8499\u53e4\u6587', 'mi': u'\u6bdb\u5229\u6587', 'ik': u'\u4f9d\u5974\u76ae\u7ef4\u514b\u6587', 'mk': u'\u9a6c\u5176\u987f\u6587', 'ur': u'\u4e4c\u5c14\u90fd\u6587', 'mt': u'\u9a6c\u8033\u4ed6\u6587', 'ms': u'\u9a6c\u6765\u6587', 'mr': u'\u9a6c\u62c9\u5730\u6587', 'ug': u'\u7ef4\u543e\u5c14\u6587', 'ta': u'\u6cf0\u7c73\u5c14\u6587', 'my': u'\u7f05\u7538\u6587', 'aa': u'\u963f\u6cd5\u6587', 'ab': u'\u963f\u5e03\u54c8\u897f\u4e9a\u6587', 'ss': u'\u8f9b\u8f9b\u90a3\u63d0\u6587', 'af': u'\u5357\u975e\u8377\u5170\u6587', 'tn': u'\u7a81\u5c3c\u65af\u6587', 'sw': u'\u65af\u74e6\u5e0c\u91cc\u6587', 'is': u'\u51b0\u5c9b\u6587', 'am': u'\u963f\u59c6\u54c8\u62c9\u6587', 'it': u'\u610f\u5927\u5229\u6587', 'sv': u'\u745e\u5178\u6587', 'ia': u'\u62c9\u4e01\u56fd\u9645\u6587', 'as': u'\u963f\u8428\u59c6\u6587', 'ar': u'\u963f\u62c9\u4f2f\u6587', 'su': u'\u82cf\u4e39\u6587', 'zu': u'\u7956\u9c81\u6587', 'ay': u'\u827e\u9a6c\u62c9\u6587', 'az': u'\u963f\u585e\u62dc\u7586\u6587', 'ie': u'\u62c9\u4e01\u56fd\u9645\u6587', 'id': u'\u5370\u5ea6\u5c3c\u897f\u4e9a\u6587', 'sk': u'\u65af\u6d1b\u4f10\u514b\u6587', 'nl': u'\u8377\u5170\u6587', 'nn': u'\u632a\u5a01\u5c3c\u8bfa\u65af\u514b\u6587', 'no': u'\u632a\u5a01\u6587', 'na': 
u'\u7459\u9c81\u6587', 'nb': u'\u632a\u5a01\u535a\u514b\u9a6c\u5c14\u6587', 'ne': u'\u5c3c\u6cca\u5c14\u6587', 'vo': u'\u6c83\u62c9\u666e\u514b\u6587', 'so': u'\u7d22\u9a6c\u91cc\u6587', 'fr': u'\u6cd5\u6587', 'sm': u'\u8428\u6469\u4e9a\u6587', 'fy': u'\u5f17\u91cc\u65af\u5170\u6587', 'fa': u'\u6ce2\u65af\u6587', 'fi': u'\u82ac\u5170\u6587', 'fj': u'\u6590\u6d4e\u6587', 'sa': u'\u68b5\u6587', 'fo': u'\u6cd5\u7f57\u6587', 'ka': u'\u683c\u9c81\u5409\u4e9a\u6587', 'kk': u'\u54c8\u8428\u514b\u6587', 'sr': u'\u585e\u5c14\u7ef4\u4e9a\u6587', 'sq': u'\u963f\u5c14\u5df4\u5c3c\u4e9a\u6587', 'ko': u'\u97e9\u6587', 'kn': u'\u57c3\u7eb3\u5fb7\u6587', 'km': u'\u67ec\u57d4\u5be8\u6587', 'kl': u'\u683c\u9675\u5170\u6587', 'ks': u'\u514b\u4ec0\u7c73\u5c14\u6587', 'si': u'\u50e7\u4f3d\u7f57\u6587', 'sh': u'\u585e\u6ce2\u5c3c\u65af-\u514b\u7f57\u5730\u4e9a\u6587', 'kw': u'\u51ef\u5c14\u7279\u6587', 'sn': u'\u585e\u5185\u52a0\u5c14\u6587', 'ku': u'\u5e93\u5c14\u5fb7\u6587', 'sl': u'\u65af\u6d1b\u6587\u5c3c\u4e9a\u6587', 'ky': u'\u5409\u5c14\u5409\u65af\u6587', 'sg': u'\u6851\u6208\u6587', 'sd': u'\u82cf\u4e39\u6587'}
# Maps ISO 3166 country codes to their Chinese display names.
countries={'BD': u'\u5b5f\u52a0\u62c9\u56fd', 'BE': u'\u6bd4\u5229\u65f6', 'BF': u'\u5e03\u57fa\u7eb3\u6cd5\u7d22', 'BG': u'\u4fdd\u52a0\u5229\u4e9a', 'BA': u'\u6ce2\u65af\u5c3c\u4e9a\u548c\u9ed1\u5c71\u5171\u548c\u56fd', 'BB': u'\u5df4\u5df4\u591a\u65af', 'WF': u'\u74e6\u5229\u65af\u548c\u5bcc\u56fe\u7eb3', 'BM': u'\u767e\u6155\u5927', 'BN': u'\u6587\u83b1', 'BO': u'\u73bb\u5229\u7ef4\u4e9a', 'BH': u'\u5df4\u6797', 'BI': u'\u5e03\u9686\u8fea', 'BJ': u'\u8d1d\u5b81', 'BT': u'\u4e0d\u4e39', 'JM': u'\u7259\u4e70\u52a0', 'BV': u'\u5e03\u7ef4\u7279\u5c9b', 'BW': u'\u535a\u8328\u74e6\u7eb3', 'WS': u'\u8428\u6469\u4e9a', 'BR': u'\u5df4\u897f', 'BS': u'\u5df4\u54c8\u9a6c', 'BY': u'\u767d\u4fc4\u7f57\u65af', 'BZ': u'\u4f2f\u5229\u5179', 'RU': u'\u4fc4\u7f57\u65af\u8054\u90a6', 'RW': u'\u5362\u65fa\u8fbe', 'TL': u'\u4e1c\u5e1d\u6c76', 'RE': u'\u7559\u5c3c\u6c6a', 'TM': u'\u571f\u5e93\u66fc\u65af\u5766', 'TJ': u'\u5854\u5409\u514b\u65af\u5766', 'RO': u'\u7f57\u9a6c\u5c3c\u4e9a', 'TK': u'\u6258\u514b\u52b3', 'GW': u'\u51e0\u5185\u4e9a\u6bd4\u7ecd', 'GU': u'\u5173\u5c9b', 'GT': u'\u5371\u5730\u9a6c\u62c9', 'GS': u'\u5357\u4f50\u6cbb\u4e9a\u548c\u5357\u4e09\u660e\u6cbb\u7fa4\u5c9b', 'GR': u'\u5e0c\u814a', 'GQ': u'\u8d64\u9053\u51e0\u5185\u4e9a', 'GP': u'\u74dc\u5fb7\u7f57\u666e\u5c9b', 'JP': u'\u65e5\u672c', 'GY': u'\u572d\u4e9a\u90a3', 'GF': u'\u6cd5\u5c5e\u572d\u4e9a\u90a3', 'GE': u'\u683c\u9c81\u5409\u4e9a', 'GD': u'\u683c\u6797\u7eb3\u8fbe', 'GB': u'\u82f1\u56fd', 'GA': u'\u52a0\u84ec', 'SV': u'\u8428\u5c14\u74e6\u591a', 'GN': u'\u51e0\u5185\u4e9a', 'GM': u'\u5188\u6bd4\u4e9a', 'GL': u'\u683c\u9675\u5170', 'GI': u'\u76f4\u5e03\u7f57\u9640', 'GH': u'\u52a0\u7eb3', 'OM': u'\u963f\u66fc', 'TN': u'\u7a81\u5c3c\u65af', 'JO': u'\u7ea6\u65e6', 'SP': u'\u585e\u5c14\u7ef4\u4e9a', 'HR': u'\u514b\u7f57\u5730\u4e9a', 'HT': u'\u6d77\u5730', 'HU': u'\u5308\u7259\u5229', 'HK': u'\u4e2d\u56fd\u9999\u6e2f\u7279\u522b\u884c\u653f\u533a', 'HN': u'\u6d2a\u90fd\u62c9\u65af', 'HM': 
u'\u8d6b\u5fb7\u4e0e\u9ea6\u514b\u5510\u7eb3\u7fa4\u5c9b', 'VE': u'\u59d4\u5185\u745e\u62c9', 'PR': u'\u6ce2\u591a\u9ece\u5404', 'PS': u'\u5df4\u52d2\u65af\u5766\u9886\u571f', 'PW': u'\u5e15\u52b3', 'PT': u'\u8461\u8404\u7259', 'SJ': u'\u65af\u74e6\u5c14\u5df4\u7279\u548c\u626c\u9a6c\u5ef6', 'PY': u'\u5df4\u62c9\u572d', 'IQ': u'\u4f0a\u62c9\u514b', 'PA': u'\u5df4\u62ff\u9a6c', 'PF': u'\u6cd5\u5c5e\u6ce2\u5229\u5c3c\u897f\u4e9a', 'PG': u'\u5df4\u5e03\u4e9a\u65b0\u51e0\u5185\u4e9a', 'PE': u'\u79d8\u9c81', 'PK': u'\u5df4\u57fa\u65af\u5766', 'PH': u'\u83f2\u5f8b\u5bbe', 'PN': u'\u76ae\u7279\u51ef\u6069', 'PL': u'\u6ce2\u5170', 'PM': u'\u5723\u76ae\u57c3\u5c14\u548c\u5bc6\u514b\u9686', 'ZM': u'\u8d5e\u6bd4\u4e9a', 'EH': u'\u897f\u6492\u54c8\u62c9', 'EE': u'\u7231\u6c99\u5c3c\u4e9a', 'EG': u'\u57c3\u53ca', 'ZA': u'\u5357\u975e', 'EC': u'\u5384\u74dc\u591a\u5c14', 'IT': u'\u610f\u5927\u5229', 'VN': u'\u8d8a\u5357', 'SB': u'\u6240\u7f57\u95e8\u7fa4\u5c9b', 'ET': u'\u57c3\u585e\u4fc4\u6bd4\u4e9a', 'SO': u'\u7d22\u9a6c\u91cc', 'ZW': u'\u6d25\u5df4\u5e03\u97e6', 'SA': u'\u6c99\u7279\u963f\u62c9\u4f2f', 'ES': u'\u897f\u73ed\u7259', 'ER': u'\u5384\u7acb\u7279\u91cc\u4e9a', 'MD': u'\u6469\u5c14\u591a\u74e6\u5171\u548c\u56fd', 'MG': u'\u9a6c\u8fbe\u52a0\u65af\u52a0', 'MA': u'\u6469\u6d1b\u54e5', 'MC': u'\u6469\u7eb3\u54e5', 'UZ': u'\u4e4c\u5179\u522b\u514b\u65af\u5766', 'MM': u'\u7f05\u7538', 'ML': u'\u9a6c\u91cc', 'MO': u'\u4e2d\u56fd\u6fb3\u95e8\u7279\u522b\u884c\u653f\u533a', 'MN': u'\u8499\u53e4', 'MH': u'\u9a6c\u7ecd\u5c14\u7fa4\u5c9b', 'MK': u'\u9a6c\u5176\u987f\u738b\u56fd', 'MU': u'\u6bdb\u91cc\u6c42\u65af', 'MT': u'\u9a6c\u8033\u4ed6', 'MW': u'\u9a6c\u62c9\u7ef4', 'MV': u'\u9a6c\u5c14\u4ee3\u592b', 'MQ': u'\u9a6c\u63d0\u5c3c\u514b\u5c9b', 'MP': u'\u5317\u9a6c\u91cc\u4e9a\u7eb3\u7fa4\u5c9b', 'MS': u'\u8499\u7279\u585e\u62c9\u7fa4\u5c9b', 'MR': u'\u6bdb\u91cc\u5854\u5c3c\u4e9a', 'UG': u'\u4e4c\u5e72\u8fbe', 'MY': u'\u9a6c\u6765\u897f\u4e9a', 'MX': u'\u58a8\u897f\u54e5', 
'IL': u'\u4ee5\u8272\u5217', 'FR': u'\u6cd5\u56fd', 'IO': u'\u82f1\u5c5e\u5370\u5ea6\u6d0b\u9886\u5730', 'SH': u'\u5723\u8d6b\u52d2\u62ff', 'FI': u'\u82ac\u5170', 'FJ': u'\u6590\u6d4e', 'FK': u'\u798f\u514b\u5170\u7fa4\u5c9b', 'FM': u'\u5bc6\u514b\u7f57\u5c3c\u897f\u4e9a\u8054\u90a6', 'FO': u'\u6cd5\u7f57\u7fa4\u5c9b', 'NI': u'\u5c3c\u52a0\u62c9\u74dc', 'NL': u'\u8377\u5170', 'NO': u'\u632a\u5a01', 'NA': u'\u7eb3\u7c73\u6bd4\u4e9a', 'VU': u'\u74e6\u52aa\u963f\u56fe', 'NC': u'\u65b0\u5580\u91cc\u591a\u5c3c\u4e9a', 'NE': u'\u5c3c\u65e5\u5c14', 'NF': u'\u8bfa\u798f\u514b\u5c9b', 'NG': u'\u5c3c\u65e5\u5229\u4e9a', 'NZ': u'\u65b0\u897f\u5170', 'NP': u'\u5c3c\u6cca\u5c14', 'NR': u'\u7459\u9c81', 'NU': u'\u7ebd\u57c3', 'CK': u'\u5e93\u514b\u7fa4\u5c9b', 'CI': u'\u8c61\u7259\u6d77\u5cb8', 'CH': u'\u745e\u58eb', 'CO': u'\u54e5\u4f26\u6bd4\u4e9a', 'CN': u'\u4e2d\u56fd', 'CM': u'\u5580\u9ea6\u9686', 'CL': u'\u667a\u5229', 'CC': u'\u79d1\u79d1\u65af\u7fa4\u5c9b', 'CA': u'\u52a0\u62ff\u5927', 'CG': u'\u521a\u679c', 'CF': u'\u4e2d\u975e\u5171\u548c\u56fd', 'CD': u'\u521a\u679c\u6c11\u4e3b\u5171\u548c\u56fd', 'CZ': u'\u6377\u514b\u5171\u548c\u56fd', 'CY': u'\u585e\u6d66\u8def\u65af', 'CX': u'\u5723\u8bde\u5c9b', 'CR': u'\u54e5\u65af\u8fbe\u9ece\u52a0', 'CV': u'\u4f5b\u5f97\u89d2', 'CU': u'\u53e4\u5df4', 'SZ': u'\u65af\u5a01\u58eb\u5170', 'SY': u'\u53d9\u5229\u4e9a', 'KG': u'\u5409\u5c14\u5409\u514b\u65af\u5766', 'KE': u'\u80af\u5c3c\u4e9a', 'SR': u'\u82cf\u91cc\u5357', 'KI': u'\u57fa\u91cc\u5df4\u65af', 'KH': u'\u67ec\u57d4\u5be8', 'KN': u'\u5723\u57fa\u8328\u548c\u5c3c\u7ef4\u65af', 'KM': u'\u79d1\u6469\u7f57', 'ST': u'\u5723\u591a\u7f8e\u548c\u666e\u6797\u897f\u6bd4', 'SK': u'\u65af\u6d1b\u4f10\u514b', 'KR': u'\u97e9\u56fd', 'SI': u'\u65af\u6d1b\u6587\u5c3c\u4e9a', 'KP': u'\u5317\u671d\u9c9c', 'KW': u'\u79d1\u5a01\u7279', 'SN': u'\u585e\u5185\u52a0\u5c14', 'SM': u'\u5723\u9a6c\u529b\u8bfa', 'SL': u'\u585e\u62c9\u5229\u6602', 'SC': u'\u585e\u820c\u5c14', 'KZ': 
u'\u54c8\u8428\u514b\u65af\u5766', 'KY': u'\u5f00\u66fc\u7fa4\u5c9b', 'SG': u'\u65b0\u52a0\u5761', 'SE': u'\u745e\u5178', 'SD': u'\u82cf\u4e39', 'DO': u'\u591a\u7c73\u5c3c\u52a0\u5171\u548c\u56fd', 'DM': u'\u591a\u7c73\u5c3c\u52a0\u5c98', 'DJ': u'\u5409\u5e03\u63d0', 'DK': u'\u4e39\u9ea6', 'VG': u'\u82f1\u5c5e\u7ef4\u4eac\u7fa4\u5c9b', 'DE': u'\u5fb7\u56fd', 'YE': u'\u4e5f\u95e8', 'DZ': u'\u963f\u5c14\u53ca\u5229\u4e9a', 'US': u'\u7f8e\u56fd', 'UY': u'\u4e4c\u62c9\u572d', 'YU': u'\u5357\u65af\u62c9\u592b', 'YT': u'\u9a6c\u7ea6\u7279', 'UM': u'\u7f8e\u56fd\u8fb9\u8fdc\u5c0f\u5c9b', 'LB': u'\u9ece\u5df4\u5ae9', 'LC': u'\u5723\u5362\u897f\u4e9a', 'LA': u'\u8001\u631d\u4eba\u6c11\u6c11\u4e3b\u5171\u548c\u56fd', 'TV': u'\u56fe\u74e6\u5362', 'TW': u'\u53f0\u6e7e', 'TT': u'\u7279\u7acb\u5c3c\u8fbe\u548c\u591a\u5df4\u54e5', 'TR': u'\u571f\u8033\u5176', 'LK': u'\u65af\u91cc\u5170\u5361', 'LI': u'\u5217\u652f\u6566\u58eb\u767b', 'LV': u'\u62c9\u8131\u7ef4\u4e9a', 'TO': u'\u6c64\u52a0', 'LT': u'\u7acb\u9676\u5b9b', 'LU': u'\u5362\u68ee\u5821', 'LR': u'\u5229\u6bd4\u91cc\u4e9a', 'LS': u'\u83b1\u7d22\u6258', 'TH': u'\u6cf0\u56fd', 'TF': u'\u6cd5\u5c5e\u5357\u534a\u7403\u9886\u5730', 'TG': u'\u591a\u54e5', 'TD': u'\u4e4d\u5f97', 'TC': u'\u7279\u514b\u65af\u548c\u51ef\u79d1\u65af\u7fa4\u5c9b', 'LY': u'\u5229\u6bd4\u4e9a', 'VA': u'\u68b5\u8482\u5188', 'VC': u'\u5723\u6587\u68ee\u7279\u548c\u683c\u6797\u7eb3\u4e01\u65af', 'AE': u'\u963f\u62c9\u4f2f\u8054\u5408\u914b\u957f\u56fd', 'AD': u'\u5b89\u9053\u5c14', 'AG': u'\u5b89\u63d0\u74dc\u548c\u5df4\u5e03\u8fbe', 'AF': u'\u963f\u5bcc\u6c57', 'AI': u'\u5b89\u572d\u62c9', 'VI': u'\u7f8e\u5c5e\u7ef4\u4eac\u7fa4\u5c9b', 'IS': u'\u51b0\u5c9b', 'IR': u'\u4f0a\u6717', 'AM': u'\u4e9a\u7f8e\u5c3c\u4e9a', 'AL': u'\u963f\u5c14\u5df4\u5c3c\u4e9a', 'AO': u'\u5b89\u54e5\u62c9', 'AN': u'\u8377\u5c5e\u5b89\u7684\u5217\u65af\u7fa4\u5c9b', 'AQ': u'\u5357\u6781\u6d32', 'AS': u'\u7f8e\u5c5e\u8428\u6469\u4e9a', 'AR': u'\u963f\u6839\u5ef7', 'AU': 
u'\u6fb3\u5927\u5229\u4e9a', 'AT': u'\u5965\u5730\u5229', 'AW': u'\u963f\u9c81\u5df4', 'IN': u'\u5370\u5ea6', 'TZ': u'\u5766\u6851\u5c3c\u4e9a', 'AZ': u'\u963f\u585e\u62dc\u7586', 'IE': u'\u7231\u5c14\u5170', 'ID': u'\u5370\u5ea6\u5c3c\u897f\u4e9a', 'UA': u'\u4e4c\u514b\u5170', 'QA': u'\u5361\u5854\u5c14', 'MZ': u'\u83ab\u6851\u6bd4\u514b'}
# Full month names, January through December.
months=[u'\u4e00\u6708', u'\u4e8c\u6708', u'\u4e09\u6708', u'\u56db\u6708', u'\u4e94\u6708', u'\u516d\u6708', u'\u4e03\u6708', u'\u516b\u6708', u'\u4e5d\u6708', u'\u5341\u6708', u'\u5341\u4e00\u6708', u'\u5341\u4e8c\u6708']
# Abbreviated month names (identical to the full names in Chinese).
abbrMonths=[u'\u4e00\u6708', u'\u4e8c\u6708', u'\u4e09\u6708', u'\u56db\u6708', u'\u4e94\u6708', u'\u516d\u6708', u'\u4e03\u6708', u'\u516b\u6708', u'\u4e5d\u6708', u'\u5341\u6708', u'\u5341\u4e00\u6708', u'\u5341\u4e8c\u6708']
# Full weekday names, Monday through Sunday.
days=[u'\u661f\u671f\u4e00', u'\u661f\u671f\u4e8c', u'\u661f\u671f\u4e09', u'\u661f\u671f\u56db', u'\u661f\u671f\u4e94', u'\u661f\u671f\u516d', u'\u661f\u671f\u65e5']
# Abbreviated weekday names (single character), Monday through Sunday.
abbrDays=[u'\u4e00', u'\u4e8c', u'\u4e09', u'\u56db', u'\u4e94', u'\u516d', u'\u65e5']
# strftime-style date format patterns; %%(...)s placeholders are filled
# with the month/day names defined above by the i18n formatting code.
dateFormats={'medium': '%Y %%(abbrmonthname)s %d ', 'full': '%%(dayname)s, %Y %%(monthname)s %d ', 'long': '%Y %%(monthname)s %d ', 'short': '%y/%m/%d'}
# Symbols used when formatting numbers (grouping, decimal point, signs, etc.).
numericSymbols={'group': ',', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': '.', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'}
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/toolkits/data_matching/similarity_search.py | from graphlab.data_structures.sframe import SFrame as _SFrame
from graphlab.data_structures.image import Image as _Image
from graphlab.toolkits._model import CustomModel as _CustomModel
from graphlab.toolkits._main import ToolkitError as _ToolkitError
import graphlab as _gl
import graphlab.toolkits._internal_utils as _tkutl
import graphlab.connect as _mt
from graphlab.util import _raise_error_if_not_of_type
from graphlab.toolkits._internal_utils import _raise_error_if_column_exists
import time as _time
def get_default_options():
    """
    Return default options information for the similarity search toolkit.

    Returns
    -------
    out : SFrame
        Each row in the output SFrame corresponds to a parameter, and
        includes columns for default values, lower and upper bounds,
        description, and type.
    """
    # Assemble the option metadata column-by-column, then wrap it in an SFrame.
    option_info = {
        'name': ['method', 'feature_model', 'verbose'],
        'default_value': ['lsh', 'auto', 'True'],
        'lower_bound': [None, None, 0],
        'upper_bound': [None, None, 1],
        'description': ['Method for searching reference data',
                        'Trained model for extracting features from raw data objects',
                        'Whether progress output is printed'],
        'parameter_type': ['string', 'model', 'boolean'],
    }
    return _SFrame(option_info)
def create(data, row_label=None, features=None, feature_model='auto',
           method='lsh', verbose=True):
    """
    Create a similarity search model, which can be used to quickly retrieve
    items similar to a query observation. In the case of images, this model
    automatically performs the appropriate feature engineering steps. NOTE:
    If you are using a CPU for the creation step with feature_model='auto',
    creation time may take a while. This is because extracting features for
    images on a CPU is expensive. With a GPU, one can expect large speedups.

    Parameters
    ----------
    data : SFrame
        The SFrame that represents the training data for the model, including at
        least one column of images.

    row_label : str, optional
        Name of the SFrame column with row id's. If 'row_label' is not
        specified, row numbers are used to identify reference dataset rows when
        the model is queried.

    features : str
        The name of an image column in the input 'data' SFrame. Although it
        defaults to None, it is validated as a string below and is therefore
        effectively required.

    feature_model : 'auto' | A model of type NeuralNetClassifier, optional
        A trained model for extracting features from raw data objects. By
        default ('auto'), we choose an appropriate model from our set of
        pre-trained models. See
        :class:`~graphlab.toolkits.feature_engineering.DeepFeatureExtractor` for
        more information.

    method : {'lsh', 'brute_force'}, optional
        The method used for nearest neighbor search. The 'lsh' option uses
        locality-sensitive hashing to find approximate results more quickly.

    verbose : bool, optional
        If True, print verbose output during model creation.

    Returns
    -------
    out : SimilaritySearchModel

    See Also
    --------
    SimilaritySearchModel
    graphlab.toolkits.nearest_neighbors
    graphlab.toolkits.feature_engineering

    Notes
    -----
    The similarity search toolkit currently uses cosine distance to evaluate the
    similarity between each query and candidate results.

    Examples
    --------
    First, split data into reference and query.

    >>> import graphlab as gl
    >>> data = gl.SFrame('https://static.turi.com/datasets/mnist/sframe/train6k')
    >>> reference, query = data.random_split(0.8)

    Build neuralnet feature extractor for images:

    >>> nn_model = gl.neuralnet_classifier.create(reference, target='label')

    Construct SimilaritySearchModel:

    >>> model = gl.similarity_search.create(reference, features= 'image',
    ...                                     feature_model=nn_model)

    Find the most similar items in the reference set for each item in the query
    set:

    >>> model.search(query)
    """
    # Record toolkit usage for metrics.
    _mt._get_metric_tracker().track(__name__ + '.create')
    # Validate inputs: `features` must name an existing Image column of `data`.
    _raise_error_if_not_of_type(data, [_SFrame])
    _raise_error_if_not_of_type(features, [str])
    _raise_error_if_column_exists(data, features)
    if data[features].dtype() != _Image:
        raise _ToolkitError("Feature `%s` must be of type Image" \
                % features)
    return SimilaritySearchModel(data, row_label=row_label, feature=features,
                                 feature_model=feature_model, method=method, verbose=verbose)
class SimilaritySearchModel(_CustomModel):
    """
    The similarity search toolkit searches a reference collection of raw data
    objects, such as images, for items that are similar to a query. This toolkit
    encapsulates the whole data processing pipeline that typically accompanies
    this process: detection of input data types, extraction of numeric features,
    and a nearest neighbors search. For advanced users, each phase of this
    process can be customized, or the toolkit can choose good default options
    automatically.

    This model should not be constructed directly. Instead, use
    :func:`graphlab.data_matching.similarity_search.create` to create an
    instance of this model.
    """

    _SIMILARITY_SEARCH_VERSION = 1

    def __init__(self, data, row_label=None, feature=None, feature_model='auto',
                 method='brute_force', verbose=False):
        start_time = _time.time()

        self._state = {'row_label': row_label,
                       'method': method,
                       'verbose': verbose,
                       'features': feature,
                       'num_examples': data.num_rows()}

        # Keep only the columns needed for the search, to limit memory usage.
        if row_label is not None:
            data_subset = data[[feature, row_label]]
        else:
            data_subset = data[[feature]]

        self._feature_type = data_subset[feature].dtype()

        # Only image features are supported; extract numeric features with a
        # deep feature extractor before building the neighbors index.
        if data_subset[feature].dtype() == _Image:
            prefix = 'extracted'
            extractor = _gl.feature_engineering.DeepFeatureExtractor(
                features=feature, output_column_prefix=prefix,
                model=feature_model)
            self._state['output_column_name'] = prefix + '.' + feature
            self._state['feature_model'] = extractor['model']
            self._extractor = extractor.fit(data_subset)
            self._data = self._extractor.transform(data_subset)
        else:
            raise _ToolkitError('Feature type not supported.')

        if method == 'brute_force':
            self._neighbors_model = _gl.toolkits.nearest_neighbors.create(
                self._data, label=row_label,
                features=[self._state['output_column_name']],
                distance='cosine', method='brute_force', verbose=verbose)
        elif method == 'lsh':
            # Fixed LSH parameters chosen as reasonable defaults for cosine
            # distance on deep features.
            num_tables = 20
            num_projections_per_table = 16
            self._neighbors_model = _gl.toolkits.nearest_neighbors.create(
                self._data, label=row_label,
                features=[self._state['output_column_name']],
                distance='cosine', method='lsh',
                num_tables=num_tables,
                num_projections_per_table=num_projections_per_table,
                verbose=verbose)
        else:
            raise _ToolkitError('Unsupported Method %s' % method)

        self._state['training_time'] = _time.time() - start_time

    def save(self, location, save_untransformed=False):
        """
        Save the model. The model is saved as a directory which can then be
        loaded using the :py:func:`~graphlab.load_model` method.

        Parameters
        ----------
        location : string
            Target destination for the model. Can be a local path or remote URL.

        save_untransformed : bool
            Whether to save untransformed data (e.g. images) in the 'data'
            field. Images may take up quite a lot of space, and it may only be
            necessary to keep the internal representation (extracted features)
            of those images. Default is False.

        See Also
        ----------
        graphlab.load_model

        Examples
        ----------
        >>> model.save('my_model_file')
        >>> loaded_model = gl.load_model('my_model_file')
        """
        if not save_untransformed:
            # Temporarily drop the raw feature column so it is not persisted.
            # Restore it in a finally block so a failed save cannot leave the
            # in-memory model without its feature column (the original code
            # only restored on success).
            temp_untransformed = self._data[self._state['features']]
            del self._data[self._state['features']]
            try:
                super(SimilaritySearchModel, self).save(location)
            finally:
                self._data[self._state['features']] = temp_untransformed
        else:
            super(SimilaritySearchModel, self).save(location)

    @classmethod
    def _load_version(cls, unpickler, version):
        """
        A function to load an object with a specific version of the class.

        Parameters
        ----------
        unpickler : file
            A GLUnpickler file handle.

        version : int
            A version number as maintained by the class writer.
        """
        model = unpickler.load()
        # Version 0 models predate the 'output_column_name' state entry;
        # reconstruct it from the feature column name.
        if version == 0:
            feature = model._state['features']
            model._state['output_column_name'] = 'extracted.' + feature
        return model

    def list_fields(self):
        """
        List the model's queryable fields.

        Returns
        -------
        out : list
            Each element in the returned list can be queried with the ``get``
            method.
        """
        return list(self._state.keys())

    def get(self, field):
        """
        Return the value contained in the model's ``field``.

        The list of all queryable fields is detailed below, and can be obtained
        with the ``list_fields`` method.

        +-----------------------+----------------------------------------------+
        | Field                 | Description                                  |
        +=======================+==============================================+
        | feature_model         | Model for extracting features from raw data. |
        +-----------------------+----------------------------------------------+
        | features              | Name of the feature column in the input data.|
        +-----------------------+----------------------------------------------+
        | method                | Method for searching the reference data.     |
        +-----------------------+----------------------------------------------+
        | num_examples          | Number of reference data objects.            |
        +-----------------------+----------------------------------------------+
        | row_label             | Name of the row ID column.                   |
        +-----------------------+----------------------------------------------+
        | training_time         | Time to create the model.                    |
        +-----------------------+----------------------------------------------+
        | verbose               | Whether model creation progress is printed.  |
        +-----------------------+----------------------------------------------+

        Parameters
        ----------
        field : str
            Name of the field to be retrieved.

        Returns
        -------
        out
            Value of the requested field.

        See Also
        --------
        list_fields
        """
        # Only a missing key should be reported as an unknown field; the
        # original bare `except:` also swallowed unrelated errors such as
        # KeyboardInterrupt.
        try:
            return self._state[field]
        except KeyError:
            raise ValueError("There is no model field called {}".format(field))

    def get_current_options(self):
        """
        Return a dictionary with the options used to define and create the
        current model.

        Returns
        -------
        out : dict
            Dictionary of options and values used to train the current instance
            of the SimilaritySearchModel.

        See Also
        --------
        get_default_options, list_fields, get
        """
        return {k: self._state[k] for k in get_default_options()['name']}

    def __str__(self):
        """
        Return a string description of the model to the ``print`` method.

        Returns
        -------
        out : string
            A description of the model.
        """
        return self.__repr__()

    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where relevant)
        the schema of the training data, description of the training data,
        training statistics, and model hyperparameters.

        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
              Each section is a list.
                Each item in a section list is a tuple of the form:
                  ('<label>','<field>')
        section_titles: list
            A list of section titles.
              The order matches that of the 'sections' object.
        """
        model_fields = [
            ("Number of reference examples", 'num_examples')]
        training_fields = [
            ("Method", 'method'),
            ("Total training time (seconds)", 'training_time')]
        sections = [model_fields, training_fields]
        section_titles = ['Schema', 'Training']
        return (sections, section_titles)

    def __repr__(self):
        """
        Print a string description of the model when the model name is entered
        in the terminal.
        """
        (sections, section_titles) = self._get_summary_struct()
        return _tkutl._toolkit_repr_print(self, sections, section_titles, width=30)

    def search(self, data, row_label=None, k=5):
        """
        Search for the nearest neighbors from the reference set for each element
        of the query set. The query SFrame must include columns with the same
        names as the row_label and feature columns used to create the
        SimilaritySearchModel.

        Parameters
        ----------
        data : SFrame
            Query data. Must contain columns with the same names and types as
            the features used to train the model. Additional columns are
            allowed, but ignored.

        row_label : string, optional
            Name of the query SFrame column with row id's. If 'row_label' is not
            specified, row numbers are used to identify query dataset rows in
            the output SFrame.

        k : int, optional
            Number of nearest neighbors to return from the reference set for
            each query observation. The default is 5 neighbors.

        Returns
        -------
        out
            A SFrame that contains all the nearest neighbors.

        Examples
        --------
        First, split data into reference and query:

        >>> import graphlab as gl
        >>> data = gl.SFrame('https://static.turi.com/datasets/mnist/sframe/train6k')
        >>> reference, query = data.random_split(0.8)

        Build a neural net feature extractor for images:

        >>> nn_model = gl.neuralnet_classifier.create(reference, target='label')

        Construct the SimilaritySearchModel:

        >>> model = gl.similarity_search.create(reference, features='image',
        ...                                     feature_model=nn_model)

        Find the most similar items in the reference set for each query:

        >>> model.search(query)
        """
        _raise_error_if_not_of_type(row_label, [str, type(None)])

        feature = self._state['features']
        _raise_error_if_column_exists(data, feature)

        if (data[feature].dtype() != self._feature_type):
            raise ValueError('Feature columns must have same data type in both reference and query set')

        if row_label is not None:
            _raise_error_if_column_exists(data, row_label)

        if data[feature].dtype() == _Image:
            transformed_data = self._extractor.transform(data)
        else:
            # NOTE(review): in this branch `transformed_data` aliases the
            # caller's SFrame, so the column assignment below adds a column to
            # the caller's data — confirm this is intended. (With the current
            # image-only constructor, this branch appears unreachable.)
            transformed_data = data
            transformed_data[self._state['output_column_name']] = transformed_data[feature]

        return self._neighbors_model.query(transformed_data, label=row_label, k=k)
/Ibidas-0.1.26.tar.gz/Ibidas-0.1.26/docs/tutorial_example2.rst | Chromosome distribution
=======================
In this example, we will analyze the genomic locations of transcription factor targets, to determine if transcription
factors favor specific chromosomes. Also, we will compare transcription factors on the chromosomes they target, to see if there are transcription
factors that target similar sets of chromosomes. Likewise, we will compare chromosomes, to see if there are chromosomes that attract common
transcription factors.
Importing the data
~~~~~~~~~~~~~~~~~~
We will use the transcription factor data that has been imported in the previous example. The same data
can be obtained directly using::
>>> yeastract = Get.yeast.yeastract()
Next to this transcription factor data, we also need the location of all genes on the chromosomes.
This information can be found in the ``SGD_features.tab``, which can be obtained from yeastgenome.org.
Unfortunately, like the Yeastract data, this file also comes without field names, so we specify those through the type::
rtype = """[feats:*]<(sgdid=bytes, feat_type=bytes, feat_qual=bytes, feat_name=bytes, gene_name=bytes,
gene_aliases=bytes, feat_parent_name=bytes, sgdid_alias=bytes, chromosome=bytes,
start=bytes, stop=bytes, strand=bytes[1], genetic_pos=bytes, coordinate_version=bytes[10],
sequence_version=bytes, description=bytes)"""
res = Read(Fetch("http://downloads.yeastgenome.org/curation/chromosomal_feature/SGD_features.tab"),dtype=rtype)
Note that, instead of specifying the type, we could also just have named the slices that were needed, for example using::
res = res/{'f3': 'feat_name', 'f8':'chromosome', 'f9':'start'}
This would rename field 3, 8 and 9 (starting from 0!).
Type casting
^^^^^^^^^^^^
While not all fields will be used in this example, for the purpose of the tutorial we will attempt to prepare the whole dataset for easy use.
First, when reading a file like this one, all input data is in string(bytes) format. For some slices this is not the ideal format.
Therefore, we change the types of certain slices from ``bytes`` to ``int`` and ``real`` types. This is an operation that is known as casting::
res = res.To(_.start, _.stop, Do=_.Cast("int?"))
res = res.To(_.genetic_pos, Do=_.Cast("real64?"))
``To`` is a utility function, which allows one to apply other operations to a subselection
of the slices in a data set. In this case, we cast the ``start`` and ``stop`` slice to an integer type, and the ``genetic_pos``
slice to a double floating point type. Note that we do specify ``int?``, i.e. with a
question mark sign. This indicates that missing values (empty fields) are allowed.
.. note::
Maybe you ask yourself why we do not use the following approach::
>>> res.genetic_pos = res.genetic_pos.Cast("real64?")
The reason is that res could have been used in another query before executing this command. Changing res by
performing this operation would therefore lead to some problems because of the lazy nature of query execution in Ibidas.
It might be possible to allow this in the future, however it would require some trickery/overhead. So, for now, we use the approach
with the ``To`` operation.
Applying a regular Python function and filtering
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Next, we take a look at the ``gene_aliases`` slice, which contains multiple gene aliases separated by the '|' symbol.
We would like to split these strings into individual names, and remove the empty names. For the split operation, we
use the standard Python split function. The whole expression becomes::
>>> splitfunc = _.split('|')
>>> res.gene_aliases.Each(splitfunc).Elems()[_ != ""]
`splitfunc` is here a context operator based expression, which can be applied to a string in order to split it.
``Each`` applies a regular python function or a context object to each element in a slice. The slice returning from this has
in this case as type lists of strings, as that is the output of the splitfunc operation.
``Elems`` `unpacks` this resulting list of names, such that subsequent operations will be performed on the list elements instead of the list itself.
``Filter``, denoted by the `[]`, only keeps elements (denoted by the context operator) that are unequal to the empty string.
.. note::
Note that Ibidas cannot know what type will result from the function used in the ``Each`` operation. For that reason it will automatically
perform type detection when necessary for subsequent operations. It is possible to prevent this by specifying the type at forehand.
Also, instead of the context operation one can use regular python functions, which (at the moment) execute slightly faster::
>>> splitfunc = lambda x: x.split('|')
>>> dtype = "[aliases:~]<bytes"
>>> res.gene_aliases.Each(splitfunc, dtype=dtype).Elems()[_ != ""]
(lambda allows one to define anonymous functions in Python)
To make these modified gene_aliases slice part of the dataset, we apply them again using the ``To`` function, and store the results using ``Copy``::
splitfilter = _.Each(splitfunc, dtype=dtype).Elems()[_ != ""]
yeast_feats = res.To(_.gene_aliases, Do=splitfilter).Copy()
Short version
^^^^^^^^^^^^^
To obtain both datasets directly, use::
yeast_feats = Get.yeast.genomic_feats()
yeastract = Get.yeast.yeastract()
Linking the datasets
~~~~~~~~~~~~~~~~~~~~
Now, we have to link both the yeastract dataset and the genomic features dataset. This is done by matching the ``targets`` in the Yeastract dataset
with the ``feat_name`` slice in the genomic features dataset. This can be accomplished using the ``Match`` operation, which links rows in two datasets
based on equality of the entries in two slices.
For example, we could use::
>>> tf_feat = yeastract |Match(_.target, _.feat_name)| yeast_feats
to match both datasets on their target and feat_name slice.
However, there is the small problem that both datasets have different upper/lowercase usage, due to which
most target and feat_name names do not match with each other.
So, instead, we convert each target and feat_name to upper case before matching::
>>> tf_feat = yeastract |Match(_.target.Each(str.upper), _.feat_name.Each(str.upper))| yeast_feats
>>> tf_feat #only showing a few slices...
Slices: | trans_factor | target | sgdid | feat_type | feat_qual
-----------------------------------------------------------------------------------------------------------
Type: | bytes | bytes | bytes | bytes | bytes
Dims: | yeastract_feats:* | yeastract_feats:* | yeastract_feats:* | yeastract_feats:* | yeastract_feats:*
Data: | | | | |
| Gcr2 | YAL008w | S000000006 | ORF | Verified
| Met4 | YAL008w | S000000006 | ORF | Verified
| Otu1 | YAL008w | S000000006 | ORF | Verified
| ... | ... | ... | ... | ...
When using a regular ``Match`` operation, any ``target`` row for which no entry can be found in ``feat_name`` will be left out, and vice versa (there are options to prevent this).
Sidestep: Checking what is linked
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The linking of both datasets is now complete. In this section, we will determine which elements could not be linked, and see if we can do better.
These steps are performed just to introduce some commands and concepts, and are not necessary to complete the example.
First, we do a quick check to determine how many rows in the yeastract dataset could not be matched. A naive approach to this would be::
>>> yeastract.target.Count() - tf_feat.target.Count()
Slices: | target
----------------
Type: | int64
Dims: |
Data: |
| 72
On a total of 48010 pairs, it appears thus that we lost only a few transcription factor-target pairs.
This assumes however that `yeast_feats` did not have any non-unique names in `feat_name`, as repeated names will match multiple times to the
same entry in yeastract, and thus increases the number of entries. As an illustration, say we have::
>>> d1 = Rep([1,2,3,3])
>>> d2 = Rep([1,3,3])
>>> d1 |Match| d2
Slices: | data
---------------
Type: | int64
Dims: | d1:*
Data: |
| 1
| 3
| 3
| 3
| 3
Thus, two rows with 3's match in ``d1`` match each to two rows of 3's in ``d2``, resulting in 2 * 2 rows of 3's in the output.
It is easy to determine that `yeast_feats` does not have such non-unique names, using::
>>> yeast_feats.feat_name[_ != ""].Get(_.Count() == _.Unique().Count())
Slices: | feat_name
-------------------
Type: | bool
Dims: |
Data: |
| True
This command removes the empty feat_names (which do not occur in `yeastract`), and then counts the remaining feat_names, and compares this to a count of the remaining unique feat_names.
However, even a better approach is to circumvent this extra assumption, by checking if the rows in yeastract do actually occur in tf_feat::
>>> (yeastract |Except| tf_feat.Get(_.trans_factor, _.target)).Count()
Slices: | trans_factor | target
-------------------------------
Type: | int64 | int64
Dims: | |
Data: | |
| 72 | 72
This introduces the ``Except`` command. This command only keeps rows of yeastract that do not occur in tf_feat. These rows are subsequently counted. Note that this gives the same answer as
we had before.
A shorter version of this command, that also scales to cases in which `yeastract` has many slices, is the following::
>>> (yeastract |Except| tf_feat.Get(*yeastract.Names)).Count()
Next, we determine which target names could not be matched::
>>> nonmatched = yeastract.target |Except| tf_feat.target
>>> nonmatched.Show()
Slices: | target
---------------------------------------
Type: | bytes
Dims: | syeastract_syeastract_feats:*
Data: |
| YLR157w-c
| A1
| YJL012c-a
| MALT
| MALS
| snR20
| A2
| YAR044w
| RDN5
| YJL017w
| ALD1
| YGR272c
| YBL101w-b
| YBL101w-c
| YDL038c
| YBL101w-a
| TER1
| SUC6
| YDR524w-a
| YDR474c
| YBR075w
| DEX2
Using ``Except``, we keep only the targets in yeastract that do not occur in ``tf_feat.target``. Another lower level way to accomplish the same result
would be::
>>> non_matched = (yeastract.target.Set() - tf_feat.target.Set()).Elem()
``Set`` is used to pack the elements of the (by default last) dimension into a set. A set is a collection of objects
in which each element is unique. That is, adding the string "YLR157W-C" multiple times to a set will result in a set with just one occurrence of "YLR157W-C".
Sets have some special operations defined on them. One of them is set subtraction, which was used here. It removes all elements in the set of the first operand that
also occur in the set of the second operand, leaving only the elements that do not occur in the second operand. In this case thus the elements that were not matched by the Match operation.
Next, we use the ``Elem`` operation to unpack the resulting set.
The names in the list suggest that we might find matching rows by looking either at the ``gene_name`` or ``gene_aliases`` column of the `yeast_feats` dataset
Before we do this, we first convert each name in nonmatched to uppercase::
>>> nonmatched = nonmatched.Each(str.upper)
First, we check the ``gene_name`` column. This does not give any matches however::
>>> nonmatched |In| yeast_feats.gene_name.Each(str.upper)
Slices: | result
-----------------------------
Type: | bool
Dims: | stftargets_sfeats:*
Data: |
| False
| False
| False
| ...
(Use Show() to see the whole result). This introduces the ``In`` operation, which determines if elements in the left operand occur in the (by default last) dimension of the right operand.
Next we look at the gene_aliases column. As you might remember this slice does contain nested arrays of aliases. So what will ``|In|`` return here?::
>>> nonmatched.Each(str.upper) |In| yeast_feats.gene_aliases.Each(str.upper)
Slices: | result
----------------------------------------------------
Type: | bool
Dims: | stftargets_sfeats:*<feats:*
Data: |
| [False False False ..., False False False]
| [False False False ..., False False False]
| [False False False ..., False False False]
| ...
As you can see, ``|In|`` matches with the last dimension of ``gene_aliases``. This means that there are multiple aliases list to be matched, which together with
the multiple names to be tested results in a matrix of results, of size(non_matched) by size(yeast_feats). Of course, this is not what we exactly want. We can solve this using ``Any``::
>>> Any(nonmatched |In| yeast_feats.gene_aliases.Each(str.upper))
Slices: | result
-----------------------------
Type: | bool
Dims: | stftargets_sfeats:*
Data: |
| True
| True
| True
| ...
This aggregates across the ``feats`` dimension, to determine if any of the features had any alias that matched something in our list. Indeed, we found
matches for the targets.
We will use the Match function to find which genes match to these non-matched targets (we could also have done this directly of course, but that would have prevented us from introducing some operations).
Using Flat, we flatten the gene alias list, and then apply Match as we did before::
>>> nonmatched_feats = nonmatched |Match(_.target, _.gene_aliases.Each(str.upper))| yeast_feats.Flat()
>>> nonmatched_feats
Slices: | target | sgdid | feat_type | feat_qual | feat_name
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Type: | bytes[11] | bytes | bytes | bytes | bytes[11]
Dims: | stftargets_sfeats_feats_falias~ | stftargets_sfeats_feats_falias~ | stftargets_sfeats_feats_falias~ | stftargets_sfeats_feats_falias~ | stftargets_sfeats_feats_falias~
Data: | | | | |
| YLR157W-C | S000028678 | ORF | Uncharacterized | YLR157W-E
| YAR044W | S000000081 | ORF | Verified | YAR042W
| YBL101W-C | S000028598 | ORF | Uncharacterized | YBL100W-C
| YBL101W-A | S000002148 | transposable_element_gene | | YBL100W-A
| YJL017W | S000003553 | ORF | Uncharacterized | YJL016W
| A1 | S000029660 | not in systematic sequence of ~ | | MATA1
| YJL012C-A | S000003549 | ORF | Verified | YJL012C
| MALT | S000000502 | ORF | Verified | YBR298C
| MALT | S000003521 | ORF | Verified | YGR289C
| MALT | S000029681 | not in systematic sequence of ~ | | MAL21
| MALT | S000029686 | not in systematic sequence of ~ | | MAL41
| MALT | S000029658 | not in systematic sequence of ~ | | MAL61
| MALS | S000000503 | ORF | Verified | YBR299W
| MALS | S000003524 | ORF | Verified | YGR292W
| ... | ... | ... | ... | ...
This shows a possible reason why some of these targets do not have an official name, as they match to multiple genomic features. However, other targets
only have a single corresponding genomic feature, and could have been linked. To improve our mapping, we decide to redo our match, and include rows
that have a unique ``gene_alias`` match. Our strategy is as follows:
1. Filter out gene_aliases that occur multiple times, as we only want unique matches
2. Convert yeastract targets names that match to gene_aliases to the corresponding feat_names
3. Rematch the data.
First, we determine what names need to be filtered, and filter these from the gene_aliases::
>>> unique_gene_aliases = yeast_feats.Flat().GroupBy(_.gene_aliases)[Count(_.feat_name) == 1].gene_aliases
>>> name_alias_list = yeast_feats[_.gene_aliases |In| unique_gene_aliases]
The first command flattens the nested gene_alias lists, to get a flat table (If there had been more than one nested list
dimension, we would have had to specify `yeast_feats.Flat(_.gene_aliases)`).
Next, we group the data on common gene_aliases, and then remove those gene_aliases that have more than one associated feat_name.
Subsequently, we filter the yeast_feats table, such that we only keep the gene_aliases that are in the list of unique gene aliases.
In the second step, we convert the yeastract names that occur in the gene_aliases. This can be done using the ``TakeFrom`` command::
>>> convert_table = name_alias_list.Get(_.gene_aliases.Each(str.upper), _.feat_name).Flat()
>>> yeastract = yeastract.To(_.target, Do=_.Each(str.upper).TakeFrom(convert_table, keep_missing=True))
The TakeFrom command takes a two-slice table (convert_table), and converts the target names that occur in the first slice of the
table to the names of the second slice of the table. We set keep_missing to true, to also keep the names that do not occur in the
gene_aliases.
Now we can redo our match, as we did before::
>>> tf_feat = yeastract |Match(_.target.Each(str.upper), _.feat_name.Each(str.upper))| yeast_feats
Counting again the number of yeastract rows that could be matched, we find::
>>> (yeastract |Except| tf_feat.Get(*yeastract.Names)).Count()
Slices: | trans_factor | target
-------------------------------
Type: | int64 | int64
Dims: | |
Data: | |
| 6 | 6
Thus, 72 - 6 = 66 additional rows in yeastract have been matched.
Short version
^^^^^^^^^^^^^
To obtain directly the results of the last section, do::
#remove non-unique gene_aliases
>>> name_alias_list = yeast_feats[_.gene_aliases |In| _.Flat().GroupBy(_.gene_aliases)[Count(_.feat_name) == 1].gene_aliases]
#convert yeastract target names that match to gene_aliases, to the corresponding feat_names
>>> convert_table = name_alias_list.Get(_.gene_aliases.Each(str.upper), _.feat_name).Flat()
>>> yeastract = yeastract.To(_.target, Do=_.Each(str.upper).TakeFrom(convert_table, keep_missing=True))
>>> tf_feat = yeastract |Match(_.target.Each(str.upper), _.feat_name.Each(str.upper))| yeast_feats
Save dataset
~~~~~~~~~~~~
First, we save the current dataset. This can be done using::
>>> Save(tf_feat, 'tf_feat.dat')
The data can be loaded again using::
>>> tf_feat = Load('tf_feat.dat')
Chromosome distribution
~~~~~~~~~~~~~~~~~~~~~~~
We start with determining for each transcription factor the number of targets per chromosome. To do this, we use a two-dimensional group, grouping both on transcription factor
and chromosome, and counting the number of targets per transcription_factor / chromosome pair::
>>> tf_feat = tf_feat.GroupBy(_.trans_factor, _.chromosome)
>>> res = tf_feat.Get(_.trans_factor, _.chromosome, _.target.Count()/"count", _.start).Copy()
>>> res
Slices: | trans_factor | chromosome | count | start
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
Type: | bytes | bytes | int64 | int64?
Dims: | gtrans_factor:* | gchromosome:* | gtrans_factor:*<gchromosome:* | gtrans_factor:*<gchromosome:*<gyeastract_feats:~
Data: | | | |
| Gcr2 | 1 | [17 48 60 37 40 32 24 31 80 48 29 52 16 42 8 32] | [ [136914 36509 2169 186321 21566 31567 222406 221049 92~
| Met4 | 2 | [ 23 92 100 78 89 61 54 85 165 110 69 110 37 96~ | [ [136914 130799 74020 67520 21566 58462 31567 151166 39~
| Otu1 | 7 | [ 4 11 9 5 2 1 4 5 11 6 2 4 8 0 0 7] | [[136914 135665 110430 158619]; [278352 568426 738369 6~
| Cin5 | 16 | [10 37 54 40 50 38 23 24 67 57 28 65 8 47 9 32] | [[73518 169375 74020 58462 45022 190193 129270 71786 334~
| Gcn4 | 10 | [ 22 92 108 92 94 73 49 84 154 97 68 106 24 104~ | [ [169375 36509 192619 130799 67520 203403 58462 54789 4~
| Zap1 | 11 | [ 4 13 22 10 11 10 2 13 19 17 9 14 12 19 3 7] | [[186321 151166 99697 155005]; [724456 686901 382030 81~
| Yap7 | 9 | [ 2 13 11 12 14 15 2 11 30 19 15 12 4 13 3 8] | [[130799 119541]; [724456 168423 583720 582652 331511 7~
| Ste12 | 14 | [ 37 153 215 158 129 113 93 131 227 200 93 182 70 159~ | [ [169375 164187 92270 192619 67520 21566 69525 203403 1~
| Arr1 | 4 | [ 4 1 85 87 51 54 28 76 4 90 52 97 6 81 26 1] | [[31567 222406 120225 119541] [316968]; [828625 87981 4~
| Aft2 | 12 | [ 1 15 20 11 9 8 6 14 22 25 9 18 2 14 6 13] | [[110430]; [382030 633622 447709 635146 393123 426489 5~
| Ecm22 | 8 | [ 6 15 25 33 22 13 13 17 31 27 13 17 4 11 6 17] | [[94687 218140 2169 186321 220198 177023]; [444693 7144~
| Ino4 | 15 | [ 7 40 69 49 38 22 26 53 86 45 43 61 24 40 7 27] | [[94687 21566 69525 45022 71786 13743 45899]; [13879 31~
| Aft1 | 3 | [ 22 75 114 82 72 53 50 71 127 87 65 104 26 94~ | [ [36509 87031 129019 203403 31567 222406 45022 190193 1~
| Sok2 | 13 | [ 31 62 98 68 62 51 54 69 97 58 70 109 24 72~ | [ [218140 164187 92270 82706 192619 67520 21566 175135 6~
| ... | ... | ... | ...
Note that each slice has now a different dimension. Trans_factor and chromosome both have a single dimension, with all unique values. The count slice contains a matrix,
with counts for each transcription_factor/chromosome pair, and ``start`` contains for each transcription factor/chromosome pair a list of all gene start positions.
To calculate a correlation between transcription factors, based on whether they target the same chromosomes, we can simply do::
>>> Corr(res.count)
However, the resulting correlations are positively biased as we did not control for the different numbers of genes on each chromosome.
To normalize the count data, we divide by the total number of targets per chromosome::
>>> Corr(res.count.Cast("real64") / res.count.Sum("gtrans_factor").count)
Slices: | count
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
Type: | real64
Dims: | gtrans_factor:*<gtrans_factor:*
Data: |
| [ 1. 0.84058821 0.56884259 0.46862953 0.75701405 0.60343542; 0.54598067 0.78050323 0.1699565 0.39857328 0.695562 0.63384689; ~
| [ 0.84058821 1. 0.34636467 0.40345917 0.86052624 0.61812576; 0.68426206 0.92512018 0.45395797 0.67508341 0.72107807 0.78040341; ~
| [ 0.56884259 0.34636467 1. -0.15274769 0.07486133 0.58630083; -0.02208724 0.38574002 -0.41424377 -0.10392024 0.22757454 0.38601169; ~
| [ 0.46862953 0.40345917 -0.15274769 1. 0.71668069 0.07167468; 0.42931201 0.35479816 0.32588663 0.34150679 0.43178954 0.21771547; ~
| [ 0.75701405 0.86052624 0.07486133 0.71668069 1. 0.40977524; 0.70526491 0.78290387 0.52340019 0.64076449 0.73000811 0.62646315; ~
| [ 0.60343542 0.61812576 0.58630083 0.07167468 0.40977524 1.; 0.48239639 0.64139371 0.21699255 0.25563464 0.22584824 0.6321228; 0.5578196~
| [ 0.54598067 0.68426206 -0.02208724 0.42931201 0.70526491 0.48239639; 1. 0.54775513 0.41362316 0.54245825 0.47982939 0.57266579; ~
| [ 0.78050323 0.92512018 0.38574002 0.35479816 0.78290387 0.64139371; 0.54775513 1. 0.48832401 0.73255135 0.74640988 0.70272516; ~
| [ 0.1699565 0.45395797 -0.41424377 0.32588663 0.52340019 0.21699255; 0.41362316 0.48832401 1. 0.54753551 0.45464972 0.41855642; ~
| [ 0.39857328 0.67508341 -0.10392024 0.34150679 0.64076449 0.25563464; 0.54245825 0.73255135 0.54753551 1. 0.53611385 0.49655451; ~
| [ 0.695562 0.72107807 0.22757454 0.43178954 0.73000811 0.22584824; 0.47982939 0.74640988 0.45464972 0.53611385 1. 0.54252082; ~
| [ 0.63384689 0.78040341 0.38601169 0.21771547 0.62646315 0.6321228; 0.57266579 0.70272516 0.41855642 0.49655451 0.54252082 1.; 0.7725603~
| [ 0.84629762 0.94229773 0.2511703 0.4532385 0.8736701 0.55781963; 0.6038615 0.89819293 0.58601108 0.64679905 0.75517794 0.77256031; ~
| [ 0.73136577 0.58847393 0.41657058 0.49813967 0.64892351 0.26202592; 0.24997758 0.5509231 0.13508773 0.25581194 0.58083685 0.46142624; ~
| ...
Note that we first cast to double, as integer division will only result in whole integers. We sum along the `gtrans_factor` dimension to determine the number of targets
per chromosome. The division operator knows on which dimension it should divide and how it should broadcast, as it can use the dimension identities.
As you can see, a square matrix is calculated with all correlation coefficients. What if we now want to calculate a correlation between chromosomes instead? We
first normalize by dividing by the total targets per transcription factor, and then perform the correlation on the transposed matrix::
>>> chr_normtf = res.To(_.count, Do=_.Cast("real64") / _.count.Sum("gchromosome"))
>>> Corr(chr_normtf.count.Transpose())
Slices: | count
------------------------------------------------------------------------------------------------------------------------------------
Type: | real64
Dims: | gchromosome:*<gchromosome:*
Data: |
| [ 1. 0.03923059 -0.04462679 -0.04095488 -0.07967297 0.15137151; 0.07687509 -0.03606974 -0.07361156 -0.0433545~
| [ 0.03923059 1. -0.11188931 -0.01870759 -0.14574641 0.33008009; -0.16066959 -0.03152373 0.06936447 -0.0485143~
| [-0.04462679 -0.11188931 1. -0.13657844 -0.11330372 -0.08524838; 0.06324146 -0.19655547 -0.17755139 0.1707482~
| [-0.04095488 -0.01870759 -0.13657844 1. -0.14262329 -0.10862002; -0.16384278 -0.08640234 -0.20092264 -0.2123649~
| [-0.07967297 -0.14574641 -0.11330372 -0.14262329 1. -0.05929658; -0.13892804 -0.12049258 -0.13792648 -0.0087695~
| [ 0.15137151 0.33008009 -0.08524838 -0.10862002 -0.05929658 1.; -0.19181151 -0.15118885 0.08037876 -0.11170856 -0.170~
| [ 0.07687509 -0.16066959 0.06324146 -0.16384278 -0.13892804 -0.19181151; 1. 0.05264461 0.15371278 0.0332715~
| [-0.03606974 -0.03152373 -0.19655547 -0.08640234 -0.12049258 -0.15118885; 0.05264461 1. -0.20148438 -0.1066510~
| [-0.07361156 0.06936447 -0.17755139 -0.20092264 -0.13792648 0.08037876; 0.15371278 -0.20148438 1. -0.0583490~
| [-0.04335456 -0.04851435 0.17074828 -0.21236499 -0.00876959 -0.11170856; 0.03327151 -0.10665103 -0.05834909 1. ~
| [-0.06679718 -0.18103018 -0.18679103 0.03853411 0.02583352 -0.17084297; -0.16592298 -0.08229363 -0.21797934 -0.1814246~
| [-0.06091041 -0.19461361 -0.15352177 -0.11909199 -0.15047679 -0.07791267; -0.01086654 -0.09348471 0.01221415 -0.1780629~
| [ 0.00375829 0.05897814 0.00921063 0.0324667 -0.00383316 0.02390791; 0.02505551 -0.01051652 -0.07514931 -0.0357086~
| [-0.01192451 -0.06100544 -0.07468588 -0.07741828 -0.1110399 -0.1256938; -0.05487917 0.11389759 0.02129484 0.10186774~
| ...
For this we use the ``Transpose`` operation, which can be used to reorder the dimensions of slices. Of course, from this matrix it is hard to identify which columns/rows correspond to which chromosome.
So we would like to order on chromosome number. As chromosome is currently a bytes type, the ``Sort`` operation would perform an alphabetic ordering. We therefore
convert chromosome to an integer (allowing for missing values, as not all genes have an associated chromosome) before sorting::
>>> chr_normtf.Sort(_.chromosome.Cast("int?")).Get(_.chromosome, Corr(_.count.Transpose()/"chromo_corr")).Show()
Slices: | chromosome | chromo_corr
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Type: | int64? | real64?
Dims: | gchromosome:* | gchromosome:*<gchromosome:*
Data: | |
| 1 | [1.0 0.0392305859056 -0.0119245063359 -0.0736115605627 0.126794737292; 0.0828449457975 -0.0446267914596 -0.0667971839521 0.0768750883472; -0.~
| 2 | [0.0392305859056 1.0 -0.0610054408293 0.069364465012 0.00176664423332; -0.0648967053241 -0.111889306527 -0.181030176875 -0.160669588901; -0.1~
| 3 | [-0.0119245063359 -0.0610054408293 1.0 0.0212948400421 0.137105024317; 0.040951946291 -0.0746858789864 -0.136588616096 -0.0548791659491; -0.1~
| 4 | [-0.0736115605627 0.069364465012 0.0212948400421 1.0 0.109860736686; 0.0822441736783 -0.177551393828 -0.217979339847 0.153712776242; -0.13792~
| 5 | [0.126794737292 0.00176664423332 0.137105024317 0.109860736686 1.0; 0.203198536954 -0.0357373652455 -0.23231214159 -0.0247653270345; -0.01625~
| 6 | [0.0828449457975 -0.0648967053241 0.040951946291 0.0822441736783; 0.203198536954 1.0 -0.0202264350435 -0.11125475417 -0.0138921581198; -0.047~
| 7 | [-0.0446267914596 -0.111889306527 -0.0746858789864 -0.177551393828; -0.0357373652455 -0.0202264350435 1.0 -0.186791033472 0.0632414609691; -0~
| 8 | [-0.0667971839521 -0.181030176875 -0.136588616096 -0.217979339847; -0.23231214159 -0.11125475417 -0.186791033472 1.0 -0.165922975268; 0.02583~
| 9 | [0.0768750883472 -0.160669588901 -0.0548791659491 0.153712776242; -0.0247653270345 -0.0138921581198 0.0632414609691 -0.165922975268 1.0; -0.1~
| 10 | [-0.0796729677666 -0.145746411585 -0.111039896564 -0.137926483313; -0.0162510011049 -0.0471924113471 -0.113303722071 0.0258335246367; -0.1389~
| 11 | [0.151371512687 0.330080094412 -0.12569380041 0.0803787562745; 0.215208353694 0.069267205163 -0.0852483767543 -0.170842971844; -0.19181151482~
| 12 | [-0.0433545580052 -0.048514346702 0.101867737225 -0.0583490943535; 0.00349728862612 0.0928809373801 0.170748282214 -0.181424654113; 0.0332715~
| 13 | [0.0984893827189 -0.0649125722394 -0.0498315789475 -0.0299329134474; 0.201054942739 0.143592852561 -0.0271264696499 -0.205023822594; 0.179569~
| 14 | [-0.0360697420734 -0.0315237313111 0.113897592107 -0.201484376028; -0.187782203257 -0.182313717367 -0.196555466146 -0.0822936292235; 0.052644~
| 15 | [-0.0609104133205 -0.194613609877 -0.0966953240163 0.0122141518555; -0.112996020284 -0.0504902614406 -0.15352176825 -0.154372238875; -0.01086~
| 16 | [-0.0409548824216 -0.0187075856301 -0.0774182808418 -0.20092264034; -0.117518827147 -0.072661793534 -0.136578435161 0.0385341077911; -0.16384~
| 17 | [0.0404627721039 0.078285703677 -0.0464339341667 -0.0313786020908; -0.0676937374275 0.129106527976 -0.0483567167497 -0.00501764528626; 0.0048~
| -- | [0.00375829063026 0.0589781371356 0.0190755740297 -0.0751493121398; 0.0134731513957 0.0904799039122 0.00921063035375 -0.0219148554581; 0.0250~
We plot the results using matplotlib::
>>> from matplotlib.pylab import *
res = chr_normtf.Sort(_.chromosome.Cast("int?")).Get(_.chromosome, Corr(_.count.Transpose()/"chromo_corr"))
imshow(res.chromo_corr(), interpolation='nearest')
xticks(Pos(res.chromosome)(), res.chromosome())
yticks(Pos(res.chromosome)(), res.chromosome())
colorbar()
show()
.. image:: chromo_corr.png
Transcription factor specificity
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As last step, we like to calculate to what extent transcription factors target specific chromosomes.
First, we obtain a dataset that is normalized for counts per chromosome::
>>> chr_normchr = res.To(_.count, Do=_.Cast("real64") / _.count.Sum("gtrans_factor"))
Next, we group for each TF the chromosome counts from low to high. Subsequently, we
sum across the rows, for all transcription factors, to get the following result::
>>> chr_normtf.count.Sort().Sum("gtrans_factor")
Slices: | count
-------------------------
Type: | real64
Dims: | gchromosome:*
Data: |
| 0.0300120048019
| 0.19843303089
| 0.55413076386
| 0.653791379541
| 0.718104362671
| 0.776878372718
| 0.829423749173
| 0.885342987674
| 0.927864695259
| 0.962609282328
| 1.00208988772
| 1.05152541613
| 1.1006070164
| 1.15928017163
| 1.22155081093
| 1.30419432548
| 1.50241732864
| 3.12174441416
It seems that indeed there is some chromosome specificity for transcription factors
(although making this a hard conclusion would probably require a permutation analysis). Try
for yourself to see if the effect persists if you remove all transcription factors with less than 20
targets from the data.
We plot the results using matplotlib::
>>> from matplotlib.pylab import *
>>> plot(normalized_counts.Sort().Sum("gtrans_factor")())
>>> title("Chromosome specificness of transcription factors")
>>> ylabel("Normalized target counts")
>>> xlabel("Less visited --> Most visited chromosome")
>>> show()
.. image:: chromo_spec.png
Summary
~~~~~~~
To directly get the results, do::
#data import
>>> yeast_feats = Get.yeast.genomic_feats()
>>> yeastract = Get.yeast.yeastract()
#structurize data
>>> res = yeastract |Match(_.target.Each(str.upper), _.feat_name.Each(str.upper))| yeast_feats
>>> res = res.GroupBy(_.trans_factor, _.chromosome)
>>> res = res.Get(_.trans_factor, _.chromosome, _.target.Count()/"count", _.start).Copy()
#tf similarity
>>> chr_normchr = res.To(_.count, Do=_.Cast("real64") / _.count.Sum("gtrans_factor"))
>>> chr_normchr.Get(_.trans_factor, Corr(_.count))
#chromosome similarity, sorted on chromosome
>>> chr_normtf = res.To(_.count, Do=_.Cast("real64") / _.count.Sum("gchromosome"))
>>> chr_normtf.Sort(_.chromosome.Cast("int?")).Get(_.chromosome, Corr(_.count.Transpose()/"chromo_corr")).Show()
#tf specificity
>>> chr_normtf.count.Sort().Sum("gtrans_factor")
| PypiClean |
/NeodroidAgent-0.4.8-py36-none-any.whl/neodroidagent/common/memory/exclude/wtf/data_structures/Replay_Buffer.py | import random
from collections import deque, namedtuple
import numpy as np
import torch
class Replay_Buffer(object):
    """Replay buffer storing past experiences that an RL agent can train on.

    Experiences are kept in a bounded deque: once ``buffer_size`` entries are
    stored, the oldest are silently discarded. Sampling is uniform without
    replacement.
    """

    def __init__(self, buffer_size, batch_size, seed):
        """Create an empty buffer.

        :param buffer_size: maximum number of experiences retained
        :param batch_size: default number of experiences drawn by ``sample()``
        :param seed: seed for the global ``random`` module (reproducible draws)
        """
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])
        # random.seed returns None; attribute kept for interface compatibility.
        self.seed = random.seed(seed)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def add_experience(self, states, actions, rewards, next_states, dones):
        """Add one experience, or a batch of experiences, to the buffer.

        If ``dones`` is a list, all arguments are treated as parallel batches
        and zipped element-wise; otherwise a single experience is stored.
        """
        if isinstance(dones, list):
            # Guard against accidentally passing a nested batch.
            assert not isinstance(dones[0], list), "A done shouldn't be a list"
            experiences = [
                self.experience(state, action, reward, next_state, done)
                for state, action, reward, next_state, done
                in zip(states, actions, rewards, next_states, dones)]
            self.memory.extend(experiences)
        else:
            self.memory.append(
                self.experience(states, actions, rewards, next_states, dones))

    def sample(self, num_experiences=None, separate_out_data_types=True):
        """Draw a uniform random sample of experiences from the buffer.

        :param num_experiences: sample size; defaults to ``self.batch_size``
        :param separate_out_data_types: when True, return five stacked torch
            tensors (states, actions, rewards, next_states, dones); otherwise
            return the raw list of experience namedtuples.
        """
        experiences = self.pick_experiences(num_experiences)
        if separate_out_data_types:
            return self.separate_out_data_types(experiences)
        return experiences

    def separate_out_data_types(self, experiences):
        """Stack experience fields into float tensors for a PyTorch network.

        Each field is vertically stacked with numpy, converted to a float
        tensor and moved to ``self.device``. ``done`` flags are cast to int
        (0/1) before stacking so booleans become usable in loss arithmetic.
        """
        states = torch.from_numpy(
            np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
        actions = torch.from_numpy(
            np.vstack([e.action for e in experiences if e is not None])).float().to(self.device)
        rewards = torch.from_numpy(
            np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
        next_states = torch.from_numpy(
            np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)
        dones = torch.from_numpy(
            np.vstack([int(e.done) for e in experiences if e is not None])).float().to(self.device)
        return states, actions, rewards, next_states, dones

    def pick_experiences(self, num_experiences=None):
        """Return a uniform random sample (without replacement) of experiences."""
        batch_size = num_experiences if num_experiences is not None else self.batch_size
        return random.sample(self.memory, k=batch_size)

    def __len__(self):
        """Number of experiences currently stored."""
        return len(self.memory)
/DataProcessing_Package-tewariutkarsh-0.0.1.tar.gz/DataProcessing_Package-tewariutkarsh-0.0.1/src/DataProcessing_Package/DataProcessing.py | from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
class PreProcess:
    """Light-weight preprocessing wrapper around a 2D dataset.

    Accepts either a pandas ``DataFrame`` or a numpy array with more than one
    dimension, and exposes helpers for scaling, profiling and inspection.

    Bug fixes versus the original implementation:
    * ``np.array()`` was called with no arguments (a ``TypeError``), which made
      every type check on numpy inputs crash — replaced with ``isinstance``.
    * ``raise "message"`` (raising a plain string) is invalid in Python 3 —
      replaced with ``raise TypeError(...)``.
    * ``StandardScalar`` discarded the result of ``fit_transform`` — it only
      needs to ``fit`` on the wrapped data before transforming ``d_std``.
    """

    def __init__(self, data):
        """Wrap *data* and validate its type immediately.

        :param data: a pandas DataFrame or a numpy array with ndim > 1
        :raises TypeError: if *data* is neither of the accepted types
        """
        self.data = data
        self.check()

    def check(self):
        """Raise TypeError unless the wrapped data is an accepted type."""
        if not (self.checkDF() or self.checkNumArr()):
            raise TypeError("Expected a 2D numpy array or a DataFrame")

    def __checkT(self, df):
        """Return True if *df* is a DataFrame or a numpy array with ndim > 1."""
        if isinstance(df, pd.DataFrame):
            return True
        return isinstance(df, np.ndarray) and df.ndim > 1

    def StandardScalar(self, d_std):
        """Fit a StandardScaler on the wrapped data and transform *d_std*.

        (Name kept as-is — including the spelling — for API compatibility.)

        :param d_std: a DataFrame or 2D numpy array to standardize
        :raises TypeError: if *d_std* is not an accepted type
        """
        if not self.__checkT(d_std):
            raise TypeError("Expected a 2D numpy array or a DataFrame for d_std input")
        scaler = StandardScaler()
        scaler.fit(self.data)
        return scaler.transform(d_std)

    def Report(self):
        """Return a pandas-profiling ProfileReport of the wrapped data."""
        return ProfileReport(self.DF())

    def Describe(self):
        """Return ``DataFrame.describe()`` summary statistics for the data."""
        return self.DF().describe()

    def Info(self):
        """Print ``DataFrame.info()`` for the wrapped data."""
        return self.DF().info()

    def DF(self):
        """Return the wrapped data as a DataFrame (converting arrays)."""
        if self.checkDF():
            return self.data
        if self.checkNumArr():
            return pd.DataFrame(self.data)
        raise TypeError("Expected a 2D numpy array or a DataFrame")

    def checkDF(self, d=None):
        """Return True if the wrapped data is a pandas DataFrame.

        The *d* parameter was unused in the original API and is kept (now
        defaulting to None instead of a shared DataFrame) for compatibility.
        """
        return isinstance(self.data, pd.DataFrame)

    def checkNumArr(self, d=None):
        """Return True if the wrapped data is a numpy array with ndim > 1.

        The unused *d* parameter is kept for API compatibility.
        """
        return isinstance(self.data, np.ndarray) and self.data.ndim > 1
/HamlPy3-0.84.0.tar.gz/HamlPy3-0.84.0/hamlpy/template/loaders.py | import os
# Prefer the real Django template machinery; fall back to a stub exception
# class so this module can still be imported without Django installed.
try:
    from django.template import TemplateDoesNotExist
    from django.template.loaders import filesystem, app_directories
    _django_available = True
except ImportError as e:
    class TemplateDoesNotExist(Exception):
        pass
    _django_available = False

from hamlpy import hamlpy
from hamlpy.template.utils import get_django_template_loaders

# Get options from Django settings (only the attribute wrapper is honoured;
# it is forwarded to hamlpy.Compiler below).
options_dict = {}
if _django_available:
    from django.conf import settings
    if hasattr(settings, 'HAMLPY_ATTR_WRAPPER'):
        options_dict.update(attr_wrapper=settings.HAMLPY_ATTR_WRAPPER)
def get_haml_loader(loader):
    """Wrap a Django template loader module in a HamlPy-aware Loader class.

    The returned class first tries to load ``<name>.<ext>`` for any HamlPy
    extension and compiles the Haml source to HTML; otherwise it raises
    ``TemplateDoesNotExist`` exactly like a normal loader.
    """
    if hasattr(loader, 'Loader'):
        base = loader.Loader
    else:
        # Old-style loader modules expose load_template_source directly;
        # adapt them behind a minimal class.
        class base(object):
            def load_template_source(self, *args, **kwargs):
                return loader.load_template_source(*args, **kwargs)

    class Loader(base):
        def load_template_source(self, template_name, *args, **kwargs):
            name, dot_ext = os.path.splitext(template_name)
            # splitext keeps the leading period on the extension.
            extension = dot_ext.lstrip('.')

            if extension in hamlpy.VALID_EXTENSIONS:
                haml_name = self._generate_template_name(name, extension)
                try:
                    haml_source, template_path = super(Loader, self).load_template_source(
                        haml_name, *args, **kwargs
                    )
                except TemplateDoesNotExist:
                    pass
                else:
                    compiler = hamlpy.Compiler(options_dict=options_dict)
                    return compiler.process(haml_source), template_path

            raise TemplateDoesNotExist(template_name)

        load_template_source.is_usable = True

        def _generate_template_name(self, name, extension="hamlpy"):
            return "%s.%s" % (name, extension)

    return Loader
# Map each discovered Django template loader to a HamlPy-aware subclass,
# keyed by the loader's name.
haml_loaders = dict((name, get_haml_loader(loader))
                    for (name, loader) in get_django_template_loaders())

# BUG FIX: the last line carried fused dataset-separator junk
# ("| PypiClean |") that broke the assignment; restored.
if _django_available:
    HamlPyFilesystemLoader = get_haml_loader(filesystem)
    HamlPyAppDirectoriesLoader = get_haml_loader(app_directories)
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/canvas/registry/qt.py | import bisect
from xml.sax.saxutils import escape
from urllib.parse import urlencode
from AnyQt.QtWidgets import QAction
from AnyQt.QtGui import QStandardItemModel, QStandardItem, QColor, QBrush
from AnyQt.QtCore import QObject, Qt
from AnyQt.QtCore import pyqtSignal as Signal
from .discovery import WidgetDiscovery
from .description import WidgetDescription, CategoryDescription
from .base import WidgetRegistry
from ..resources import icon_loader
from . import cache, NAMED_COLORS, DEFAULT_COLOR
class QtWidgetDiscovery(QObject, WidgetDiscovery):
    """
    Qt interface class for widget discovery.

    Wraps :class:`WidgetDiscovery`, re-emitting its progress and results
    as Qt signals so a GUI (e.g. a splash screen) can track discovery.
    """
    # Discovery has started
    discovery_start = Signal()
    # Discovery has finished
    discovery_finished = Signal()
    # Processing widget with name
    discovery_process = Signal(str)
    # Found a widget with description
    found_widget = Signal(WidgetDescription)
    # Found a category with description
    found_category = Signal(CategoryDescription)

    def __init__(self, parent=None, registry=None, cached_descriptions=None):
        # Initialize both bases explicitly: QObject gets the Qt parent,
        # WidgetDiscovery gets the registry and the description cache.
        QObject.__init__(self, parent)
        WidgetDiscovery.__init__(self, registry, cached_descriptions)

    def run(self, entry_points_iter):
        """Run discovery over *entry_points_iter*, bracketed by the
        ``discovery_start``/``discovery_finished`` signals."""
        self.discovery_start.emit()
        WidgetDiscovery.run(self, entry_points_iter)
        self.discovery_finished.emit()

    def handle_widget(self, description):
        # Report progress (widget name) and then the full description.
        self.discovery_process.emit(description.name)
        self.found_widget.emit(description)

    def handle_category(self, description):
        # Forward the discovered category description as a signal.
        self.found_category.emit(description)
class QtWidgetRegistry(QObject, WidgetRegistry):
    """
    A QObject wrapper for `WidgetRegistry`

    A QStandardItemModel instance containing the widgets in
    a tree (of depth 2). The items in a model can be queried using standard
    roles (DisplayRole, BackgroundRole, DecorationRole, ToolTipRole).
    They also have QtWidgetRegistry.CATEGORY_DESC_ROLE,
    QtWidgetRegistry.WIDGET_DESC_ROLE, which store Category/WidgetDescription
    respectively. Furthermore QtWidgetRegistry.WIDGET_ACTION_ROLE stores a
    default QAction which can be used for widget creation action.
    """

    CATEGORY_DESC_ROLE = Qt.UserRole + 1
    """Category Description Role"""

    WIDGET_DESC_ROLE = Qt.UserRole + 2
    """Widget Description Role"""

    WIDGET_ACTION_ROLE = Qt.UserRole + 3
    """Widget Action Role"""

    BACKGROUND_ROLE = Qt.UserRole + 4
    """Background color for widget/category in the canvas
    (different from Qt.BackgroundRole)
    """

    category_added = Signal(str, CategoryDescription)
    """signal: category_added(name: str, desc: CategoryDescription)
    """

    widget_added = Signal(str, str, WidgetDescription)
    """signal widget_added(category_name: str, widget_name: str,
                           desc: WidgetDescription)
    """

    reset = Signal()
    """signal: reset()
    """

    def __init__(self, other_or_parent=None, parent=None):
        # Allow either (parent) or (other_registry, parent) call forms.
        if isinstance(other_or_parent, QObject) and parent is None:
            parent, other_or_parent = other_or_parent, None
        QObject.__init__(self, parent)
        WidgetRegistry.__init__(self, other_or_parent)

        # Should the QStandardItemModel be subclassed?
        self.__item_model = QStandardItemModel(self)

        # Populate the model from any categories/widgets copied from
        # `other_or_parent` by the WidgetRegistry base constructor.
        for i, desc in enumerate(self.categories()):
            cat_item = self._cat_desc_to_std_item(desc)
            self.__item_model.insertRow(i, cat_item)

            for j, wdesc in enumerate(self.widgets(desc.name)):
                widget_item = self._widget_desc_to_std_item(wdesc, desc)
                cat_item.insertRow(j, widget_item)

    def model(self):
        """
        Return the widget descriptions in a Qt Item Model instance
        (QStandardItemModel).

        .. note:: The model should not be modified outside of the registry.
        """
        return self.__item_model

    def item_for_widget(self, widget):
        """Return the QStandardItem for the widget.
        """
        if isinstance(widget, str):
            widget = self.widget(widget)
        cat = self.category(widget.category)
        cat_ind = self.categories().index(cat)
        cat_item = self.model().item(cat_ind)
        widget_ind = self.widgets(cat).index(widget)
        return cat_item.child(widget_ind)

    def action_for_widget(self, widget):
        """
        Return the QAction instance for the widget (can be a string or
        a WidgetDescription instance).
        """
        item = self.item_for_widget(widget)
        return item.data(self.WIDGET_ACTION_ROLE)

    def create_action_for_item(self, item):
        """
        Create a QAction instance for the widget description item.
        """
        name = item.text()
        tooltip = item.toolTip()
        whatsThis = item.whatsThis()
        icon = item.icon()
        if icon:
            action = QAction(icon, name, self, toolTip=tooltip,
                             whatsThis=whatsThis,
                             statusTip=name)
        else:
            action = QAction(name, self, toolTip=tooltip,
                             whatsThis=whatsThis,
                             statusTip=name)

        widget_desc = item.data(self.WIDGET_DESC_ROLE)
        action.setData(widget_desc)
        action.setProperty("item", item)
        return action

    def _insert_category(self, desc):
        """
        Override to update the item model and emit the signals.
        """
        # Categories are kept sorted by priority; find the insertion point.
        priority = desc.priority
        priorities = [c.priority for c, _ in self.registry]
        insertion_i = bisect.bisect_right(priorities, priority)

        WidgetRegistry._insert_category(self, desc)

        cat_item = self._cat_desc_to_std_item(desc)
        self.__item_model.insertRow(insertion_i, cat_item)

        self.category_added.emit(desc.name, desc)

    def _insert_widget(self, category, desc):
        """
        Override to update the item model and emit the signals.
        """
        assert(isinstance(category, CategoryDescription))
        categories = self.categories()
        cat_i = categories.index(category)
        # Widgets within a category are also kept sorted by priority.
        _, widgets = self._categories_dict[category.name]
        priorities = [w.priority for w in widgets]
        insertion_i = bisect.bisect_right(priorities, desc.priority)

        WidgetRegistry._insert_widget(self, category, desc)

        cat_item = self.__item_model.item(cat_i)
        widget_item = self._widget_desc_to_std_item(desc, category)

        cat_item.insertRow(insertion_i, widget_item)

        self.widget_added.emit(category.name, desc.name, desc)

    def _cat_desc_to_std_item(self, desc):
        """
        Create a QStandardItem for the category description.
        """
        item = QStandardItem()
        item.setText(desc.name)

        if desc.icon:
            icon = desc.icon
        else:
            icon = "icons/default-category.svg"
        icon = icon_loader.from_description(desc).get(icon)
        item.setIcon(icon)

        if desc.background:
            background = desc.background
        else:
            background = DEFAULT_COLOR
        # Resolve symbolic color names to concrete values.
        background = NAMED_COLORS.get(background, background)
        brush = QBrush(QColor(background))
        item.setData(brush, self.BACKGROUND_ROLE)

        tooltip = desc.description if desc.description else desc.name
        item.setToolTip(tooltip)
        item.setFlags(Qt.ItemIsEnabled)
        item.setData(desc, self.CATEGORY_DESC_ROLE)
        return item

    def _widget_desc_to_std_item(self, desc, category):
        """
        Create a QStandardItem for the widget description.
        """
        item = QStandardItem(desc.name)
        item.setText(desc.name)

        if desc.icon:
            icon = desc.icon
        else:
            icon = "icons/default-widget.svg"
        icon = icon_loader.from_description(desc).get(icon)
        item.setIcon(icon)

        # This should be inherited from the category.
        background = None
        if desc.background:
            background = desc.background
        elif category.background:
            background = category.background
        else:
            background = DEFAULT_COLOR

        if background is not None:
            background = NAMED_COLORS.get(background, background)
            brush = QBrush(QColor(background))
            item.setData(brush, self.BACKGROUND_ROLE)

        tooltip = tooltip_helper(desc)
        style = "ul { margin-top: 1px; margin-bottom: 1px; }"
        tooltip = TOOLTIP_TEMPLATE.format(style=style, tooltip=tooltip)
        item.setToolTip(tooltip)
        item.setWhatsThis(whats_this_helper(desc))
        item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
        item.setData(desc, self.WIDGET_DESC_ROLE)

        # Create the action for the widget_item
        action = self.create_action_for_item(item)
        item.setData(action, self.WIDGET_ACTION_ROLE)
        return item
# HTML scaffold wrapped around tooltip_helper() output (see
# QtWidgetRegistry._widget_desc_to_std_item); {style} receives a CSS
# snippet and {tooltip} the tooltip body.
TOOLTIP_TEMPLATE = """\
<html>
<head>
<style type="text/css">
{style}
</style>
</head>
<body>
{tooltip}
</body>
</html>
"""
def tooltip_helper(desc):
    """Build the HTML tooltip body for a widget description.

    Sections (title, description, inputs, outputs) are joined with <hr/>.
    """
    parts = ["<b>{0}</b>".format(escape(desc.name))]
    if desc.project_name:
        # Original upstream form: " (from {0})".format(desc.project_name)
        parts[0] += " ({0}提供)".format("商智通")

    if desc.description:
        parts.append(escape(desc.description))

    item_fmt = "<li>{name}</li>"

    if desc.inputs:
        items = "".join(item_fmt.format(name=inp.name) for inp in desc.inputs)
        parts.append("输入:<ul>{0}</ul>".format(items))
    else:
        parts.append("无输入")

    if desc.outputs:
        items = "".join(item_fmt.format(name=out.name) for out in desc.outputs)
        parts.append("输出:<ul>{0}</ul>".format(items))
    else:
        parts.append("无输出")

    return "<hr/>".join(parts)
def whats_this_helper(desc, include_more_link=False):
    """
    Compose the rich-text "What's this" help for a widget description.
    If *include_more_link* is True the text ends with a `more...` link.
    """
    help_url = desc.help
    if not help_url:
        # No explicit help page: fall back to an internal help-search URL
        # keyed by the widget's qualified name.
        help_url = "help://search?" + urlencode({"id": desc.qualified_name})

    parts = ["<h3>{0}</h3>".format(escape(desc.name))]

    if desc.description:
        parts.append("<p>{0}</p>".format(escape(desc.description)))

    if include_more_link and help_url:
        parts.append("<a href='{0}'>更多...</a>".format(escape(help_url)))

    return "\n".join(parts)
def run_discovery(entry_points_iter, cached=False):
    """
    Run the default widget discovery over *entry_points_iter* and return a
    populated :class:`QtWidgetRegistry`.

    When *cached* is True, cached widget descriptions are reused and the
    cache is saved back after discovery.
    """
    reg_cache = {}
    if cached:
        reg_cache = cache.registry_cache()

    discovery = QtWidgetDiscovery(cached_descriptions=reg_cache)
    registry = QtWidgetRegistry()
    discovery.found_category.connect(registry.register_category)
    discovery.found_widget.connect(registry.register_widget)
    # BUG FIX: the entry points iterator was silently dropped —
    # discovery.run() was called with no arguments although
    # QtWidgetDiscovery.run(self, entry_points_iter) requires one.
    discovery.run(entry_points_iter)
    if cached:
        cache.save_registry_cache(reg_cache)
    return registry
/ExifRead-nocycle-3.0.1.tar.gz/ExifRead-nocycle-3.0.1/exifread/heic.py |
# As of 2019, the latest standard seems to be "ISO/IEC 14496-12:2015"
# There are many different related standards. (quicktime, mov, mp4, etc...)
# See https://en.wikipedia.org/wiki/ISO_base_media_file_format for more details.
# We parse just enough of the ISO format to locate the Exif data in the file.
# Inside the 'meta' box are two directories we need:
# 1) the 'iinf' box contains 'infe' records, we look for the item_id for 'Exif'.
# 2) once we have the item_id, we find a matching entry in the 'iloc' box, which
# gives us position and size information.
import struct
from typing import List, Dict, Callable, BinaryIO, Optional
from .exif_log import get_logger
logger = get_logger()
class WrongBox(Exception):
    """Raised when a box of an unexpected type is encountered."""
    pass


class NoParser(Exception):
    """Raised when no parser is registered for a box type."""
    pass


class BoxVersion(Exception):
    """Raised when a box declares an unsupported version."""
    pass


class BadSize(Exception):
    """Raised when a read yields a different byte count than requested,
    or a variant-size field has an unsupported width."""
    pass
class Box:
    """Minimal parse state for one ISO base-media-file-format box."""

    # Scalar defaults shared by all boxes; instances overwrite these as the
    # corresponding fields are parsed.
    version = 0
    minor_version = 0
    item_count = 0
    size = 0
    after = 0
    pos = 0
    base_offset = 0
    exif_infe = None  # type: Optional[Box]
    item_id = 0
    item_type = b''
    item_name = b''
    item_protection_index = 0
    major_brand = b''
    offset_size = 0
    length_size = 0
    base_offset_size = 0
    index_size = 0
    flags = 0

    def __init__(self, name: str):
        self.name = name
        # BUG FIX: 'compat', 'subs' and 'locs' were class-level mutable
        # attributes, so every Box instance shared the same list/dicts
        # (e.g. 'meta.subs[box.name] = box' accumulated entries across all
        # boxes ever parsed). Give each instance fresh containers.
        self.compat = []  # type: List
        self.subs = {}  # type: Dict[str, Box]
        self.locs = {}  # type: Dict

    def __repr__(self) -> str:
        return "<box '%s'>" % self.name

    def set_sizes(self, offset: int, length: int, base_offset: int, index: int):
        """Record the iloc field widths (in bytes) for this box."""
        self.offset_size = offset
        self.length_size = length
        self.base_offset_size = base_offset
        self.index_size = index

    def set_full(self, vflags: int):
        """
        ISO boxes come in 'old' and 'full' variants.
        The 'full' variant contains version and flags information.
        """
        self.version = vflags >> 24
        self.flags = vflags & 0x00ffffff
class HEICExifFinder:
    """Locate the Exif payload inside an HEIC (ISO base media) file.

    Parses just enough of the box structure ('ftyp', 'meta', 'iinf'/'infe',
    'iloc') to find the byte position of the Exif TIFF header.
    """

    def __init__(self, file_handle: BinaryIO):
        self.file_handle = file_handle

    def get(self, nbytes: int) -> bytes:
        """Read exactly *nbytes*; raise EOFError at EOF, BadSize on a
        short read."""
        read = self.file_handle.read(nbytes)
        if not read:
            raise EOFError
        if len(read) != nbytes:
            msg = "get(nbytes={nbytes}) found {read} bytes at position {pos}".format(
                nbytes=nbytes,
                read=len(read),
                pos=self.file_handle.tell()
            )
            raise BadSize(msg)
        return read

    def get16(self) -> int:
        """Read a big-endian unsigned 16-bit integer."""
        return struct.unpack('>H', self.get(2))[0]

    def get32(self) -> int:
        """Read a big-endian unsigned 32-bit integer."""
        return struct.unpack('>L', self.get(4))[0]

    def get64(self) -> int:
        """Read a big-endian unsigned 64-bit integer."""
        return struct.unpack('>Q', self.get(8))[0]

    def get_int4x2(self) -> tuple:
        """Read one byte holding two 4-bit integers (high nibble, low nibble)."""
        num = struct.unpack('>B', self.get(1))[0]
        num0 = num >> 4
        num1 = num & 0xf
        return num0, num1

    def get_int(self, size: int) -> int:
        """some fields have variant-sized data."""
        if size == 2:
            return self.get16()
        if size == 4:
            return self.get32()
        if size == 8:
            return self.get64()
        if size == 0:
            return 0
        raise BadSize(size)

    def get_string(self) -> bytes:
        """Read a NUL-terminated byte string (terminator consumed, not
        included in the result)."""
        read = []
        while True:
            char = self.get(1)
            if char == b'\x00':
                break
            read.append(char)
        return b''.join(read)

    def next_box(self) -> Box:
        """Read the next box header and return a Box positioned at its payload."""
        pos = self.file_handle.tell()
        size = self.get32()
        kind = self.get(4).decode('ascii')
        box = Box(kind)
        if size == 0:
            # signifies 'to the end of the file', we shouldn't see this.
            raise NotImplementedError
        if size == 1:
            # 64-bit size follows type.
            size = self.get64()
            box.size = size - 16
            box.after = pos + size
        else:
            box.size = size - 8
            box.after = pos + size
        box.pos = self.file_handle.tell()
        return box

    def get_full(self, box: Box):
        """Read the version/flags word of a 'full' box into *box*."""
        box.set_full(self.get32())

    def skip(self, box: Box):
        """Seek past the end of *box*."""
        self.file_handle.seek(box.after)

    def expect_parse(self, name: str) -> Box:
        """Scan forward to the next box named *name* and parse it."""
        while True:
            box = self.next_box()
            if box.name == name:
                return self.parse_box(box)
            self.skip(box)

    def get_parser(self, box: Box) -> Callable:
        """Return the parser method for *box*, or raise NoParser."""
        defs = {
            'ftyp': self._parse_ftyp,
            'meta': self._parse_meta,
            'infe': self._parse_infe,
            'iinf': self._parse_iinf,
            'iloc': self._parse_iloc,
        }
        try:
            return defs[box.name]
        except KeyError as err:
            # BUG FIX: a dict lookup raises KeyError, not IndexError; the
            # old 'except IndexError' let the KeyError escape, so NoParser
            # was never raised for unknown box types.
            raise NoParser(box.name) from err

    def parse_box(self, box: Box) -> Box:
        """Dispatch *box* to its parser and leave the stream after it."""
        probe = self.get_parser(box)
        probe(box)
        # in case anything is left unread
        self.file_handle.seek(box.after)
        return box

    def _parse_ftyp(self, box: Box):
        box.major_brand = self.get(4)
        box.minor_version = self.get32()
        box.compat = []
        size = box.size - 8
        while size > 0:
            box.compat.append(self.get(4))
            size -= 4

    def _parse_infe(self, box: Box):
        self.get_full(box)
        if box.version >= 2:
            if box.version == 2:
                box.item_id = self.get16()
            elif box.version == 3:
                box.item_id = self.get32()
            box.item_protection_index = self.get16()
            box.item_type = self.get(4)
            box.item_name = self.get_string()
            # ignore the rest

    def _parse_meta(self, meta: Box):
        self.get_full(meta)
        while self.file_handle.tell() < meta.after:
            box = self.next_box()
            # BUG FIX: get_parser() never returns None (it raises NoParser),
            # so the old 'if psub is not None' skip branch was unreachable
            # and an unknown sub-box type crashed parsing. Unknown boxes are
            # legal in 'meta'; just skip them.
            try:
                psub = self.get_parser(box)
            except NoParser:
                logger.debug('HEIC: skipping %r', box)
            else:
                psub(box)
                meta.subs[box.name] = box
            # skip any unparsed data
            self.skip(box)

    def _parse_iinf(self, box: Box):
        self.get_full(box)
        count = self.get16()
        box.exif_infe = None
        for _ in range(count):
            infe = self.expect_parse('infe')
            if infe.item_type == b'Exif':
                logger.debug("HEIC: found Exif 'infe' box")
                box.exif_infe = infe
                break

    def _parse_iloc(self, box: Box):
        self.get_full(box)
        size0, size1 = self.get_int4x2()
        size2, size3 = self.get_int4x2()
        box.set_sizes(size0, size1, size2, size3)
        if box.version < 2:
            box.item_count = self.get16()
        elif box.version == 2:
            box.item_count = self.get32()
        else:
            raise BoxVersion(2, box.version)
        box.locs = {}
        logger.debug('HEIC: %d iloc items', box.item_count)
        for _ in range(box.item_count):
            if box.version < 2:
                item_id = self.get16()
            elif box.version == 2:
                item_id = self.get32()
            else:
                # notreached
                raise BoxVersion(2, box.version)
            if box.version in (1, 2):
                # ignore construction_method
                self.get16()
            # ignore data_reference_index
            self.get16()
            box.base_offset = self.get_int(box.base_offset_size)
            extent_count = self.get16()
            extents = []
            for _ in range(extent_count):
                if box.version in (1, 2) and box.index_size > 0:
                    self.get_int(box.index_size)
                extent_offset = self.get_int(box.offset_size)
                extent_length = self.get_int(box.length_size)
                extents.append((extent_offset, extent_length))
            box.locs[item_id] = extents

    def find_exif(self) -> tuple:
        """Return (offset, endian_byte) of the Exif TIFF header in the file."""
        ftyp = self.expect_parse('ftyp')
        assert ftyp.major_brand == b'heic'
        assert ftyp.minor_version == 0
        meta = self.expect_parse('meta')
        assert meta.subs['iinf'].exif_infe is not None
        item_id = meta.subs['iinf'].exif_infe.item_id
        extents = meta.subs['iloc'].locs[item_id]
        logger.debug('HEIC: found Exif location.')
        # we expect the Exif data to be in one piece.
        assert len(extents) == 1
        pos, _ = extents[0]
        # looks like there's a kind of pseudo-box here.
        self.file_handle.seek(pos)
        # the payload of "Exif" item may be start with either
        # b'\xFF\xE1\xSS\xSSExif\x00\x00' (with APP1 marker, e.g. Android Q)
        # or
        # b'Exif\x00\x00' (without APP1 marker, e.g. iOS)
        # according to "ISO/IEC 23008-12, 2017-12", both of them are legal
        exif_tiff_header_offset = self.get32()
        assert exif_tiff_header_offset >= 6
        assert self.get(exif_tiff_header_offset)[-6:] == b'Exif\x00\x00'
        offset = self.file_handle.tell()
        endian = self.file_handle.read(1)
        return offset, endian
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/printing/codeprinter.py | from __future__ import annotations
import typing
from ..core import Add, Basic, Integer, Lambda, Mul, Pow, Symbol
from ..core.mul import _keep_coeff
from ..core.relational import Relational
from ..core.sympify import sympify
from ..utilities import default_sort_key
from .precedence import precedence
from .str import StrPrinter
class AssignmentError(Exception):
    """Raised if an assignment variable for a loop is missing."""
    # NOTE(review): not raised anywhere in this module's visible code;
    # presumably part of the public loop-codegen API — confirm before removal.
class Assignment(Relational):
    """
    Represents variable assignment for code generation.

    Parameters
    ==========

    lhs : Expr
        Diofant object representing the lhs of the expression. These should be
        singular objects, such as one would use in writing code. Notable types
        include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
        subclass these types are also supported.

    rhs : Expr
        Diofant object representing the rhs of the expression. This can be any
        type, provided its shape corresponds to that of the lhs. For example,
        a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
        the dimensions will not align.

    Examples
    ========

    >>> Assignment(x, y)
    x := y
    >>> Assignment(x, 0)
    x := 0
    >>> A = MatrixSymbol('A', 1, 3)
    >>> mat = Matrix([x, y, z]).T
    >>> Assignment(A, mat)
    A := Matrix([[x, y, z]])
    >>> Assignment(A[0, 1], x)
    A[0, 1] := x
    """

    rel_op = ':='

    def __new__(cls, lhs, rhs=0, **assumptions):  # pylint: disable=signature-differs
        # Imported here to avoid circular imports at module load time.
        from ..matrices.expressions.matexpr import MatrixElement, MatrixSymbol
        from ..tensor import Indexed
        lhs = sympify(lhs, strict=True)
        rhs = sympify(rhs, strict=True)
        # Tuple of things that can be on the lhs of an assignment
        assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed)
        if not isinstance(lhs, assignable):
            raise TypeError('Cannot assign to lhs of type %s.' % type(lhs))
        # Indexed types implement shape, but don't define it until later. This
        # causes issues in assignment validation. For now, matrices are defined
        # as anything with a shape that is not an Indexed
        lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
        rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
        # If lhs and rhs have same structure, then this assignment is ok
        if lhs_is_mat:
            if not rhs_is_mat:
                raise ValueError('Cannot assign a scalar to a matrix.')
            if lhs.shape != rhs.shape:
                raise ValueError("Dimensions of lhs and rhs don't align.")
        elif rhs_is_mat and not lhs_is_mat:
            raise ValueError('Cannot assign a matrix to a scalar.')
        return Relational.__new__(cls, lhs, rhs, **assumptions)
class CodePrinter(StrPrinter):
"""The base class for code-printing subclasses."""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings: dict[str, typing.Any] = {
'order': None,
'full_prec': 'auto',
'error_on_reserved': False,
'reserved_word_suffix': '_'
}
def __init__(self, settings=None):
super().__init__(settings=settings)
self.reserved_words = set()
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
==========
expr : Expression
The expression to be printed.
assign_to : Symbol, MatrixSymbol, or string (optional)
If provided, the printed code will set the expression to a
variable with name ``assign_to``.
"""
from ..matrices import MatrixSymbol
if isinstance(assign_to, str):
if expr.is_Matrix:
assign_to = MatrixSymbol(assign_to, *expr.shape)
else:
assign_to = Symbol(assign_to)
elif not isinstance(assign_to, (Basic, type(None))):
raise TypeError(f'{type(self).__name__} cannot assign to object of type {type(assign_to)}')
if assign_to:
expr = Assignment(assign_to, expr)
else:
# non-strict sympify is not enough b/c it errors on iterables
expr = sympify(expr)
# keep a set of expressions that are not strictly translatable to Code
# and number constants that must be declared and initialized
self._not_supported = set() # pylint: disable=attribute-defined-outside-init
self._number_symbols = set() # pylint: disable=attribute-defined-outside-init
lines = self._print(expr).splitlines()
# format the output
if self._settings['human']:
frontlines = []
if len(self._not_supported) > 0:
frontlines.append(self._get_comment(
f'Not supported in {self.language}:'))
for expr in sorted(self._not_supported, key=str):
frontlines.append(self._get_comment(type(expr).__name__))
for name, value in sorted(self._number_symbols, key=str):
frontlines.append(self._declare_number_const(name, value))
lines = frontlines + lines
lines = self._format_code(lines)
result = '\n'.join(lines)
else:
lines = self._format_code(lines)
result = (self._number_symbols, self._not_supported,
'\n'.join(lines))
del self._not_supported
del self._number_symbols
return result
def _doprint_loops(self, expr, assign_to=None):
# Here we print an expression that contains Indexed objects, they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements and possibly storing results in temporary
# variables or accumulate it in the assign_to object.
assert self._settings['contract']
from ..tensor import get_contraction_structure
# Setup loops over non-dummy indices -- all terms need these
indices = self._get_expression_indices(expr, assign_to)
# Setup loops over dummy indices -- each term needs separate treatment
dummies = get_contraction_structure(expr)
openloop, closeloop = self._get_loop_opening_ending(indices)
# terms with no summations first
if None in dummies:
text = StrPrinter.doprint(self, Add(*dummies[None]))
else:
# If all terms have summations we must initialize array to Zero
text = StrPrinter.doprint(self, 0)
# skip redundant assignments (where lhs == rhs)
lhs_printed = self._print(assign_to)
lines = []
if text != lhs_printed:
lines.extend(openloop)
assert assign_to is not None
text = self._get_statement(f'{lhs_printed} = {text}')
lines.append(text)
lines.extend(closeloop)
# then terms with summations
for d, dv in dummies.items():
if isinstance(d, tuple):
indices = self._sort_optimized(d, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in dv:
if term in dummies and not ([list(f) for f in dummies[term]]
== [[None] for f in dummies[term]]):
# If one factor in the term has it's own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
'FIXME: no support for contractions in factor yet')
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
assert assign_to is not None
assert not term.has(assign_to)
lines.extend(openloop)
lines.extend(openloop_d)
text = '%s = %s' % (lhs_printed,
StrPrinter.doprint(self,
assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return '\n'.join(lines)
def _get_expression_indices(self, expr, assign_to):
from ..tensor import get_indices
rinds, _ = get_indices(expr)
linds, _ = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError(f'lhs indices must match non-dummy rhs indices in {expr}')
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
from ..tensor import Indexed
if not indices:
return []
# determine optimized loop order by giving a score to each index
# the index with the highest score are put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _rate_index_position(self, p):
"""Function to calculate score based on position among indices.
This method is used to sort loops in an optimized order, see
CodePrinter._sort_optimized().
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc...
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists
of codelines
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _print_Assignment(self, expr):
from ..functions import Piecewise
from ..matrices import MatrixSymbol
from ..tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
elif isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return '\n'.join(lines)
elif self._settings['contract'] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement(f'{lhs_code} = {rhs_code}')
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if name in self.reserved_words:
if self._settings['error_on_reserved']:
msg = ('This expression includes the symbol "{}" which is a '
'reserved keyword in this language.')
raise ValueError(msg.format(name))
return name + self._settings['reserved_word_suffix']
else:
return name
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(*expr.args):
break
else:
return self._print_not_supported(expr)
try:
return func(*[self.parenthesize(item, 0) for item in expr.args])
except TypeError:
return '%s(%s)' % (func, self.stringify(expr.args, ', '))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
_print_MinMaxBase = _print_Function
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings['precision']))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return f'{expr.name}_{expr.dummy_index:d}' # Dummy
def _print_Catalan(self, expr):
return self._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
return self._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
return self._print_NumberSymbol(expr)
def _print_Exp1(self, expr):
return self._print_NumberSymbol(expr)
def _print_Pi(self, expr):
return self._print_NumberSymbol(expr)
def _print_And(self, expr):
PREC = precedence(expr)
return (' %s ' % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (' %s ' % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (' %s ' % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (' %s ' % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
    """Print a product, splitting negative-rational powers into a
    denominator so the output reads ``a*b/(c*d)`` rather than using
    explicit negative exponents."""
    prec = precedence(expr)

    # Pull out the leading rational coefficient to decide the sign.
    c, e = expr.as_coeff_Mul()
    if c < 0:
        expr = _keep_coeff(-c, e)
        sign = '-'
    else:
        sign = ''

    a = []  # items in the numerator
    b = []  # items that are in the denominator (if any)

    if self.order != 'none':
        args = expr.as_ordered_factors()
    else:
        # use make_args in case expr was something like -x -> x
        args = Mul.make_args(expr)

    # Gather args for numerator/denominator
    for item in args:
        if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
            if item.exp != -1:
                # x**-n -> x**n in the denominator; evaluate=False keeps
                # the power from simplifying away before printing.
                b.append(Pow(item.base, -item.exp, evaluate=False))
            else:
                # x**-1 -> plain x in the denominator.
                b.append(Pow(item.base, -item.exp))
        else:
            a.append(item)

    # An empty numerator (everything went into the denominator) prints as 1.
    a = a or [Integer(1)]

    a_str = [self.parenthesize(x, prec) for x in a]
    b_str = [self.parenthesize(x, prec) for x in b]

    if len(b) == 0:
        return sign + '*'.join(a_str)
    elif len(b) == 1:
        return sign + '*'.join(a_str) + '/' + b_str[0]
    else:
        # Multiple denominator factors must be grouped in parentheses.
        return sign + '*'.join(a_str) + '/(%s)' % '*'.join(b_str)
def _print_not_supported(self, expr):
    """Fallback printer: record *expr* as untranslatable and emit the
    default printer's output so callers still receive a string."""
    self._not_supported.add(expr)
    return self.emptyPrinter(expr)
# The following cannot be simply translated into C or Fortran;
# they are all routed to the not-supported fallback above.
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_tuple = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported | PypiClean |
/DataExtractionTiket-0.0.1.tar.gz/DataExtractionTiket-0.0.1/__init__.py | import requests
import pandas as pd
import json
from datetime import datetime
from google.cloud.sql.connector import connector
from threading import Timer
import mysql.connector
def data_extraction():
    """Fetch Google Destination Insights flight-demand data and load it into MySQL.

    Downloads daily travel-demand figures for Jakarta -> East Java domestic
    flights (2021-04-07 up to today), saves them to ``test_pandas.csv`` in the
    working directory, then replaces the contents of the ``data_insights2``
    table with the fresh rows.

    Side effects: writes ``test_pandas.csv``, deletes and re-fills the
    ``data_insights2`` table, prints progress to stdout.
    """
    end_date = datetime.today().strftime('%Y-%m-%d')
    print("test")
    print(end_date)
    print("test")
    # URL for Jakarta -> East Java; the Bali -> Central Java variant only
    # differs in the origin/destination admin-area query parameters.
    url = (
        "https://destinationinsights.withgoogle.com/data/daily"
        "?origin_country=ID&destination_country=ID&travel_type=FLIGHT"
        "&trip_type=DOMESTIC&date_start=2021-04-07&date_end=" + end_date +
        "&destination_admin_area=East%20Java&origin_admin_area=Jakarta"
    )
    r = requests.get(url)
    # Fail loudly on HTTP errors instead of trying to parse an error page.
    r.raise_for_status()
    data = json.loads(r.text)
    var = json.dumps(data['daily_travel_demand'])
    df_json = pd.read_json(var)
    # creating CSV file:
    df_json.to_csv('test_pandas.csv')
    # Re-read the CSV we just wrote. (The original read a machine-specific
    # absolute path, which breaks on any other machine.)
    read_file = pd.read_csv('test_pandas.csv')
    df = pd.DataFrame(read_file, columns=['date', 'current_market_queries', 'last_year_market_queries'])
    print(df)
    # SECURITY: credentials are hard-coded in source; move them to
    # environment variables or a config file outside version control.
    my_db = mysql.connector.connect(host="localhost", user="root", password="Singap0r3", database="tiketinternship")
    try:
        cursor = my_db.cursor()
        # Deleting old data/emptying database table:
        print("deleting all rows in table")
        cursor.execute("DELETE FROM data_insights2")
        print("deleting data success")
        # Inserting CSV rows into the SQL table (label-based access instead
        # of deprecated positional Series indexing):
        sql = ("INSERT INTO data_insights2 "
               "(Nomor, Tanggal, current_queries, lastyear_queries) "
               "VALUES (%s, %s, %s, %s)")
        for i, row in df.iterrows():
            values = (i, row['date'], row['current_market_queries'],
                      row['last_year_market_queries'])
            print(i, values[1], values[2], values[3])
            cursor.execute(sql, values)
            print("Record inserted.")
        # Single commit after all inserts keeps the refresh atomic-ish.
        my_db.commit()
    finally:
        # Fix: the original leaked the connection on every call/error.
        my_db.close()
    print("Success")
#schedule.every().day.at("01:00").do(data_extraction)
#while True:
#schedule.run_pending()
#time.sleep(1)
#origin East Java / Jakarta / West Java --> destination: all destinations, combined in one graph
#baseline: all Indonesia to all Indonesia
/GailBot_Testing_Suite-0.1a8-py3-none-any.whl/gailbot/services/converter/payload/transcribedDirPayload.py | from typing import Union
import os
from .payloadObject import PayLoadObject, PayLoadStatus
from ...organizer.source import SourceObject
from gailbot.core.utils.general import is_directory, is_file, copy
from gailbot.core.utils.logger import makelogger
from gailbot.workspace.manager import WorkspaceManager
from gailbot.configs import service_config_loader, workspace_config_loader
# Marker file name that identifies a directory as GailBot output.
HIDDEN_FILE = service_config_loader().directory_name.hidden_file
# Relative path (inside an output directory) of the saved transcription result.
OUTPUT_RESULT = workspace_config_loader().get_output_structure().transcribe_result
logger = makelogger("transcribed_dir_payload")
# Base name (sans extension) of the pre-merged audio file to look for.
MERGED_FILE_NAME = "merged"
def load_transcribed_dir_payload(
    source: "SourceObject", ws_manager: "WorkspaceManager"
) -> "Union[bool, List[PayLoadObject]]":
    """ given a source object, convert it into an PayloadObject
        if the source stores a gailbot output directory that contains
        the gailbot output result

    Args:
        source (SourceObject): an instance of SourceObject that stores the
        datafile and setting of the transcription
        ws_manager (WorkspaceManager): workspace manager used to stage the
        payload's files

    Returns:
        Union[bool, List[PayLoadObject]]: return a single-element list with
        the converted payload if the conversion is successful, return False
        otherwise (missing settings, unsupported path, or a construction
        error, which is logged)
    """
    # No transcription settings attached -> nothing to convert.
    if not source.setting:
        return False
    if not TranscribedDirPayload.is_supported(source.source_path()):
        return False
    try:
        # NOTE: callers expect a list, matching the other payload loaders;
        # the return annotation previously claimed a bare PayLoadObject.
        return [TranscribedDirPayload(source, ws_manager)]
    except Exception as e:
        logger.error(e, exc_info=e)
        return False
class TranscribedDirPayload(PayLoadObject):
    """
    Payload for a source directory that already contains GailBot
    transcription output, so the transcription stage can be skipped.
    """
    def __init__(self, source: SourceObject, ws_manager: WorkspaceManager) -> None:
        super().__init__(source, ws_manager)
        # Load the previously produced transcription result from inside the
        # copied source directory; a failure is logged but does not raise.
        if not self.transcription_result.load_result(
            os.path.join(self.data_files[0], OUTPUT_RESULT)):
            logger.error("result cannot be loaded")
        # NOTE(review): this overwrites the TRANSCRIBED status set by
        # _set_initial_status (called during base-class init) -- confirm
        # which status is actually intended after construction.
        self.status = PayLoadStatus.INITIALIZED

    @staticmethod
    def is_supported(file_path: str) -> bool:
        """
        Determines if a given path is a GailBot output directory

        Args:
            file_path: str: path to check

        Returns:
            bool: True if the path is a directory containing the GailBot
            marker file, False otherwise
        """
        if not is_directory(file_path):
            return False
        return is_file(os.path.join(file_path, HIDDEN_FILE))

    def _copy_file(self) -> None:
        """
        Copies the whole source directory into the payload workspace and
        records the copy as the payload's single data file
        """
        tgt_path = os.path.join(self.workspace.data_copy, f"{self.name}")
        copy(self.original_source, tgt_path)
        self.data_files = [tgt_path]

    def _set_initial_status(self) -> None:
        """
        Sets the initial status of the payload object to TRANSCRIBED,
        since the source already holds transcription output
        """
        self.status = PayLoadStatus.TRANSCRIBED

    def _merge_audio(self):
        # Search the original source tree for a pre-merged audio file and
        # copy it into the output media directory.
        # NOTE(review): returns None on success but False on failure --
        # callers should treat any non-False result as success.
        try:
            for root, dirs, files in os.walk(self.original_source):
                for file in files:
                    file_name, file_extension = os.path.splitext(file)
                    if file_name == MERGED_FILE_NAME:
                        logger.info(file)
                        merged_path = copy(os.path.join(root,file), self.out_dir.media_file)
                        self.merged_audio = merged_path
                        assert merged_path
                        break
        except Exception as e:
            logger.error(e, exc_info=e)
            return False

    @staticmethod
    def supported_format() -> str:
        """
        Returns the human-readable name of the supported input format
        """
        return "transcribed directory"
def __repr__(self) -> str:
    """Short human-readable description of this payload type."""
    return "Transcribed directory payload"
/Caboodle-1.0.2.tar.gz/Caboodle-1.0.2/docs/libraries/d3cryp7.md | d3cryp7
=======
d3cryp7 Library
The d3cryp7 library defines functions to interact with d3cryp7's web API.
See the unit tests for this library for more information.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
**Functions:**
--------------
#### recognize_image(url, image)
Posts an image to be recognized
**Args:**
| Name | Type | Description |
|-------|--------|----------------------------------|
| url | String | The URL of the d3cryp7 API |
| image | Bytes | The base64 encoded image to post |
**Returns:**
| Type | Description |
|--------|-------------------------------------------|
| String | A string containing the text in the image |
**Raises:**
| Name | Description |
|--------------|-----------------|
| RuntimeError | The post failed |
#### tag_image(url, image)
Posts an image to be tagged
**Args:**
| Name | Type | Description |
|-------|--------|----------------------------------|
| url | String | The URL of the d3cryp7 API |
| image | Bytes | The base64 encoded image to post |
**Returns:**
| Type | Description |
|------------|----------------------------------------------|
| Dictionary | A dictionary of tags and their probabilities |
**Raises:**
| Name | Description |
|--------------|-----------------|
| RuntimeError | The post failed |
#### tag_image_grid(url, image, col, row)
Posts an image grid to be tagged
**Args:**
| Name | Type | Description |
|-------|---------|------------------------------------|
| url | String | The URL of the d3cryp7 API |
| image | Bytes | The base64 encoded image to post |
| col | Integer | The number of columns in the image |
| row | Integer | The number of rows in the image |
**Returns:**
| Type | Description |
|-------|---------------------------------------------------------|
| Tuple | A tuple of dictionaries of tags and their probabilities |
**Raises:**
| Name | Description |
|--------------|-----------------|
| RuntimeError | The post failed |
#### generate_bbs(w, h, col, row)
Generates a tuple of bounding boxes to crop a grid of images
**Args:**
| Name | Type | Description |
|-------|---------|-----------------------------------|
| w | Integer | The width of an image |
| h | Integer | The height of an image |
| col | Integer | The number of columns in an image |
| row | Integer | The number of rows in an image |
**Returns:**
| Type | Description |
|-------|-------------------------------------------|
| Tuple | A tuple of tuples that contain 4 integers |
#### get_rate(url, currency)
Calculates the solving rate in any currency
**Args:**
| Name | Type | Description |
|----------|--------|----------------------------|
| url | String | The URL of the d3cryp7 API |
| currency | String | The currency to convert to |
**Returns:**
| Type | Description |
|------------|---------------------------------------------------------|
| Dictionary | A dictionary of solving rates as floating point numbers |
The currency must be a three letter code like USD or EUR.
#### success(url, id)
Reports a posted image as successful
**Args:**
| Name | Type | Description |
|------|--------|----------------------------|
| url | String | The URL of the d3cryp7 API |
| id | String | The ID of the posted image |
#### invalid(url, id)
Reports a posted image as invalid
**Args:**
| Name | Type | Description |
|------|--------|----------------------------|
| url | String | The URL of the d3cryp7 API |
| id | String | The ID of the posted image |
| PypiClean |
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/custom_scalar.py | from __future__ import annotations
import sys
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
Mapping,
NewType,
Optional,
Type,
TypeVar,
Union,
overload,
)
from strawberry.exceptions import InvalidUnionTypeError
from strawberry.type import StrawberryOptional, StrawberryType
from .utils.str_converters import to_camel_case
if TYPE_CHECKING:
from graphql import GraphQLScalarType
# In Python 3.10+ NewType is a class, so NewType instances are also valid
# scalar targets and must be admitted by the TypeVar bound.
if sys.version_info >= (3, 10):
    _T = TypeVar("_T", bound=Union[type, NewType])
else:
    _T = TypeVar("_T", bound=type)
def identity(x: _T) -> _T:
    """Default ``serialize`` hook: return the value unchanged."""
    return x
@dataclass
class ScalarDefinition(StrawberryType):
    """GraphQL metadata for a custom scalar: its public name, docs, and
    serialization hooks."""
    name: str
    description: Optional[str]
    specified_by_url: Optional[str]
    serialize: Optional[Callable]
    parse_value: Optional[Callable]
    parse_literal: Optional[Callable]
    directives: Iterable[object] = ()
    # Optionally store the GraphQLScalarType instance so that we don't get
    # duplicates
    implementation: Optional[GraphQLScalarType] = None

    # used for better error messages
    _source_file: Optional[str] = None
    _source_line: Optional[int] = None

    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
    ) -> Union[StrawberryType, type]:
        # Scalars hold no type variables, so copying is delegated to the
        # base-class behavior unchanged.
        return super().copy_with(type_var_map)  # type: ignore[safe-super]

    @property
    def is_generic(self) -> bool:
        # A scalar can never be generic.
        return False
class ScalarWrapper:
    """Callable wrapper returned by ``strawberry.scalar``.

    Keeps the wrapped type callable (calls pass straight through) while
    ``_scalar_definition`` carries the GraphQL metadata attached by
    ``_process_scalar``.
    """
    _scalar_definition: ScalarDefinition

    def __init__(self, wrap: Callable[[Any], Any]):
        # The wrapped type (or NewType) this scalar stands in for.
        self.wrap = wrap

    def __call__(self, *args, **kwargs):
        # Delegate construction to the wrapped type.
        return self.wrap(*args, **kwargs)

    def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
        if other is None:
            # Return the correct notation when using `StrawberryUnion | None`.
            return StrawberryOptional(of_type=self)

        # Raise an error in any other case.
        # There is Work in progress to deal with more merging cases, see:
        # https://github.com/strawberry-graphql/strawberry/pull/1455
        raise InvalidUnionTypeError(str(other), self.wrap)
def _process_scalar(
    cls: Type[_T],
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Optional[Callable] = None,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
):
    """Build a ``ScalarWrapper`` for *cls* carrying a ``ScalarDefinition``.

    The GraphQL name defaults to the camel-cased class name.

    NOTE: ``sys._getframe(3)`` assumes this function is always reached
    three frames below the user's declaration site (via ``scalar``/``wrap``);
    keep that call depth stable when refactoring.
    """
    # Imported lazily to avoid a circular import with the exceptions module.
    from strawberry.exceptions.handler import should_use_rich_exceptions

    name = name or to_camel_case(cls.__name__)

    _source_file = None
    _source_line = None

    if should_use_rich_exceptions():
        # Capture where the user declared the scalar, for rich error output.
        frame = sys._getframe(3)
        _source_file = frame.f_code.co_filename
        _source_line = frame.f_lineno

    wrapper = ScalarWrapper(cls)
    wrapper._scalar_definition = ScalarDefinition(
        name=name,
        description=description,
        specified_by_url=specified_by_url,
        serialize=serialize,
        parse_literal=parse_literal,
        parse_value=parse_value,
        directives=directives,
        _source_file=_source_file,
        _source_line=_source_line,
    )

    return wrapper
# Typing overloads: ``scalar`` may be called with options only (returns a
# decorator) or with the type as the first argument (returns the type).
@overload
def scalar(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> Callable[[_T], _T]:
    ...


@overload
def scalar(
    cls: _T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> _T:
    ...
# FIXME: We are tricking pyright into thinking that we are returning the given type
# here or else it won't let us use any custom scalar to annotate attributes in
# dataclasses/types. This should be properly solved when implementing StrawberryScalar
def scalar(
    cls=None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> Any:
    """Annotates a class or type as a GraphQL custom scalar.

    Can be used either as a direct call (first two examples) or as a
    decorator factory (third example). In both forms ``parse_value``
    defaults to the wrapped type itself.

    Example usages:

    >>> strawberry.scalar(
    >>>    datetime.date,
    >>>    serialize=lambda value: value.isoformat(),
    >>>    parse_value=datetime.parse_date
    >>> )

    >>> Base64Encoded = strawberry.scalar(
    >>>    NewType("Base64Encoded", bytes),
    >>>    serialize=base64.b64encode,
    >>>    parse_value=base64.b64decode
    >>> )

    >>> @strawberry.scalar(
    >>>    serialize=lambda value: ",".join(value.items),
    >>>    parse_value=lambda value: CustomList(value.split(","))
    >>> )
    >>> class CustomList:
    >>>    def __init__(self, items):
    >>>        self.items = items

    """

    def wrap(cls):
        return _process_scalar(
            cls,
            name=name,
            description=description,
            specified_by_url=specified_by_url,
            serialize=serialize,
            # Resolve the parse_value default *here*, not before ``wrap``:
            # in the decorator-with-arguments form ``cls`` is None until the
            # decorator is applied, so resolving early silently left
            # parse_value as None instead of the decorated class.
            parse_value=cls if parse_value is None else parse_value,
            parse_literal=parse_literal,
            directives=directives,
        )

    if cls is None:
        # Called with options only: act as a decorator factory.
        return wrap

    return wrap(cls)
/MeneTools-3.3.0-py3-none-any.whl/menetools/meneseed.py |
# Copyright (C) 2017-2023 Clémence Frioux & Arnaud Belcour - Inria Dyliss - Pleiade
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import json
import logging
import sys
from menetools import query, sbml
from xml.etree.ElementTree import ParseError
logger = logging.getLogger('menetools.meneseed')
def run_meneseed(draft_sbml, output=None):
    """
    Identify seeds in a metabolic network.

    Args:
        draft_sbml (str): SBML metabolic network file
        output (str): optional path to a JSON output file

    Returns:
        dictionary: {'seeds': [...]} seed compounds (metabolites related
        to exchange reactions)

    Exits with status 1 if the SBML file is missing or malformed.
    """
    logger.info(f'Reading draft network from {draft_sbml}')
    try:
        draftnet = sbml.readSBMLnetwork_clyngor(draft_sbml, 'draft')
    except FileNotFoundError:
        logger.critical(f'File not found: {draft_sbml}')
        sys.exit(1)
    except ParseError:
        logger.critical(f'Invalid syntax in SBML file: {draft_sbml}')
        sys.exit(1)

    logger.info('\nChecking draft network exchange reactions')
    sys.stdout.flush()
    model = query.get_seed(draftnet)
    seeds = []
    for pred in model:
        if pred == "seed":
            # Collect the first argument of every "seed" atom. (Fix: the
            # original used a list comprehension purely for its
            # ``append`` side effects.)
            seeds.extend(a[0] for a in model[pred, 1])

    results = {'seeds': seeds}
    logger.info(
        f"{len(seeds)} seed metabolites (related to exchange reactions):"
    )
    logger.info('\n'.join(seeds))

    if output:
        with open(output, "w") as output_file:
            json.dump(results, output_file, indent=True, sort_keys=True)

    return results
/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/data/tokenizers/bert_tokenizer_utils.py | import collections
from typing import Dict, List, Optional, Tuple
import unicodedata
__all__ = [
"load_vocab",
"BasicTokenizer",
"WordpieceTokenizer",
]
def load_vocab(vocab_file: str) -> Dict[str, int]:
    r"""Loads a vocabulary file into a dictionary mapping token -> line index."""
    vocab: Dict[str, int] = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # One token per line; the trailing newline is not part of the token.
        for index, line in enumerate(reader):
            vocab[line.rstrip('\n')] = index
    return vocab
class BasicTokenizer:
    r"""Runs basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case: Whether to lower case the input.
        never_split: A list of tokens not to split.
        tokenize_chinese_chars: Whether to tokenize Chinese characters.
            This should likely be deactivated for Japanese:
            see:
            `https://github.com/huggingface/pytorch-pretrained-BERT/issues/328`
    """

    def __init__(self, do_lower_case: bool = True,
                 never_split: Optional[List[str]] = None,
                 tokenize_chinese_chars: bool = True):
        if never_split is None:
            never_split = []
        # Whether to lower-case tokens (never_split tokens are exempt).
        self.do_lower_case = do_lower_case
        # Tokens exempt from lower-casing/accent stripping.
        self.never_split = never_split
        self.tokenize_chinese_chars = tokenize_chinese_chars

    def tokenize(self, text: str,
                 never_split: Optional[List[str]] = None) -> \
            List[str]:
        r"""Basic tokenization of a piece of text.

        Split on white spaces only, for sub-word tokenization, see
        WordPieceTokenizer.

        Args:
            text: An input string.
            never_split: A list of tokens not to split.
        """
        # Per-call never_split entries extend (not replace) the instance list.
        never_split = self.never_split + (never_split
                                          if never_split is not None else [])
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # see: https://github.com/google-research/bert/blob/master/
        # tokenization.py#L201
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))

        # Re-join and re-split to flatten the per-token piece lists.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    @classmethod
    def _run_strip_accents(cls, text: str) -> str:
        r"""Strips accents from a piece of text.

        Example:
            accented_string = 'Málaga'
            _run_strip_accents(accented_string) # 'Malaga'
        """
        # NFD decomposition separates base characters from combining marks.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                # Drop nonspacing combining marks (the accents themselves).
                continue
            output.append(char)
        return "".join(output)

    @classmethod
    def _run_split_on_punc(cls, text: str,
                           never_split: Optional[List[str]] = None) -> \
            List[str]:
        r"""Splits punctuation on a piece of text.

        Example:
            text = 'Texar-PyTorch is an open-source toolkit based on PyTorch.'
            _run_split_on_punc(text)
            # ['Texar', '-', 'PyTorch is an open', '-',
            #  'source toolkit based on PyTorch', '.']
        """
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation character becomes its own token and the
                # next non-punctuation character starts a new token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text: str) -> str:
        r"""Adds whitespace around any CJK character.

        Example:
            text = '今天天气不错'
            _tokenize_chinese_chars(text)
            # ' 今 天 天 气 不 错 '
        """
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    @classmethod
    def _is_chinese_char(cls, cp: int) -> bool:
        r"""Checks whether cp is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode
        # block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean
        # characters, despite its name. The modern Korean Hangul alphabet is a
        # different block, as is Japanese Hiragana and Katakana. Those
        # alphabets are used to write space-separated words, so they are not
        # treated specially and handled like the all of the other languages.
        if ((0x4E00 <= cp <= 0x9FFF) or
                (0x3400 <= cp <= 0x4DBF) or
                (0x20000 <= cp <= 0x2A6DF) or
                (0x2A700 <= cp <= 0x2B73F) or
                (0x2B740 <= cp <= 0x2B81F) or
                (0x2B820 <= cp <= 0x2CEAF) or
                (0xF900 <= cp <= 0xFAFF) or
                (0x2F800 <= cp <= 0x2FA1F)):
            return True
        return False

    @classmethod
    def _clean_text(cls, text: str) -> str:
        r"""Performs invalid character removal and whitespace cleanup on text.

        Example:
            text = 'Texar-PyTorch\tis an open-source\ntoolkit based on PyTorch.'
            _clean_text(text)
            # 'Texar-PyTorch is an open-source toolkit based on PyTorch.'
        """
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and controls.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                # Normalize every whitespace flavor to a single space.
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer:
    r"""Runs WordPiece tokenization."""

    def __init__(self, vocab: Dict[str, int],
                 unk_token: str,
                 max_input_chars_per_word: int = 100):
        # Token -> id mapping; only membership is used here.
        self.vocab = vocab
        # Token emitted for words that cannot be decomposed into pieces.
        self.unk_token = unk_token
        # Words longer than this map straight to unk_token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize_with_span(self, text: str) -> List[Tuple[str, int, int]]:
        r"""Tokenizes a piece of text into its word pieces with span info.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example:
            input = "unaffable"
            output = [("un", 0, 2), ("##aff", 2, 5), ("##able", 5, 9)]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens with span information (begin, end).
        """
        output_tokens_and_span: List[Tuple[str, int, int]] = []
        token_start = 0
        for token in whitespace_tokenize(text):
            assert token is not None
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Overlong word: a single unk covering the whole span.
                output_tokens_and_span.append((self.unk_token,
                                               token_start,
                                               token_start + len(chars)))
                continue

            is_bad = False
            start = 0
            sub_tokens_and_span: List[Tuple[str, int, int]] = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Greedy longest-match-first: shrink the candidate until the
                # piece ("##"-prefixed when non-initial) is in the vocab.
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No vocab piece matches at this position: discard the
                    # partial pieces and emit unk for the whole word.
                    is_bad = True
                    break
                sub_tokens_and_span.append((cur_substr,
                                            token_start + start,
                                            token_start + end))
                start = end

            if is_bad:
                output_tokens_and_span.append((self.unk_token,
                                               token_start,
                                               token_start + len(chars)))
            else:
                output_tokens_and_span.extend(sub_tokens_and_span)
            # +1 accounts for the single whitespace separator assumed
            # between consecutive tokens.
            token_start += len(chars) + 1
        return output_tokens_and_span

    def tokenize(self, text: str) -> List[str]:
        """
        Tokenizes a piece of text into its word pieces.

        This calls self.tokenize_with_span to extract tokens with span info,
        then extracts only tokens to form a list.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens_and_span = self.tokenize_with_span(text)
        output_tokens: List[str] = []
        for tokens_with_span in output_tokens_and_span:
            output_tokens.append(tokens_with_span[0])
        return output_tokens
def whitespace_tokenize(text: str) -> List[str]:
    r"""Runs basic whitespace cleaning and splitting on a piece of text."""
    cleaned = text.strip()
    if not cleaned:
        # Nothing but whitespace (or empty input): no tokens.
        return []
    return cleaned.split()
return tokens
def _is_whitespace(char: str) -> bool:
r"""Checks whether `char` is a whitespace character.
Note: this function is not standard and should be considered for BERT
tokenization only. See the comments for more details.
"""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char in (" ", "\t", "\n", "\r"):
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char: str) -> bool:
r"""Checks whether `char` is a control character.
Note: this function is not standard and should be considered for BERT
tokenization only. See the comments for more details.
"""
# These are technically control characters but we count them as whitespace
# characters.
if char in ("\t", "\n", "\r"):
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char: str) -> bool:
r"""Checks whether `char` is a punctuation character.
Note: this function is not standard and should be considered for BERT
tokenization only. See the comments for more details.
"""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/lang/sk.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['sk']={"dir":"ltr","editor":"Editor formátovaného textu","common":{"editorHelp":"Stlačte ALT 0 pre nápovedu","browseServer":"Prechádzať server","url":"URL","protocol":"Protokol","upload":"Odoslať","uploadSubmit":"Odoslať na server","image":"Obrázok","flash":"Flash","form":"Formulár","checkbox":"Zaškrtávacie políčko","radio":"Prepínač","textField":"Textové pole","textarea":"Textová oblasť","hiddenField":"Skryté pole","button":"Tlačidlo","select":"Rozbaľovací zoznam","imageButton":"Obrázkové tlačidlo","notSet":"<nenastavené>","id":"Id","name":"Meno","langDir":"Orientácia jazyka","langDirLtr":"Zľava doprava (LTR)","langDirRtl":"Sprava doľava (RTL)","langCode":"Kód jazyka","longDescr":"Dlhý popis URL","cssClass":"Trieda štýlu","advisoryTitle":"Pomocný titulok","cssStyle":"Štýl","ok":"OK","cancel":"Zrušiť","close":"Zatvorit","preview":"Náhľad","resize":"Zmeniť veľkosť","generalTab":"Hlavné","advancedTab":"Rozšírené","validateNumberFailed":"Hodnota nieje číslo.","confirmNewPage":"Prajete si načítat novú stránku? Všetky neuložené zmeny budú stratené. ","confirmCancel":"Niektore možnosti boli zmenené. 
Naozaj chcete zavrieť okno?","options":"Možnosti","target":"Cieľ","targetNew":"Nové okno (_blank)","targetTop":"Najvrchnejšie okno (_top)","targetSelf":"To isté okno (_self)","targetParent":"Rodičovské okno (_parent)","langDirLTR":"Zľava doprava (LTR)","langDirRTL":"Sprava doľava (RTL)","styles":"Štýl","cssClasses":"Triedy štýlu","width":"Šírka","height":"Výška","align":"Zarovnanie","alignLeft":"Vľavo","alignRight":"Vpravo","alignCenter":"Na stred","alignTop":"Nahor","alignMiddle":"Na stred","alignBottom":"Dole","invalidValue":"Neplatná hodnota.","invalidHeight":"Výška musí byť číslo.","invalidWidth":"Šírka musí byť číslo.","invalidCssLength":"Špecifikovaná hodnota pre pole \"%1\" musí byť kladné číslo s alebo bez platnej CSS mernej jednotky (px, %, in, cm, mm, em, ex, pt alebo pc).","invalidHtmlLength":"Špecifikovaná hodnota pre pole \"%1\" musí byť kladné číslo s alebo bez platnej HTML mernej jednotky (px alebo %).","invalidInlineStyle":"Zadaná hodnota pre inline štýl musí pozostávať s jedného, alebo viac dvojíc formátu \"názov: hodnota\", oddelených bodkočiarkou.","cssLengthTooltip":"Vložte číslo pre hodnotu v pixeloch alebo číslo so správnou CSS jednotou (px, %, in, cm, mm, em, ex, pt alebo pc).","unavailable":"%1<span class=\"cke_accessibility\">, nedostupný</span>"},"about":{"copy":"Copyright © $1. Všetky práva vyhradené.","dlgTitle":"O CKEditor-e","help":"Zaškrtnite $1 pre pomoc.","moreInfo":"Pre informácie o licenciách, prosíme, navštívte našu web stránku:","title":"O CKEditor-e","userGuide":"Používateľská príručka KCEditor-a"},"basicstyles":{"bold":"Tučné","italic":"Kurzíva","strike":"Prečiarknuté","subscript":"Dolný index","superscript":"Horný index","underline":"Podčiarknuté"},"bidi":{"ltr":"Smer textu zľava doprava","rtl":"Smer textu sprava doľava"},"blockquote":{"toolbar":"Citácia"},"clipboard":{"copy":"Kopírovať","copyError":"Bezpečnostné nastavenia Vášho prehliadača nedovoľujú editoru automaticky spustiť operáciu kopírovania. 
Prosím, použite na to klávesnicu (Ctrl/Cmd+C).","cut":"Vystrihnúť","cutError":"Bezpečnostné nastavenia Vášho prehliadača nedovoľujú editoru automaticky spustiť operáciu vystrihnutia. Prosím, použite na to klávesnicu (Ctrl/Cmd+X).","paste":"Vložiť","pasteArea":"Miesto pre vloženie","pasteMsg":"Prosím, vložte nasledovný rámček použitím klávesnice (<STRONG>Ctrl/Cmd+V</STRONG>) a stlačte OK.","securityMsg":"Kvôli vašim bezpečnostným nastaveniam prehliadača editor nie je schopný pristupovať k vašej schránke na kopírovanie priamo. Vložte to preto do tohto okna.","title":"Vložiť"},"colorbutton":{"auto":"Automaticky","bgColorTitle":"Farba pozadia","colors":{"000":"Čierna","800000":"Maroon","8B4513":"Sedlová hnedá","2F4F4F":"Tmavo bridlicovo sivá","008080":"Modrozelená","000080":"Tmavomodrá","4B0082":"Indigo","696969":"Tmavá sivá","B22222":"Ohňová tehlová","A52A2A":"Hnedá","DAA520":"Zlatobyľ","006400":"Tmavá zelená","40E0D0":"Tyrkysová","0000CD":"Stredná modrá","800080":"Purpurová","808080":"Sivá","F00":"Červená","FF8C00":"Tmavá oranžová","FFD700":"Zlatá","008000":"Zelená","0FF":"Azúrová","00F":"Modrá","EE82EE":"Fialová","A9A9A9":"Tmavá sivá","FFA07A":"Svetlo lososová","FFA500":"Oranžová","FFFF00":"Žltá","00FF00":"Vápenná","AFEEEE":"Svetlo tyrkysová","ADD8E6":"Svetlo modrá","DDA0DD":"Slivková","D3D3D3":"Svetlo sivá","FFF0F5":"Levanduľovo červená","FAEBD7":"Antická biela","FFFFE0":"Svetlo žltá","F0FFF0":"Medová","F0FFFF":"Azúrová","F0F8FF":"Alicovo modrá","E6E6FA":"Levanduľová","FFF":"Biela"},"more":"Viac farieb...","panelTitle":"Farby","textColorTitle":"Farba textu"},"colordialog":{"clear":"Vyčistiť","highlight":"Zvýrazniť","options":"Možnosti farby","selected":"Vybraná farba","title":"Vyberte farbu"},"templates":{"button":"Šablóny","emptyListMsg":"(Žiadne šablóny nedefinované)","insertOption":"Nahradiť aktuálny obsah","options":"Možnosti šablóny","selectPromptMsg":"Prosím vyberte šablónu na otvorenie v editore","title":"Šablóny obsahu"},"contextmenu":{"options":"Možnosti 
kontextového menu"},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Pomocný titulok","cssClassInputLabel":"Triedy štýlu","edit":"Upraviť Div","inlineStyleInputLabel":"Inline štýl","langDirLTRLabel":"Zľava doprava (LTR)","langDirLabel":"Smer jazyka","langDirRTLLabel":"Zprava doľava (RTL)","languageCodeInputLabel":"Kód jazyka","remove":"Odstrániť Div","styleSelectLabel":"Štýl","title":"Vytvoriť Div kontajner","toolbar":"Vytvoriť Div kontajner"},"toolbar":{"toolbarCollapse":"Zbaliť lištu nástrojov","toolbarExpand":"Rozbaliť lištu nástrojov","toolbarGroups":{"document":"Dokument","clipboard":"Schránka pre kopírovanie/Späť","editing":"Upravovanie","forms":"Formuláre","basicstyles":"Základné štýly","paragraph":"Odstavec","links":"Odkazy","insert":"Vložiť","styles":"Štýly","colors":"Farby","tools":"Nástroje"},"toolbars":"Lišty nástrojov editora"},"elementspath":{"eleLabel":"Cesta prvkov","eleTitle":"%1 prvok"},"find":{"find":"Hľadať","findOptions":"Nájsť možnosti","findWhat":"Čo hľadať:","matchCase":"Rozlišovať malé a veľké písmená","matchCyclic":"Cykliť zhodu","matchWord":"Len celé slová","notFoundMsg":"Hľadaný text nebol nájdený.","replace":"Nahradiť","replaceAll":"Nahradiť všetko","replaceSuccessMsg":"%1 výskyt(ov) nahradených.","replaceWith":"Čím nahradiť:","title":"Nájsť a nahradiť"},"fakeobjects":{"anchor":"Kotva","flash":"Flash animácia","hiddenfield":"Skryté pole","iframe":"IFrame","unknown":"Neznámy objekt"},"flash":{"access":"Prístup skriptu","accessAlways":"Vždy","accessNever":"Nikdy","accessSameDomain":"Rovnaká doména","alignAbsBottom":"Úplne dole","alignAbsMiddle":"Do stredu","alignBaseline":"Na základnú čiaru","alignTextTop":"Na horný okraj textu","bgcolor":"Farba pozadia","chkFull":"Povoliť zobrazenie na celú obrazovku (fullscreen)","chkLoop":"Opakovanie","chkMenu":"Povoliť Flash Menu","chkPlay":"Automatické prehrávanie","flashvars":"Premenné pre Flash","hSpace":"H-medzera","properties":"Vlastnosti 
Flashu","propertiesTab":"Vlastnosti","quality":"Kvalita","qualityAutoHigh":"Automaticky vysoká","qualityAutoLow":"Automaticky nízka","qualityBest":"Najlepšia","qualityHigh":"Vysoká","qualityLow":"Nízka","qualityMedium":"Stredná","scale":"Mierka","scaleAll":"Zobraziť všetko","scaleFit":"Roztiahnuť, aby sedelo presne","scaleNoBorder":"Bez okrajov","title":"Vlastnosti Flashu","vSpace":"V-medzera","validateHSpace":"H-medzera musí byť číslo.","validateSrc":"URL nesmie byť prázdne.","validateVSpace":"V-medzera musí byť číslo","windowMode":"Mód okna","windowModeOpaque":"Nepriehľadný","windowModeTransparent":"Priehľadný","windowModeWindow":"Okno"},"font":{"fontSize":{"label":"Veľkosť","voiceLabel":"Veľkosť písma","panelTitle":"Veľkosť písma"},"label":"Font","panelTitle":"Názov fontu","voiceLabel":"Font"},"forms":{"button":{"title":"Vlastnosti tlačidla","text":"Text (Hodnota)","type":"Typ","typeBtn":"Tlačidlo","typeSbm":"Odoslať","typeRst":"Resetovať"},"checkboxAndRadio":{"checkboxTitle":"Vlastnosti zaškrtávacieho políčka","radioTitle":"Vlastnosti prepínača (radio button)","value":"Hodnota","selected":"Vybrané (selected)"},"form":{"title":"Vlastnosti formulára","menu":"Vlastnosti formulára","action":"Akcia (action)","method":"Metóda (method)","encoding":"Kódovanie (encoding)"},"hidden":{"title":"Vlastnosti skrytého poľa","name":"Názov (name)","value":"Hodnota"},"select":{"title":"Vlastnosti rozbaľovacieho zoznamu","selectInfo":"Informácie o výbere","opAvail":"Dostupné možnosti","value":"Hodnota","size":"Veľkosť","lines":"riadkov","chkMulti":"Povoliť viacnásobný výber","opText":"Text","opValue":"Hodnota","btnAdd":"Pridať","btnModify":"Upraviť","btnUp":"Hore","btnDown":"Dole","btnSetValue":"Nastaviť ako vybranú hodnotu","btnDelete":"Vymazať"},"textarea":{"title":"Vlastnosti textovej oblasti (textarea)","cols":"Stĺpcov","rows":"Riadkov"},"textfield":{"title":"Vlastnosti textového poľa","name":"Názov (name)","value":"Hodnota","charWidth":"Šírka poľa (podľa 
znakov)","maxChars":"Maximálny počet znakov","type":"Typ","typeText":"Text","typePass":"Heslo","typeEmail":"Email","typeSearch":"Hľadať","typeTel":"Telefónne číslo","typeUrl":"URL"}},"format":{"label":"Formát","panelTitle":"Formát","tag_address":"Adresa","tag_div":"Normálny (DIV)","tag_h1":"Nadpis 1","tag_h2":"Nadpis 2","tag_h3":"Nadpis 3","tag_h4":"Nadpis 4","tag_h5":"Nadpis 5","tag_h6":"Nadpis 6","tag_p":"Normálny","tag_pre":"Formátovaný"},"horizontalrule":{"toolbar":"Vložiť vodorovnú čiaru"},"iframe":{"border":"Zobraziť rám frame-u","noUrl":"Prosím, vložte URL iframe","scrolling":"Povoliť skrolovanie","title":"Vlastnosti IFrame","toolbar":"IFrame"},"image":{"alertUrl":"Zadajte prosím URL obrázka","alt":"Alternatívny text","border":"Rám (border)","btnUpload":"Odoslať to na server","button2Img":"Chcete zmeniť vybrané obrázkové tlačidlo na jednoduchý obrázok?","hSpace":"H-medzera","img2Button":"Chcete zmeniť vybraný obrázok na obrázkové tlačidlo?","infoTab":"Informácie o obrázku","linkTab":"Odkaz","lockRatio":"Pomer zámky","menu":"Vlastnosti obrázka","resetSize":"Pôvodná veľkosť","title":"Vlastnosti obrázka","titleButton":"Vlastnosti obrázkového tlačidla","upload":"Nahrať","urlMissing":"Chýba URL zdroja obrázka.","vSpace":"V-medzera","validateBorder":"Rám (border) musí byť celé číslo.","validateHSpace":"H-medzera musí byť celé číslo.","validateVSpace":"V-medzera musí byť celé číslo."},"indent":{"indent":"Zväčšiť odsadenie","outdent":"Zmenšiť odsadenie"},"smiley":{"options":"Možnosti smajlíkov","title":"Vložiť smajlíka","toolbar":"Smajlíky"},"justify":{"block":"Zarovnať do bloku","center":"Zarovnať na stred","left":"Zarovnať vľavo","right":"Zarovnať vpravo"},"link":{"acccessKey":"Prístupový kľúč","advanced":"Rozšírené","advisoryContentType":"Pomocný typ obsahu","advisoryTitle":"Pomocný titulok","anchor":{"toolbar":"Kotva","menu":"Upraviť kotvu","title":"Vlastnosti kotvy","name":"Názov kotvy","errorName":"Zadajte prosím názov kotvy","remove":"Odstrániť 
kotvu"},"anchorId":"Podľa Id objektu","anchorName":"Podľa mena kotvy","charset":"Priradená znaková sada","cssClasses":"Triedy štýlu","emailAddress":"E-Mailová adresa","emailBody":"Telo správy","emailSubject":"Predmet správy","id":"Id","info":"Informácie o odkaze","langCode":"Orientácia jazyka","langDir":"Orientácia jazyka","langDirLTR":"Zľava doprava (LTR)","langDirRTL":"Sprava doľava (RTL)","menu":"Upraviť odkaz","name":"Názov","noAnchors":"(V dokumente nie sú dostupné žiadne kotvy)","noEmail":"Zadajte prosím e-mailovú adresu","noUrl":"Zadajte prosím URL odkazu","other":"<iný>","popupDependent":"Závislosť (Netscape)","popupFeatures":"Vlastnosti vyskakovacieho okna","popupFullScreen":"Celá obrazovka (IE)","popupLeft":"Ľavý okraj","popupLocationBar":"Panel umiestnenia (location bar)","popupMenuBar":"Panel ponuky (menu bar)","popupResizable":"Meniteľná veľkosť (resizable)","popupScrollBars":"Posuvníky (scroll bars)","popupStatusBar":"Stavový riadok (status bar)","popupToolbar":"Panel nástrojov (toolbar)","popupTop":"Horný okraj","rel":"Vzťah (rel)","selectAnchor":"Vybrať kotvu","styles":"Štýl","tabIndex":"Poradie prvku (tab index)","target":"Cieľ","targetFrame":"<rámec>","targetFrameName":"Názov rámu cieľa","targetPopup":"<vyskakovacie okno>","targetPopupName":"Názov vyskakovacieho okna","title":"Odkaz","toAnchor":"Odkaz na kotvu v texte","toEmail":"E-mail","toUrl":"URL","toolbar":"Odkaz","type":"Typ odkazu","unlink":"Odstrániť odkaz","upload":"Nahrať"},"list":{"bulletedlist":"Vložiť/Odstrániť zoznam s odrážkami","numberedlist":"Vložiť/Odstrániť číslovaný zoznam"},"liststyle":{"armenian":"Arménske číslovanie","bulletedTitle":"Vlastnosti odrážkového zoznamu","circle":"Kruh","decimal":"Číselné (1, 2, 3, atď.)","decimalLeadingZero":"Číselné s nulou (01, 02, 03, atď.)","disc":"Disk","georgian":"Gregoriánske číslovanie (an, ban, gan, atď.)","lowerAlpha":"Malé latinské (a, b, c, d, e, atď.)","lowerGreek":"Malé grécke (alfa, beta, gama, atď.)","lowerRoman":"Malé rímske (i, 
ii, iii, iv, v, atď.)","none":"Nič","notset":"<nenastavené>","numberedTitle":"Vlastnosti číselného zoznamu","square":"Štvorec","start":"Začiatok","type":"Typ","upperAlpha":"Veľké latinské (A, B, C, D, E, atď.)","upperRoman":"Veľké rímske (I, II, III, IV, V, atď.)","validateStartNumber":"Začiatočné číslo číselného zoznamu musí byť celé číslo."},"magicline":{"title":"Sem vložte paragraf"},"maximize":{"maximize":"Maximalizovať","minimize":"Minimalizovať"},"newpage":{"toolbar":"Nová stránka"},"pagebreak":{"alt":"Zalomenie strany","toolbar":"Vložiť oddeľovač stránky pre tlač"},"pastetext":{"button":"Vložiť ako čistý text","title":"Vložiť ako čistý text"},"pastefromword":{"confirmCleanup":"Vkladaný text vyzerá byť skopírovaný z Wordu. Chcete ho automaticky vyčistiť pred vkladaním?","error":"Nebolo možné vyčistiť vložené dáta kvôli internej chybe","title":"Vložiť z Wordu","toolbar":"Vložiť z Wordu"},"preview":{"preview":"Náhľad"},"print":{"toolbar":"Tlač"},"removeformat":{"toolbar":"Odstrániť formátovanie"},"save":{"toolbar":"Uložiť"},"selectall":{"toolbar":"Vybrať všetko"},"showblocks":{"toolbar":"Ukázať bloky"},"sourcearea":{"toolbar":"Zdroj"},"specialchar":{"options":"Možnosti špeciálneho znaku","title":"Výber špeciálneho znaku","toolbar":"Vložiť špeciálny znak"},"scayt":{"about":"O KPPP (Kontrola pravopisu počas písania)","aboutTab":"O","addWord":"Pridať slovo","allCaps":"Ignorovať slová písané veľkými písmenami","dic_create":"Vytvoriť","dic_delete":"Vymazať","dic_field_name":"Názov slovníka","dic_info":"Spočiatku je užívateľský slovník uložený v cookie. Cookie však majú obmedzenú veľkosť. Keď užívateľský slovník narastie do bodu, kedy nemôže byť uložený v cookie, potom musí byť slovník uložený na našom serveri. Pre uloženie vášho osobného slovníka na náš server by ste mali zadať názov pre váš slovník. 
Ak už máte uložený slovník, prosíme, napíšte jeho názov a kliknite tlačidlo Obnoviť.","dic_rename":"Premenovať","dic_restore":"Obnoviť","dictionariesTab":"Slovníky","disable":"Zakázať KPPP (Kontrola pravopisu počas písania)","emptyDic":"Názov slovníka by nemal byť prázdny.","enable":"Povoliť KPPP (Kontrola pravopisu počas písania)","ignore":"Ignorovať","ignoreAll":"Ignorovať všetko","ignoreDomainNames":"Iznorovať názvy domén","langs":"Jazyky","languagesTab":"Jazyky","mixedCase":"Ignorovať slová so smiešanými veľkými a malými písmenami","mixedWithDigits":"Ignorovať slová s číslami","moreSuggestions":"Viac návrhov","opera_title":"Nepodporované Operou","options":"Možnosti","optionsTab":"Možnosti","title":"Kontrola pravopisu počas písania","toggle":"Prepnúť KPPP (Kontrola pravopisu počas písania)","noSuggestions":"No suggestion"},"stylescombo":{"label":"Štýly","panelTitle":"Formátovanie štýlov","panelTitle1":"Štýly bloku","panelTitle2":"Vnútroriadkové (inline) štýly","panelTitle3":"Štýly objeku"},"table":{"border":"Šírka rámu (border)","caption":"Popis","cell":{"menu":"Bunka","insertBefore":"Vložiť bunku pred","insertAfter":"Vložiť bunku za","deleteCell":"Vymazať bunky","merge":"Zlúčiť bunky","mergeRight":"Zlúčiť doprava","mergeDown":"Zlúčiť dole","splitHorizontal":"Rozdeliť bunky horizontálne","splitVertical":"Rozdeliť bunky vertikálne","title":"Vlastnosti bunky","cellType":"Typ bunky","rowSpan":"Rozsah riadkov","colSpan":"Rozsah stĺpcov","wordWrap":"Zalomovanie riadkov","hAlign":"Horizontálne zarovnanie","vAlign":"Vertikálne zarovnanie","alignBaseline":"Základná čiara (baseline)","bgColor":"Farba pozadia","borderColor":"Farba rámu","data":"Dáta","header":"Hlavička","yes":"Áno","no":"Nie","invalidWidth":"Šírka bunky musí byť číslo.","invalidHeight":"Výška bunky musí byť číslo.","invalidRowSpan":"Rozsah riadkov musí byť celé číslo.","invalidColSpan":"Rozsah stĺpcov musí byť celé číslo.","chooseColor":"Vybrať"},"cellPad":"Odsadenie obsahu (cell 
padding)","cellSpace":"Vzdialenosť buniek (cell spacing)","column":{"menu":"Stĺpec","insertBefore":"Vložiť stĺpec pred","insertAfter":"Vložiť stĺpec po","deleteColumn":"Zmazať stĺpce"},"columns":"Stĺpce","deleteTable":"Vymazať tabuľku","headers":"Hlavička","headersBoth":"Obe","headersColumn":"Prvý stĺpec","headersNone":"Žiadne","headersRow":"Prvý riadok","invalidBorder":"Širka rámu musí byť číslo.","invalidCellPadding":"Odsadenie v bunkách (cell padding) musí byť kladné číslo.","invalidCellSpacing":"Medzera mädzi bunkami (cell spacing) musí byť kladné číslo.","invalidCols":"Počet stĺpcov musí byť číslo väčšie ako 0.","invalidHeight":"Výška tabuľky musí byť číslo.","invalidRows":"Počet riadkov musí byť číslo väčšie ako 0.","invalidWidth":"Širka tabuľky musí byť číslo.","menu":"Vlastnosti tabuľky","row":{"menu":"Riadok","insertBefore":"Vložiť riadok pred","insertAfter":"Vložiť riadok po","deleteRow":"Vymazať riadky"},"rows":"Riadky","summary":"Prehľad","title":"Vlastnosti tabuľky","toolbar":"Tabuľka","widthPc":"percent","widthPx":"pixelov","widthUnit":"jednotka šírky"},"undo":{"redo":"Znovu","undo":"Späť"},"wsc":{"btnIgnore":"Ignorovať","btnIgnoreAll":"Ignorovať všetko","btnReplace":"Prepísat","btnReplaceAll":"Prepísat všetko","btnUndo":"Späť","changeTo":"Zmeniť na","errorLoading":"Chyba pri načítaní slovníka z adresy: %s.","ieSpellDownload":"Kontrola pravopisu nie je naištalovaná. Chcete ju teraz stiahnuť?","manyChanges":"Kontrola pravopisu dokončená: Bolo zmenených %1 slov","noChanges":"Kontrola pravopisu dokončená: Neboli zmenené žiadne slová","noMispell":"Kontrola pravopisu dokončená: Neboli nájdené žiadne chyby pravopisu","noSuggestions":"- Žiadny návrh -","notAvailable":"Prepáčte, ale služba je momentálne nedostupná.","notInDic":"Nie je v slovníku","oneChange":"Kontrola pravopisu dokončená: Bolo zmenené jedno slovo","progress":"Prebieha kontrola pravopisu...","title":"Skontrolovať pravopis","toolbar":"Kontrola pravopisu"}}; | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/code_generation/ConditionalCodes.py | from .CodeHelpers import decideConversionCheckNeeded, generateExpressionCode
from .Emission import SourceCodeCollector
from .ErrorCodes import (
getErrorExitBoolCode,
getReleaseCode,
getTakeReferenceCode,
)
from .LabelCodes import getBranchingCode, getGotoCode, getLabelCode
def generateConditionCode(condition, emit, context):
    """Emit code that evaluates *condition* and branches on its truth value.

    The truth result is stored in a temporary whose C type depends on
    whether evaluating the condition can raise: "nuitka_bool" carries an
    exception indication, plain "bool" cannot.
    """
    result_c_type = (
        "nuitka_bool"
        if condition.mayRaiseExceptionBool(BaseException)
        else "bool"
    )
    compare_name = context.allocateTempName("condition_result", result_c_type)

    generateExpressionCode(
        to_name=compare_name,
        expression=condition,
        emit=emit,
        context=context,
    )

    # Jump to the context's current true/false branch targets.
    getBranchingCode(
        condition=compare_name.getCType().getTruthCheckCode(compare_name),
        emit=emit,
        context=context,
    )

    getReleaseCode(compare_name, emit, context)
def generateConditionalAndOrCode(to_name, expression, emit, context):
    """Emit code for short-circuiting "and"/"or" expressions.

    The left operand is evaluated first; depending on its truth value the
    result is either the left value itself or the right operand, which is
    only evaluated on the fall-through path (short-circuit semantics).
    Reference counts of the two candidate values are managed manually so
    that exactly one owned reference ends up in *to_name* when either
    operand produced one.
    """
    # This is a complex beast, handling both "or" and "and" expressions,
    # and it needs to micro manage details.
    # pylint: disable=too-many-locals
    if expression.isExpressionConditionalOr():
        prefix = "or_"
    else:
        prefix = "and_"

    # NOTE(review): despite the names, "true_target" is the label where the
    # LEFT value is used as result, and "false_target" where the RIGHT
    # value is evaluated — the swap below maps truth to the proper label.
    true_target = context.allocateLabel(prefix + "left")
    false_target = context.allocateLabel(prefix + "right")
    end_target = context.allocateLabel(prefix + "end")

    old_true_target = context.getTrueBranchTarget()
    old_false_target = context.getFalseBranchTarget()

    # Truth check result: 1 true, 0 false, -1 exception (checked below).
    truth_name = context.allocateTempName(prefix + "left_truth", "int")

    left_name = context.allocateTempName(prefix + "left_value", to_name.c_type)
    right_name = context.allocateTempName(prefix + "right_value", to_name.c_type)

    left_value = expression.subnode_left

    generateExpressionCode(
        to_name=left_name, expression=left_value, emit=emit, context=context
    )

    # We need to treat this mostly manually here. We remember to release
    # this, and we better do this manually later.
    needs_ref1 = context.needsCleanup(left_name)

    # For "or", a true left value short-circuits; for "and" it is a false
    # left value — hence the inverted target assignment.
    if expression.isExpressionConditionalOr():
        context.setTrueBranchTarget(true_target)
        context.setFalseBranchTarget(false_target)
    else:
        context.setTrueBranchTarget(false_target)
        context.setFalseBranchTarget(true_target)

    left_name.getCType().emitTruthCheckCode(
        to_name=truth_name,
        value_name=left_name,
        emit=emit,
    )

    needs_check = left_value.mayRaiseExceptionBool(BaseException)

    if needs_check:
        # -1 signals an exception during the truth check.
        getErrorExitBoolCode(
            condition="%s == -1" % truth_name,
            needs_check=True,
            emit=emit,
            context=context,
        )

    getBranchingCode(condition="%s == 1" % truth_name, emit=emit, context=context)

    getLabelCode(false_target, emit)

    # So it's not the left value, then lets release that one right away, it
    # is not needed, but we remember if it should be added above.
    getReleaseCode(release_name=left_name, emit=emit, context=context)

    right_value = expression.subnode_right

    # Evaluate the "right" value then.
    generateExpressionCode(
        to_name=right_name, expression=right_value, emit=emit, context=context
    )

    # Again, remember the reference count to manage it manually.
    needs_ref2 = context.needsCleanup(right_name)

    if needs_ref2:
        context.removeCleanupTempName(right_name)

    # Equalize: if the left path owns a reference but the right value does
    # not, take one so both paths hand over an owned reference.
    if not needs_ref2 and needs_ref1:
        getTakeReferenceCode(right_name, emit)

    to_name.getCType().emitAssignConversionCode(
        to_name=to_name,
        value_name=right_name,
        needs_check=decideConversionCheckNeeded(to_name, right_value),
        emit=emit,
        context=context,
    )

    getGotoCode(end_target, emit)

    getLabelCode(true_target, emit)

    # Mirror image of the above: left value lacks a reference the right
    # path would provide.
    if not needs_ref1 and needs_ref2:
        getTakeReferenceCode(left_name, emit)

    to_name.getCType().emitAssignConversionCode(
        to_name=to_name,
        value_name=left_name,
        needs_check=decideConversionCheckNeeded(to_name, left_value),
        emit=emit,
        context=context,
    )

    getLabelCode(end_target, emit)

    # If either path produced an owned reference, both now do; register
    # the result for cleanup.
    if needs_ref1 or needs_ref2:
        context.addCleanupTempName(to_name)

    context.setTrueBranchTarget(old_true_target)
    context.setFalseBranchTarget(old_false_target)
def generateConditionalCode(to_name, expression, emit, context):
    """Emit code for a conditional expression ("yes if condition else no").

    Both branch expressions assign to *to_name*. The "no" branch is
    generated into a buffer first, because the reference-count handling
    of the join point can only be decided once it is known whether each
    branch produced an owned reference.

    Improvement over the previous version: the three needs_ref1/needs_ref2
    cases duplicated the goto/label/replay join sequence; it is now emitted
    once, with the branch-specific take-reference code placed before or
    after it as required (behavior unchanged).
    """
    true_target = context.allocateLabel("condexpr_true")
    false_target = context.allocateLabel("condexpr_false")
    end_target = context.allocateLabel("condexpr_end")

    old_true_target = context.getTrueBranchTarget()
    old_false_target = context.getFalseBranchTarget()

    context.setTrueBranchTarget(true_target)
    context.setFalseBranchTarget(false_target)

    generateConditionCode(
        condition=expression.subnode_condition, emit=emit, context=context
    )

    getLabelCode(true_target, emit)
    generateExpressionCode(
        to_name=to_name,
        expression=expression.subnode_expression_yes,
        emit=emit,
        context=context,
    )

    needs_ref1 = context.needsCleanup(to_name)

    # Must not clean this up in the other branch's expression.
    if needs_ref1:
        context.removeCleanupTempName(to_name)

    # Buffer the "no" branch code, so the take-reference decision can be
    # made before it is written out.
    real_emit = emit
    emit = SourceCodeCollector()

    generateExpressionCode(
        to_name=to_name,
        expression=expression.subnode_expression_no,
        emit=emit,
        context=context,
    )

    needs_ref2 = context.needsCleanup(to_name)

    if needs_ref2 and not needs_ref1:
        # Only the "no" branch owns a reference; make the "yes" branch take
        # one as well, before jumping over the "no" branch code.
        getTakeReferenceCode(to_name, real_emit)

    # Shared join sequence: skip the "no" branch from the "yes" path, then
    # replay the buffered "no" branch code.
    getGotoCode(end_target, real_emit)
    getLabelCode(false_target, real_emit)
    for line in emit.codes:
        real_emit(line)
    emit = real_emit

    if needs_ref1 and not needs_ref2:
        # Only the "yes" branch owned a reference; make the "no" branch take
        # one too, and re-register the cleanup removed above.
        getTakeReferenceCode(to_name, emit)
        context.addCleanupTempName(to_name)

    getLabelCode(end_target, emit)

    context.setTrueBranchTarget(old_true_target)
    context.setFalseBranchTarget(old_false_target)
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/glob/common.js | exports.setopts = setopts
exports.ownProp = ownProp
exports.makeAbs = makeAbs
exports.finish = finish
exports.mark = mark
exports.isIgnored = isIgnored
exports.childrenIgnored = childrenIgnored
function ownProp (obj, field) {
  // Go through Object.prototype directly so objects created with a null
  // prototype (or with a shadowed `hasOwnProperty`) still work.
  var hasOwn = Object.prototype.hasOwnProperty
  return hasOwn.call(obj, field)
}
var fs = require("fs")
var path = require("path")
var minimatch = require("minimatch")
var isAbsolute = require("path-is-absolute")
var Minimatch = minimatch.Minimatch
// Comparator for Array#sort: locale-aware comparison pinned to English so
// the resulting order is stable across machines regardless of user locale.
function alphasort (a, b) {
  return a.localeCompare(b, 'en')
}
function setupIgnores (self, options) {
  // Normalize options.ignore into an array of matcher objects on `self`.
  var patterns = options.ignore || []

  if (!Array.isArray(patterns))
    patterns = [patterns]

  self.ignore = patterns

  if (patterns.length) {
    self.ignore = patterns.map(ignoreMap)
  }
}
// ignore patterns are always in dot:true mode.
function ignoreMap (pattern) {
  // Patterns ending in "/**" should also ignore the directory itself, so
  // build an extra matcher for the pattern with the globstar stripped.
  var dirMatcher = null
  if (pattern.slice(-3) === '/**') {
    var dirPattern = pattern.replace(/(\/\*\*)+$/, '')
    dirMatcher = new Minimatch(dirPattern, { dot: true })
  }

  // ignore patterns always match dotfiles (dot:true mode).
  return {
    matcher: new Minimatch(pattern, { dot: true }),
    gmatcher: dirMatcher
  }
}
// Copy the user-supplied options onto `self`, applying defaults and
// normalizations shared by the async and sync glob implementations.
// NOTE: `options` is mutated below (nonegate/nocomment/allowWindowsEscape)
// before being handed to Minimatch.
function setopts (self, pattern, options) {
  if (!options)
    options = {}

  // base-matching: just use globstar for that.
  if (options.matchBase && -1 === pattern.indexOf("/")) {
    if (options.noglobstar) {
      throw new Error("base matching requires globstar")
    }
    pattern = "**/" + pattern
  }

  // Boolean flags are coerced with !! so any truthy input becomes true.
  self.silent = !!options.silent
  self.pattern = pattern
  self.strict = options.strict !== false
  self.realpath = !!options.realpath
  self.realpathCache = options.realpathCache || Object.create(null)
  self.follow = !!options.follow
  self.dot = !!options.dot
  self.mark = !!options.mark
  self.nodir = !!options.nodir
  if (self.nodir)
    self.mark = true
  self.sync = !!options.sync
  self.nounique = !!options.nounique
  self.nonull = !!options.nonull
  self.nosort = !!options.nosort
  self.nocase = !!options.nocase
  self.stat = !!options.stat
  self.noprocess = !!options.noprocess
  self.absolute = !!options.absolute
  self.fs = options.fs || fs

  self.maxLength = options.maxLength || Infinity
  // Caches default to prototype-less objects so path names can never
  // collide with Object.prototype properties.
  self.cache = options.cache || Object.create(null)
  self.statCache = options.statCache || Object.create(null)
  self.symlinks = options.symlinks || Object.create(null)

  setupIgnores(self, options)

  self.changedCwd = false
  var cwd = process.cwd()
  if (!ownProp(options, "cwd"))
    self.cwd = cwd
  else {
    self.cwd = path.resolve(options.cwd)
    self.changedCwd = self.cwd !== cwd
  }

  self.root = options.root || path.resolve(self.cwd, "/")
  self.root = path.resolve(self.root)
  if (process.platform === "win32")
    self.root = self.root.replace(/\\/g, "/")

  // TODO: is an absolute `cwd` supposed to be resolved against `root`?
  // e.g. { cwd: '/test', root: __dirname } === path.join(__dirname, '/test')
  self.cwdAbs = isAbsolute(self.cwd) ? self.cwd : makeAbs(self, self.cwd)
  if (process.platform === "win32")
    self.cwdAbs = self.cwdAbs.replace(/\\/g, "/")
  self.nomount = !!options.nomount

  // disable comments and negation in Minimatch.
  // Note that they are not supported in Glob itself anyway.
  options.nonegate = true
  options.nocomment = true
  // always treat \ in patterns as escapes, not path separators
  options.allowWindowsEscape = false

  self.minimatch = new Minimatch(pattern, options)
  self.options = self.minimatch.options
}
// Collapse the per-pattern match sets in self.matches into the final
// self.found array, honoring nonull / nounique / nosort / mark / nodir
// and the ignore patterns.
function finish (self) {
  var nou = self.nounique
  // With nounique, duplicates are allowed so an array is enough; otherwise
  // keys of a prototype-less object are used as a set.
  var all = nou ? [] : Object.create(null)

  for (var i = 0, l = self.matches.length; i < l; i ++) {
    var matches = self.matches[i]
    if (!matches || Object.keys(matches).length === 0) {
      if (self.nonull) {
        // do like the shell, and spit out the literal glob
        var literal = self.minimatch.globSet[i]
        if (nou)
          all.push(literal)
        else
          all[literal] = true
      }
    } else {
      // had matches
      var m = Object.keys(matches)
      if (nou)
        all.push.apply(all, m)
      else
        m.forEach(function (m) {
          all[m] = true
        })
    }
  }

  if (!nou)
    all = Object.keys(all)

  if (!self.nosort)
    all = all.sort(alphasort)

  // at *some* point we statted all of these
  if (self.mark) {
    for (var i = 0; i < all.length; i++) {
      all[i] = self._mark(all[i])
    }
    if (self.nodir) {
      // After marking, directories end in "/"; drop them, consulting the
      // cache for entries whose type is known without the trailing slash.
      all = all.filter(function (e) {
        var notDir = !(/\/$/.test(e))
        var c = self.cache[e] || self.cache[makeAbs(self, e)]
        if (notDir && c)
          notDir = c !== 'DIR' && !Array.isArray(c)
        return notDir
      })
    }
  }

  if (self.ignore.length)
    all = all.filter(function(m) {
      return !isIgnored(self, m)
    })

  self.found = all
}
function mark (self, p) {
  // Add or remove a trailing "/" on `p` so it reflects what the cache
  // knows about the path: directories end in "/", files do not.
  var abs = makeAbs(self, p)
  var cached = self.cache[abs]
  var marked = p

  if (cached) {
    var isDir = cached === 'DIR' || Array.isArray(cached)
    var hasSlash = p.slice(-1) === '/'

    if (isDir && !hasSlash)
      marked = p + '/'
    else if (!isDir && hasSlash)
      marked = p.slice(0, -1)

    if (marked !== p) {
      // Mirror the cache entries under the renamed key so later lookups
      // by the marked name still hit.
      var markedAbs = makeAbs(self, marked)
      self.statCache[markedAbs] = self.statCache[abs]
      self.cache[markedAbs] = self.cache[abs]
    }
  }

  return marked
}
// lotta situps...
function makeAbs (self, f) {
  // Resolve `f` to an absolute path, honoring the configured root and cwd.
  var abs

  if (f.charAt(0) === '/') {
    // A leading slash resolves against the configured root, not "/".
    abs = path.join(self.root, f)
  } else if (isAbsolute(f) || f === '') {
    abs = f
  } else if (self.changedCwd) {
    abs = path.resolve(self.cwd, f)
  } else {
    abs = path.resolve(f)
  }

  // Normalize to forward slashes on Windows.
  if (process.platform === 'win32')
    abs = abs.replace(/\\/g, '/')

  return abs
}
// Return true, if pattern ends with globstar '**', for the accompanying parent directory.
// Ex:- If node_modules/** is the pattern, add 'node_modules' to ignore list along with it's contents
function isIgnored (self, path) {
  // A path is ignored when any pattern matches it directly, or when a
  // directory-globstar pattern ("dir/**") matches via its gmatcher.
  if (!self.ignore.length)
    return false

  return self.ignore.some(function (item) {
    if (item.matcher.match(path))
      return true
    return !!(item.gmatcher && item.gmatcher.match(path))
  })
}
function childrenIgnored (self, path) {
  // Only a directory-globstar pattern ("dir/**") can ignore everything
  // beneath `path`; plain matchers say nothing about children.
  if (!self.ignore.length)
    return false

  return self.ignore.some(function (item) {
    return Boolean(item.gmatcher && item.gmatcher.match(path))
  })
}
/20220429_pdfminer_jameslp310-0.0.2-py3-none-any.whl/pdfminer/cmapdb.py | import gzip
import logging
import os
import os.path
import pickle as pickle
import struct
import sys
from typing import (
Any,
BinaryIO,
Dict,
Iterable,
Iterator,
List,
MutableMapping,
Optional,
TextIO,
Tuple,
Union,
cast,
Set,
)
from .encodingdb import name2unicode
from .psparser import KWD
from .psparser import PSEOF
from .psparser import PSKeyword
from .psparser import PSLiteral
from .psparser import PSStackParser
from .psparser import PSSyntaxError
from .psparser import literal_name
from .utils import choplist
from .utils import nunpack
log = logging.getLogger(__name__)
class CMapError(Exception):
    """Base exception for CMap loading, parsing, and lookup failures."""

    pass
class CMapBase:
    """Common interface for character-code maps.

    Subclasses map byte codes to CIDs and/or CIDs to unicode text; this
    base class only stores the attribute dictionary and the writing mode.
    """

    debug = 0

    def __init__(self, **kwargs: object) -> None:
        # Keep a private copy so later set_attr calls do not leak back
        # into the caller's keyword arguments.
        self.attrs: MutableMapping[str, object] = dict(kwargs)

    def is_vertical(self) -> bool:
        """Return True when the writing mode (WMode attribute) is vertical."""
        return self.attrs.get("WMode", 0) != 0

    def set_attr(self, k: str, v: object) -> None:
        self.attrs[k] = v

    def add_code2cid(self, code: str, cid: int) -> None:
        # No-op in the base class; overridden by code-to-CID maps.
        pass

    def add_cid2unichr(self, cid: int, code: Union[PSLiteral, bytes, int]) -> None:
        # No-op in the base class; overridden by unicode maps.
        pass

    def use_cmap(self, cmap: "CMapBase") -> None:
        pass

    def decode(self, code: bytes) -> Iterable[int]:
        raise NotImplementedError
class CMap(CMapBase):
    """A code-to-CID map backed by a nested dict that acts as a byte trie."""

    def __init__(self, **kwargs: Union[str, int]) -> None:
        CMapBase.__init__(self, **kwargs)
        # Trie: each level maps one byte to either a CID (int) or a deeper dict.
        self.code2cid: Dict[int, object] = {}

    def __repr__(self) -> str:
        return "<CMap: %s>" % self.attrs.get("CMapName")

    def use_cmap(self, cmap: CMapBase) -> None:
        """Merge another CMap's trie into this one via a deep copy."""
        assert isinstance(cmap, CMap), str(type(cmap))

        def _deep_copy(dst: Dict[int, object], src: Dict[int, object]) -> None:
            for key, value in src.items():
                if isinstance(value, dict):
                    child: Dict[int, object] = {}
                    dst[key] = child
                    _deep_copy(child, value)
                else:
                    dst[key] = value

        _deep_copy(self.code2cid, cmap.code2cid)

    def decode(self, code: bytes) -> Iterator[int]:
        """Walk the trie over *code*, yielding a CID at every leaf reached.

        Unknown bytes reset the walk to the trie root.
        """
        log.debug("decode: %r, %r", self, code)
        table = self.code2cid
        for byte in iter(code):
            if byte not in table:
                table = self.code2cid
                continue
            entry = table[byte]
            if isinstance(entry, int):
                yield entry
                table = self.code2cid
            else:
                table = cast(Dict[int, object], entry)

    def dump(
        self,
        out: TextIO = sys.stdout,
        code2cid: Optional[Dict[int, object]] = None,
        code: Tuple[int, ...] = (),
    ) -> None:
        """Print every code-to-CID mapping, one per line, sorted by code."""
        if code2cid is None:
            code2cid = self.code2cid
            code = ()
        for byte, entry in sorted(code2cid.items()):
            prefix = code + (byte,)
            if isinstance(entry, int):
                out.write("code %r = cid %d\n" % (prefix, entry))
            else:
                self.dump(out=out, code2cid=cast(Dict[int, object], entry), code=prefix)
class IdentityCMap(CMapBase):
    """Identity mapping: each 2-byte big-endian code is its own CID."""

    def decode(self, code: bytes) -> Tuple[int, ...]:
        pair_count = len(code) // 2
        if not pair_count:
            return ()
        # Unpack the buffer as `pair_count` big-endian unsigned 16-bit values.
        return struct.unpack(">%dH" % pair_count, code)
class IdentityCMapByte(IdentityCMap):
    """Identity mapping over single bytes: each byte is its own CID."""

    def decode(self, code: bytes) -> Tuple[int, ...]:
        byte_count = len(code)
        if not byte_count:
            return ()
        # Unpack each byte as an unsigned 8-bit value.
        return struct.unpack(">%dB" % byte_count, code)
class UnicodeMap(CMapBase):
    """Maps CIDs to unicode strings (the "ToUnicode" direction)."""

    def __init__(self, **kwargs: Union[str, int]) -> None:
        CMapBase.__init__(self, **kwargs)
        self.cid2unichr: Dict[int, str] = {}

    def __repr__(self) -> str:
        return "<UnicodeMap: %s>" % self.attrs.get("CMapName")

    def get_unichr(self, cid: int) -> str:
        """Return the unicode string for *cid* (KeyError when unmapped)."""
        log.debug("get_unichr: %r, %r", self, cid)
        return self.cid2unichr[cid]

    def dump(self, out: TextIO = sys.stdout) -> None:
        """Print every CID-to-unicode mapping, one per line, sorted by CID."""
        for cid, text in sorted(self.cid2unichr.items()):
            out.write("cid %d = unicode %r\n" % (cid, text))
class IdentityUnicodeMap(UnicodeMap):
    """UnicodeMap that treats every CID as its own unicode codepoint."""

    def get_unichr(self, cid: int) -> str:
        """Interpret character id as unicode codepoint"""
        log.debug("get_unichr: %r, %r", self, cid)
        return chr(cid)
class FileCMap(CMap):
    """A CMap populated incrementally while parsing a CMap file."""

    def add_code2cid(self, code: str, cid: int) -> None:
        """Insert the mapping *code* (bytes-as-str) -> *cid* into the trie."""
        assert isinstance(code, str) and isinstance(cid, int), str(
            (type(code), type(cid))
        )
        node = self.code2cid
        # Descend through all but the last byte, creating levels as needed.
        for ch in code[:-1]:
            key = ord(ch)
            if key in node:
                node = cast(Dict[int, object], node[key])
            else:
                child: Dict[int, object] = {}
                node[key] = child
                node = child
        # The final byte maps to the CID itself.
        node[ord(code[-1])] = cid
class FileUnicodeMap(UnicodeMap):
    """A UnicodeMap populated incrementally while parsing a ToUnicode CMap."""

    def add_cid2unichr(self, cid: int, code: Union[PSLiteral, bytes, int]) -> None:
        """Record the unicode value for *cid*, decoding *code* by its type."""
        assert isinstance(cid, int), str(type(cid))
        if isinstance(code, PSLiteral):
            # Interpret as an Adobe glyph name.
            assert isinstance(code.name, str)
            value = name2unicode(code.name)
        elif isinstance(code, bytes):
            # Interpret as UTF-16BE.
            value = code.decode("UTF-16BE", "ignore")
        elif isinstance(code, int):
            # Interpret as a unicode codepoint.
            value = chr(code)
        else:
            raise TypeError(code)
        self.cid2unichr[cid] = value
class PyCMap(CMap):
    """A CMap whose tables come from a precompiled cmap data module."""

    def __init__(self, name: str, module: Any) -> None:
        super().__init__(CMapName=name)
        # The data module supplies the full byte trie ready for use.
        self.code2cid = module.CODE2CID
        if module.IS_VERTICAL:
            self.set_attr("WMode", 1)
class PyUnicodeMap(UnicodeMap):
    """A UnicodeMap whose tables come from a precompiled cmap data module."""

    def __init__(self, name: str, module: Any, vertical: bool) -> None:
        super().__init__(CMapName=name)
        # Pick the vertical or horizontal table; only the vertical variant
        # carries an explicit writing mode.
        if vertical:
            self.cid2unichr = module.CID2UNICHR_V
            self.set_attr("WMode", 1)
        else:
            self.cid2unichr = module.CID2UNICHR_H
class CMapDB:
    """Loads and caches CMaps and unicode maps from precompiled pickle files."""

    # Class-level caches shared by all callers; maps are reused once loaded.
    _cmap_cache: Dict[str, PyCMap] = {}
    # Cached as [horizontal, vertical] pairs, indexed by the bool flag.
    _umap_cache: Dict[str, List[PyUnicodeMap]] = {}

    class CMapNotFound(CMapError):
        pass

    @classmethod
    def _load_data(cls, name: str) -> Any:
        """Load the gzipped pickle for *name* and return it as a class object.

        Searches $CMAP_PATH (default /usr/share/pdfminer/) then the bundled
        "cmap" directory next to this module. Raises CMapNotFound when the
        file exists in neither location.

        NOTE(review): this unpickles file contents; it trusts the bundled
        data files and anything on CMAP_PATH — do not point CMAP_PATH at
        untrusted locations.
        """
        name = name.replace("\0", "")
        filename = "%s.pickle.gz" % name
        log.debug("loading: %r", name)
        cmap_paths = (
            os.environ.get("CMAP_PATH", "/usr/share/pdfminer/"),
            os.path.join(os.path.dirname(__file__), "cmap"),
        )
        for directory in cmap_paths:
            path = os.path.join(directory, filename)
            if os.path.exists(path):
                gzfile = gzip.open(path)
                try:
                    # Wrap the unpickled dict in a throwaway class so the
                    # tables are reachable as attributes.
                    return type(str(name), (), pickle.loads(gzfile.read()))
                finally:
                    gzfile.close()
        else:
            raise CMapDB.CMapNotFound(name)

    @classmethod
    def get_cmap(cls, name: str) -> CMapBase:
        """Return the CMap for *name*, loading and caching it on first use.

        The identity encodings are synthesized directly and never cached.
        """
        if name == "Identity-H":
            return IdentityCMap(WMode=0)
        elif name == "Identity-V":
            return IdentityCMap(WMode=1)
        elif name == "OneByteIdentityH":
            return IdentityCMapByte(WMode=0)
        elif name == "OneByteIdentityV":
            return IdentityCMapByte(WMode=1)
        try:
            return cls._cmap_cache[name]
        except KeyError:
            pass
        data = cls._load_data(name)
        cls._cmap_cache[name] = cmap = PyCMap(name, data)
        return cmap

    @classmethod
    def get_unicode_map(cls, name: str, vertical: bool = False) -> UnicodeMap:
        """Return the ToUnicode map for *name*, loading both writing modes
        on first use and caching them together."""
        try:
            return cls._umap_cache[name][vertical]
        except KeyError:
            pass
        data = cls._load_data("to-unicode-%s" % name)
        cls._umap_cache[name] = [PyUnicodeMap(name, data, v) for v in (False, True)]
        return cls._umap_cache[name][vertical]
class CMapParser(PSStackParser[PSKeyword]):
    """PostScript stack parser that feeds CMap operators into a CMapBase.

    Used both for embedded ToUnicode streams and predefined CMap files;
    each recognized keyword consumes its operands from the PS stack and
    records the resulting code-to-CID/unicode mappings on ``self.cmap``.
    """
    def __init__(self, cmap: CMapBase, fp: BinaryIO) -> None:
        PSStackParser.__init__(self, fp)
        self.cmap = cmap
        # some ToUnicode maps don't have "begincmap" keyword.
        self._in_cmap = True
        self._warnings: Set[str] = set()
        return
    def run(self) -> None:
        # PSEOF simply marks the end of the input; it is not an error here.
        try:
            self.nextobject()
        except PSEOF:
            pass
        return
    KEYWORD_BEGINCMAP = KWD(b"begincmap")
    KEYWORD_ENDCMAP = KWD(b"endcmap")
    KEYWORD_USECMAP = KWD(b"usecmap")
    KEYWORD_DEF = KWD(b"def")
    KEYWORD_BEGINCODESPACERANGE = KWD(b"begincodespacerange")
    KEYWORD_ENDCODESPACERANGE = KWD(b"endcodespacerange")
    KEYWORD_BEGINCIDRANGE = KWD(b"begincidrange")
    KEYWORD_ENDCIDRANGE = KWD(b"endcidrange")
    KEYWORD_BEGINCIDCHAR = KWD(b"begincidchar")
    KEYWORD_ENDCIDCHAR = KWD(b"endcidchar")
    KEYWORD_BEGINBFRANGE = KWD(b"beginbfrange")
    KEYWORD_ENDBFRANGE = KWD(b"endbfrange")
    KEYWORD_BEGINBFCHAR = KWD(b"beginbfchar")
    KEYWORD_ENDBFCHAR = KWD(b"endbfchar")
    KEYWORD_BEGINNOTDEFRANGE = KWD(b"beginnotdefrange")
    KEYWORD_ENDNOTDEFRANGE = KWD(b"endnotdefrange")
    def do_keyword(self, pos: int, token: PSKeyword) -> None:
        """Dispatch one CMap keyword token.

        See Section 5.9.2 - ToUnicode CMaps of the PDF Reference.
        Malformed operand lists are reported once via ``_warn_once`` and
        skipped rather than aborting the whole parse.
        """
        if token is self.KEYWORD_BEGINCMAP:
            self._in_cmap = True
            self.popall()
            return
        elif token is self.KEYWORD_ENDCMAP:
            self._in_cmap = False
            return
        if not self._in_cmap:
            return
        if token is self.KEYWORD_DEF:
            # `key value def` -> store as an attribute of the cmap.
            try:
                ((_, k), (_, v)) = self.pop(2)
                self.cmap.set_attr(literal_name(k), v)
            except PSSyntaxError:
                pass
            return
        if token is self.KEYWORD_USECMAP:
            # Inherit mappings from another (predefined) CMap.
            try:
                ((_, cmapname),) = self.pop(1)
                self.cmap.use_cmap(CMapDB.get_cmap(literal_name(cmapname)))
            except PSSyntaxError:
                pass
            except CMapDB.CMapNotFound:
                pass
            return
        # Codespace ranges are not used here; just discard their operands.
        if token is self.KEYWORD_BEGINCODESPACERANGE:
            self.popall()
            return
        if token is self.KEYWORD_ENDCODESPACERANGE:
            self.popall()
            return
        if token is self.KEYWORD_BEGINCIDRANGE:
            self.popall()
            return
        if token is self.KEYWORD_ENDCIDRANGE:
            # Operands come in triples: start code, end code, first CID.
            objs = [obj for (__, obj) in self.popall()]
            for (start_byte, end_byte, cid) in choplist(3, objs):
                if not isinstance(start_byte, bytes):
                    self._warn_once("The start object of begincidrange is not a byte.")
                    continue
                if not isinstance(end_byte, bytes):
                    self._warn_once("The end object of begincidrange is not a byte.")
                    continue
                if not isinstance(cid, int):
                    self._warn_once("The cid object of begincidrange is not a byte.")
                    continue
                if len(start_byte) != len(end_byte):
                    self._warn_once(
                        "The start and end byte of begincidrange have "
                        "different lengths."
                    )
                    continue
                # Only the last 4 bytes may vary across the range; anything
                # before that is a shared prefix that must match.
                start_prefix = start_byte[:-4]
                end_prefix = end_byte[:-4]
                if start_prefix != end_prefix:
                    self._warn_once(
                        "The prefix of the start and end byte of "
                        "begincidrange are not the same."
                    )
                    continue
                svar = start_byte[-4:]
                evar = end_byte[-4:]
                start = nunpack(svar)
                end = nunpack(evar)
                vlen = len(svar)
                # Enumerate every code in [start, end] and map it to cid + offset.
                for i in range(end - start + 1):
                    x = start_prefix + struct.pack(">L", start + i)[-vlen:]
                    self.cmap.add_cid2unichr(cid + i, x)
            return
        if token is self.KEYWORD_BEGINCIDCHAR:
            self.popall()
            return
        if token is self.KEYWORD_ENDCIDCHAR:
            # Operands come in pairs: cid, code.
            objs = [obj for (__, obj) in self.popall()]
            for (cid, code) in choplist(2, objs):
                if isinstance(code, bytes) and isinstance(cid, int):
                    self.cmap.add_cid2unichr(cid, code)
            return
        if token is self.KEYWORD_BEGINBFRANGE:
            self.popall()
            return
        if token is self.KEYWORD_ENDBFRANGE:
            # Operands come in triples: start code, end code, target
            # (either a base bytestring or an explicit list of values).
            objs = [obj for (__, obj) in self.popall()]
            for (start_byte, end_byte, code) in choplist(3, objs):
                if not isinstance(start_byte, bytes):
                    self._warn_once("The start object is not a byte.")
                    continue
                if not isinstance(end_byte, bytes):
                    self._warn_once("The end object is not a byte.")
                    continue
                if len(start_byte) != len(end_byte):
                    self._warn_once("The start and end byte have different lengths.")
                    continue
                start = nunpack(start_byte)
                end = nunpack(end_byte)
                if isinstance(code, list):
                    if len(code) != end - start + 1:
                        self._warn_once(
                            "The difference between the start and end "
                            "offsets does not match the code length."
                        )
                    for cid, unicode_value in zip(range(start, end + 1), code):
                        self.cmap.add_cid2unichr(cid, unicode_value)
                else:
                    assert isinstance(code, bytes)
                    # Increment the low 4 bytes of the base value per step.
                    var = code[-4:]
                    base = nunpack(var)
                    prefix = code[:-4]
                    vlen = len(var)
                    for i in range(end - start + 1):
                        x = prefix + struct.pack(">L", base + i)[-vlen:]
                        self.cmap.add_cid2unichr(start + i, x)
            return
        if token is self.KEYWORD_BEGINBFCHAR:
            self.popall()
            return
        if token is self.KEYWORD_ENDBFCHAR:
            # Operands come in pairs: code, target bytestring.
            objs = [obj for (__, obj) in self.popall()]
            for (cid, code) in choplist(2, objs):
                if isinstance(cid, bytes) and isinstance(code, bytes):
                    self.cmap.add_cid2unichr(nunpack(cid), code)
            return
        # notdef ranges are ignored; discard their operands.
        if token is self.KEYWORD_BEGINNOTDEFRANGE:
            self.popall()
            return
        if token is self.KEYWORD_ENDNOTDEFRANGE:
            self.popall()
            return
        # Any other token is an operand: push it back for a later keyword.
        self.push((pos, token))
    def _warn_once(self, msg: str) -> None:
        """Warn once for each unique message"""
        if msg not in self._warnings:
            self._warnings.add(msg)
            base_msg = (
                "Ignoring (part of) ToUnicode map because the PDF data "
                "does not conform to the format. This could result in "
                "(cid) values in the output. "
            )
            log.warning(base_msg + msg)
def main(argv: List[str]) -> None:
    """Parse each CMap file named on the command line and dump its mapping.

    :param argv: full argument vector; ``argv[1:]`` are CMap file paths.
    """
    for fname in argv[1:]:
        cmap = FileUnicodeMap()
        # Context manager guarantees the file is closed even if parsing fails
        # (the original leaked the handle on exceptions raised by run()).
        with open(fname, "rb") as fp:
            CMapParser(cmap, fp).run()
        cmap.dump()
    return
# Allow running this module as a stand-alone CMap dumper.
if __name__ == "__main__":
    main(sys.argv)
/4cdl-0.1.tar.gz/4cdl-0.1/4cdl.py | from urlparse import urlparse
import argparse
import httplib
import urllib2
import re
import time
import json
import os
import threading
# Seconds between successive polls of the thread's JSON.
sleep_time = 10
# Seconds to wait before re-checking when all download threads are busy.
wait_thread_sleep_time = 2
# Bug fix: the original concatenation lacked spaces around the number,
# producing "Retry after2second".
cache_string = ("Run out of free thread. Retry after " +
                str(wait_thread_sleep_time) + " seconds")
# Maximum number of concurrent download threads (overridden by the -t option).
number_of_thread = 10
class downloadThread (threading.Thread):
    """Worker thread that downloads one image URL into a target folder."""
    def __init__(self, url, folder):
        threading.Thread.__init__(self)
        self.url = url        # direct image URL on the CDN
        self.folder = folder  # destination directory name (board + thread id)
    def run(self):
        # Delegates to the module-level download(); prints mark thread lifetime.
        print "Starting download thread for " + self.url
        download(self.url, self.folder)
        print "Exiting download thread for " + self.url
def download(url, folder):
    """Download `url` into `.\\folder\\`, skipping files already fully fetched.

    Resume support is size-based: if a local file exists and its size equals
    the server-reported Content-Length, the download is skipped.
    """
    file_name = '.\\' + folder + '\\' + url.split('/')[-1]
    if not os.path.exists('.\\' + folder + '\\'):
        os.makedirs('.\\' + folder + '\\')
    # A browser-like User-Agent avoids being rejected by the CDN.
    headers = {'User-Agent': 'Mozilla/5.0'}
    req = urllib2.Request(url, None, headers)
    u = urllib2.urlopen(req)
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    # Check if file is already downloaded
    if os.path.isfile(file_name) and file_size == os.stat(file_name).st_size:
        print "File "+file_name+" is already downloaded"
        return
    # Begin download
    file_size_dl = 0
    block_sz = 1024
    with open(file_name, 'wb') as f:
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # Progress indicator as percent of Content-Length.
            status = r" [%3.2f%%]" % (file_size_dl * 100. / file_size)
            print "Downloading:" + file_name + status
def check_thread(board, sid):
    """Poll the thread's JSON every `sleep_time` seconds, downloading new images.

    Loops until the API answers 404 (thread died). Each poll saves the raw
    JSON next to the images and spawns a downloadThread per new image.
    """
    prev_img_list = []
    while True:
        myConnection = httplib.HTTPSConnection(
            "a.4cdn.org")
        myConnection.request("GET", "/" + board + "/thread/" + sid + ".json")
        reply = myConnection.getresponse()
        print reply.status, reply.reason
        if reply.status == 404:
            print "404 Not found. Please check the URL again!"
            break
        temp_json = reply.read()
        # Pull per-post filename/tim/ext fragments straight from the JSON text.
        img_list = re.findall(r'"filename":".+?".+?"tim":.+?,', temp_json)
        if not os.path.exists('.\\' + board + sid + '\\'):
            os.makedirs('.\\' + board + sid + '\\')
        with open('.\\' + board + sid + '\\' + sid + ".json", 'wb') as f:
            f.write(temp_json)
        # Print img_list
        myConnection.close()
        # Only posts appended since the previous poll are processed.
        for i in img_list[len(prev_img_list):]:
            j = json.loads('{'+i[:-1]+'}')
            download_link = \
                "http://i.4cdn.org/" + board + "/" + str(j['tim']) + j['ext']
            print download_link
            # Throttle: wait until a download-thread slot frees up.
            while (threading.activeCount() == number_of_thread):
                print cache_string
                time.sleep(wait_thread_sleep_time)
            downloadThread(download_link, board + sid).start()
        prev_img_list = img_list
        time.sleep(sleep_time)
def parse_thread_URL(url):
    """Extract (board, thread_id) from a 4chan thread URL."""
    # Path looks like /<board>/thread/<id>; element 0 is the empty leading segment.
    parts = urlparse(url).path.split('/')
    board, thread_id = parts[1], parts[3]
    return board, thread_id
prog_description = 'Download all images and json of a 4chan thread until '\
    'thread dies. Resume and multi-thread download supported.'\
    'From json and the images, the original html can be generated.'
parser = argparse.ArgumentParser(description=prog_description)
parser.add_argument('threadURL', metavar='Thread_URL',
                    help='The thread URL for example '
                    'http://boards.4chan.org/biz/thread/1873336')
# Bug fix: `default=10` was mistakenly attached to the required positional
# argument (where it has no effect) while -t/--thread_num had neither a
# default nor a type, so number_of_thread became None (or a string) and the
# `threading.activeCount() == number_of_thread` throttle never triggered.
parser.add_argument('-t', '--thread_num', metavar='number', type=int, default=10,
                    help='The number of download thread, default is 10')
args = parser.parse_args()
number_of_thread = args.thread_num
board, thread_id = parse_thread_URL(args.threadURL)
check_thread(board, thread_id)
/Nodes-1.2.tar.gz/Nodes-1.2/ez_setup.py | import sys
# Setuptools version to bootstrap when none is installed.
DEFAULT_VERSION = "0.6c9"
# PyPI download base for the running major.minor Python version.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests of setuptools eggs, keyed by egg filename; kept in
# sync by update_md5() below and checked by _validate_md5() after download.
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Check downloaded egg bytes against the built-in MD5 table.

    Unknown egg names pass through unchecked; a digest mismatch aborts
    the script with exit status 2. Returns `data` unchanged on success.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
            "md5 validation of %s failed! (Possible download problem?)"
            % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    # Fetch the egg, prepend it to sys.path and flag setuptools to
    # bootstrap-install itself from that egg later.
    def do_download():
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            # Too late to swap setuptools in this process; tell the user to
            # upgrade manually and abort.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                version, download_base, delay, url
            ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both handles whether or not the transfer succeeded.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: download the egg and run easy_install on it.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            # Always remove the temporary egg from disk.
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        # '0.0.1' marks a broken/placeholder install that must be removed.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: bootstrap an upgrade via easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Hash every given egg file and record it under its basename.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    # Rewrite the md5_data literal inside this very source file.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Entry point: `--md5update <eggs...>` refreshes the built-in digest table;
# any other invocation performs the install/upgrade.
if __name__ == '__main__':
    if len(sys.argv) > 2 and sys.argv[1] == '--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
/Misago-0.36.1.tar.gz/Misago-0.36.1/misago/users/views/profile.py | from django.contrib.auth import get_user_model
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from ...acl.objectacl import add_acl_to_obj
from ...core.shortcuts import paginate, pagination_dict, validate_slug
from ..bans import get_user_ban
from ..online.utils import get_user_status
from ..pages import user_profile
from ..profilefields import profilefields, serialize_profilefields_data
from ..serializers import BanDetailsSerializer, UsernameChangeSerializer, UserSerializer
from ..viewmodels import Followers, Follows, UserPosts, UserThreads
User = get_user_model()
class ProfileView(View):
    """Base view for rendering one section of a user's profile page."""
    def get(self, request, *args, **kwargs):
        profile = self.get_profile(request, kwargs.pop("pk"), kwargs.pop("slug"))
        # resolve that we can display requested section
        sections = user_profile.get_sections(request, profile)
        active_section = self.get_active_section(sections)
        if not active_section:
            raise Http404()
        profile.status = get_user_status(request, profile)
        context_data = self.get_context_data(request, profile)
        self.complete_frontend_context(request, profile, sections)
        self.complete_context_data(request, profile, sections, context_data)
        return render(request, self.template_name, context_data)
    def get_profile(self, request, pk, slug):
        """Return the profiled user or raise 404; validates the URL slug."""
        queryset = User.objects.select_related("rank", "online_tracker", "ban_cache")
        profile = get_object_or_404(queryset, pk=pk)
        # Deactivated accounts are only visible to staff members.
        if not profile.is_active and not request.user.is_staff:
            raise Http404()
        validate_slug(profile, slug)
        add_acl_to_obj(request.user_acl, profile)
        return profile
    def get_active_section(self, sections):
        """Return the section flagged as active, or None if there is none."""
        for section in sections:
            if section["is_active"]:
                return section
    def get_context_data(self, request, profile):
        """Hook for subclasses to supply section-specific template context."""
        return {}
    def complete_frontend_context(self, request, profile, sections):
        """Expose the section list and serialized profile to the JS frontend."""
        request.frontend_context["PROFILE_PAGES"] = []
        for section in sections:
            request.frontend_context["PROFILE_PAGES"].append(
                {
                    "name": str(section["name"]),
                    "icon": section["icon"],
                    "meta": section.get("metadata"),
                    "component": section["component"],
                }
            )
        request.frontend_context["PROFILE"] = UserProfileSerializer(
            profile, context={"request": request}
        ).data
        # Flags added on top of the serializer output.
        if not profile.is_active:
            request.frontend_context["PROFILE"]["is_active"] = False
        if profile.is_deleting_account:
            request.frontend_context["PROFILE"]["is_deleting_account"] = True
    def complete_context_data(self, request, profile, sections, context):
        """Fill the template context shared by every profile section."""
        context["profile"] = profile
        context["sections"] = sections
        for section in sections:
            if section["is_active"]:
                context["active_section"] = section
                break
        if request.user.is_authenticated:
            is_authenticated_user = profile.pk == request.user.pk
            context.update(
                {
                    "is_authenticated_user": is_authenticated_user,
                    "show_email": is_authenticated_user,
                }
            )
            # Others may see the email only with the explicit permission.
            if not context["show_email"]:
                context["show_email"] = request.user_acl["can_see_users_emails"]
        else:
            context.update({"is_authenticated_user": False, "show_email": False})
class LandingView(ProfileView):
    """Redirect bare profile URLs to the profile's default section."""

    def get(self, request, *args, **kwargs):
        target = self.get_profile(request, kwargs.pop("pk"), kwargs.pop("slug"))
        default_link = user_profile.get_default_link()
        return redirect(default_link, slug=target.slug, pk=target.pk)
class UserPostsView(ProfileView):
    """Profile section listing the user's posts."""

    template_name = "misago/profile/posts.html"

    def get_context_data(self, request, profile):
        posts_feed = UserPosts(request, profile)
        request.frontend_context["POSTS"] = posts_feed.get_frontend_context()
        return posts_feed.get_template_context()
class UserThreadsView(ProfileView):
    """Profile section listing the user's started threads."""

    template_name = "misago/profile/threads.html"

    def get_context_data(self, request, profile):
        threads_feed = UserThreads(request, profile)
        # NOTE(review): the frontend key is "POSTS" here as well — presumably the
        # JS component is shared with the posts feed; confirm before renaming.
        request.frontend_context["POSTS"] = threads_feed.get_frontend_context()
        return threads_feed.get_template_context()
class UserFollowersView(ProfileView):
    """Profile section listing the users who follow this profile."""

    template_name = "misago/profile/followers.html"

    def get_context_data(self, request, profile):
        followers = Followers(request, profile)
        request.frontend_context["PROFILE_FOLLOWERS"] = followers.get_frontend_context()
        return followers.get_template_context()
class UserFollowsView(ProfileView):
    """Profile section listing the users this profile follows."""

    template_name = "misago/profile/follows.html"

    def get_context_data(self, request, profile):
        follows = Follows(request, profile)
        request.frontend_context["PROFILE_FOLLOWS"] = follows.get_frontend_context()
        return follows.get_template_context()
class UserProfileDetailsView(ProfileView):
    """Profile section showing the user's profile-field details."""

    template_name = "misago/profile/details.html"

    def get_context_data(self, request, profile):
        serialized = serialize_profilefields_data(request, profilefields, profile)
        request.frontend_context["PROFILE_DETAILS"] = serialized
        return {"profile_details": serialized}
class UserUsernameHistoryView(ProfileView):
    """Profile section listing the user's username changes, newest first."""

    template_name = "misago/profile/username_history.html"

    def get_context_data(self, request, profile):
        changes = profile.namechanges.select_related("user", "changed_by")
        changes = changes.order_by("-id")
        # 14 items per page, orphans up to 4 folded onto the previous page.
        page = paginate(changes, None, 14, 4)
        data = pagination_dict(page)
        serializer = UsernameChangeSerializer(page.object_list, many=True)
        data.update({"results": serializer.data})
        request.frontend_context["PROFILE_NAME_HISTORY"] = data
        return {"history": page.object_list, "count": data["count"]}
class UserBanView(ProfileView):
    """Profile section showing the details of the user's ban."""

    template_name = "misago/profile/ban_details.html"

    def get_context_data(self, request, profile):
        user_ban = get_user_ban(profile, request.cache_versions)
        request.frontend_context["PROFILE_BAN"] = BanDetailsSerializer(user_ban).data
        return {"ban": user_ban}
# The subset of UserSerializer fields exposed on the profile page, in the
# order they should be serialized.
_PROFILE_SERIALIZER_FIELDS = (
    "id",
    "username",
    "slug",
    "email",
    "joined_on",
    "rank",
    "title",
    "avatars",
    "is_avatar_locked",
    "signature",
    "is_signature_locked",
    "followers",
    "following",
    "threads",
    "posts",
    "acl",
    "is_followed",
    "is_blocked",
    "real_name",
    "status",
    "api",
    "url",
)

UserProfileSerializer = UserSerializer.subset_fields(*_PROFILE_SERIALIZER_FIELDS)
define("dojox/mobile/TabBarButton", [
    "dojo/_base/declare",
    "dojo/_base/lang",
    "dojo/_base/window",
    "dojo/dom-class",
    "dojo/dom-construct",
    "dijit/registry",
    "./common",
    "./_ItemBase"
], function(declare, lang, win, domClass, domConstruct, registry, common, ItemBase){

    // A button widget placed inside a dojox/mobile TabBar.
    return declare("dojox.mobile.TabBarButton", ItemBase, {
        icon1: "",          // icon shown while the button is NOT selected
        icon2: "",          // icon shown while the button IS selected
        iconPos1: "",       // sprite position for icon1
        iconPos2: "",       // sprite position for icon2
        selected: false,    // current selection state
        transition: "none",
        tag: "LI",
        selectOne: true,    // only one button in the bar may be selected

        inheritParams: function(){
            // Fall back to the generic `icon` property for the unselected icon.
            if(this.icon && !this.icon1){
                this.icon1 = this.icon;
            }
            var parent = this.getParent();
            if(parent){
                if(!this.transition){
                    this.transition = parent.transition;
                }
                // A parent iconBase ending in "/" acts as a path prefix;
                // otherwise it is itself the default icon.
                if(this.icon1 && parent.iconBase &&
                    parent.iconBase.charAt(parent.iconBase.length - 1) === "/"){
                    this.icon1 = parent.iconBase + this.icon1;
                }
                if(!this.icon1){
                    this.icon1 = parent.iconBase;
                }
                if(!this.iconPos1){
                    this.iconPos1 = parent.iconPos;
                }
                if(this.icon2 && parent.iconBase &&
                    parent.iconBase.charAt(parent.iconBase.length - 1) === "/"){
                    this.icon2 = parent.iconBase + this.icon2;
                }
                if(!this.icon2){
                    this.icon2 = parent.iconBase || this.icon1;
                }
                if(!this.iconPos2){
                    this.iconPos2 = parent.iconPos || this.iconPos1;
                }
            }
        },

        buildRendering: function(){
            var anchor = this.anchorNode =
                domConstruct.create("A", {className: "mblTabBarButtonAnchor"});
            this.connect(anchor, "onclick", "onClick");
            this.box = domConstruct.create("DIV",
                {className: "mblTabBarButtonTextBox"}, anchor);
            var textBox = this.box;
            var labelText = "";
            var src = this.srcNodeRef;
            if(src){
                // Move the source node's children into the text box while
                // collecting text-node content as the default label.
                for(var i = 0, len = src.childNodes.length; i < len; i++){
                    var child = src.firstChild;
                    if(child.nodeType === 3){
                        labelText += lang.trim(child.nodeValue);
                    }
                    textBox.appendChild(child);
                }
            }
            if(!this.label){
                this.label = labelText;
            }
            this.domNode = this.srcNodeRef || domConstruct.create(this.tag);
            this.containerNode = this.domNode;
            this.domNode.appendChild(anchor);
            if(this.domNode.className.indexOf("mblDomButton") != -1){
                // CSS-drawn (DOM) button: build its inner node structure.
                var btnDiv = domConstruct.create("DIV", null, anchor);
                common.createDomButton(this.domNode, null, btnDiv);
                domClass.add(this.domNode, "mblTabButtonDomButton");
                domClass.add(btnDiv, "mblTabButtonDomButtonClass");
            }
            if((this.icon1 || this.icon).indexOf("mblDomButton") != -1){
                domClass.add(this.domNode, "mblTabButtonDomButton");
            }
        },

        startup: function(){
            if(this._started){ return; }
            this.inheritParams();
            var parent = this.getParent();
            var barCls = parent ? parent._clsName : "mblTabBarButton";
            domClass.add(this.domNode,
                barCls + (this.selected ? " mblTabButtonSelected" : ""));
            if(parent && parent.barType == "segmentedControl"){
                // Segmented controls use the bar's own class and no text box class.
                domClass.remove(this.domNode, "mblTabBarButton");
                domClass.add(this.domNode, parent._clsName);
                this.box.className = "";
            }
            this.set({icon1: this.icon1, icon2: this.icon2});
            this.inherited(arguments);
        },

        select: function(){
            if(arguments[0]){
                // Called as select(true) from deselect().
                this.selected = false;
                domClass.remove(this.domNode, "mblTabButtonSelected");
            }else{
                this.selected = true;
                domClass.add(this.domNode, "mblTabButtonSelected");
                // Deselect every sibling button widget in the bar.
                for(var i = 0, siblings = this.domNode.parentNode.childNodes;
                        i < siblings.length; i++){
                    if(siblings[i].nodeType != 1){ continue; }
                    var widget = registry.byNode(siblings[i]);
                    if(widget && widget != this){
                        widget.deselect();
                    }
                }
            }
            // Show the icon that matches the current selection state.
            if(this.iconNode1){
                this.iconNode1.style.visibility = this.selected ? "hidden" : "";
            }
            if(this.iconNode2){
                this.iconNode2.style.visibility = this.selected ? "" : "hidden";
            }
        },

        deselect: function(){
            this.select(true);
        },

        onClick: function(e){
            this.defaultClickAction();
        },

        _setIcon: function(icon, pos, num, hide){
            // Shared updater for icon<num>/iconPos<num>/iconNode<num>;
            // `hide` hides the freshly created icon node.
            var iconProp = "icon" + num,
                nodeProp = "iconNode" + num,
                posProp = "iconPos" + num;
            if(icon){
                this[iconProp] = icon;
            }
            if(pos){
                if(this[posProp] === pos){ return; }
                this[posProp] = pos;
            }
            if(icon && icon !== "none"){
                if(!this.iconDivNode){
                    this.iconDivNode = domConstruct.create("DIV",
                        {className: "mblTabBarButtonDiv"}, this.anchorNode, "first");
                }
                if(!this[nodeProp]){
                    this[nodeProp] = domConstruct.create("div",
                        {className: "mblTabBarButtonIcon"}, this.iconDivNode);
                }else{
                    domConstruct.empty(this[nodeProp]);
                }
                common.createIcon(icon, this[posProp], null, this.alt, this[nodeProp]);
                if(this[posProp]){
                    domClass.add(this[nodeProp].firstChild, "mblTabBarButtonSpriteIcon");
                }
                domClass.remove(this.iconDivNode, "mblTabBarButtonNoIcon");
                this[nodeProp].style.visibility = hide ? "hidden" : "";
            }else if(this.iconDivNode){
                domClass.add(this.iconDivNode, "mblTabBarButtonNoIcon");
            }
        },

        _setIcon1Attr: function(icon){
            this._setIcon(icon, null, 1, this.selected);
        },
        _setIcon2Attr: function(icon){
            this._setIcon(icon, null, 2, !this.selected);
        },
        _setIconPos1Attr: function(pos){
            this._setIcon(null, pos, 1, this.selected);
        },
        _setIconPos2Attr: function(pos){
            this._setIcon(null, pos, 2, !this.selected);
        },
        _setLabelAttr: function(text){
            this.label = text;
            this.box.innerHTML = this._cv ? this._cv(text) : text;
        }
    });
});
# Standard libraries
from decimal import Decimal
import inspect
import math
import re
import warnings
# Other packages
import numpy as np
import pandas as pd
import pathlib as pl
import shapely.geometry as gm
from shapely.ops import polygonize
from scipy.ndimage import morphology as mp
from skimage.morphology import medial_axis
from skimage.filters import gaussian
from skimage.transform import resize as resize_arr
from skimage.measure import find_contours
# LAM modules
from src.settings import Store, Settings as Sett
import src.plotfuncs as pfunc
import src.logger as lg
import src.system as system
LAM_logger = None
class GetSample:
"""Collect sample data and process for analysis."""
    def __init__(self, sample_path: pl.Path, paths: system.Paths, process=True, projection=False):
        """Set up sample metadata, its output directory, and channel paths.

        Registers the sample and its group in the global Store, and — when
        only projecting (process=False, projection=True) — also registers
        channel names and loads the sample's existing vector.
        """
        self.name = sample_path.stem
        self.sampledir = paths.samplesdir.joinpath(self.name)
        # Group name is the prefix before the first underscore in the sample name.
        self.group = self.name.split('_')[0]
        # Add sample and group to storing variables
        if self.name not in Store.samples:
            Store.samples.append(self.name)
            Store.samples = sorted(Store.samples)
        if self.group not in Store.samplegroups:
            Store.samplegroups.append(self.group)
            Store.samplegroups = sorted(Store.samplegroups)
        # Make folder for storing data and find data-containing files
        if not self.sampledir.exists():
            pl.Path.mkdir(self.sampledir)
        self.channelpaths = list([p for p in sample_path.iterdir() if p.is_dir()])
        # Channel name is the second-to-last underscore-separated token of the path.
        self.channels = [str(p).split('_')[(-2)] for p in self.channelpaths]
        self.vect_data = None
        self.MP = None
        self.data = None
        self.vector = None
        self.vector_length = None
        if process is False and projection is True:
            for channel in self.channels: # Store all found channel names
                if (channel.lower() not in [c.lower() for c in Store.channels] and
                        channel.lower() != Sett.MPname.lower()):
                    Store.channels.append(channel)
            self.find_sample_vector(paths.datadir)
    def find_sample_vector(self, path): # path = data directory
        """Find sample's vector file, load it, and record its length.

        Sets self.vector / self.vector_length and appends the length to
        Length.csv in the data directory. Missing or faulty vector files
        are logged and printed, not raised.
        """
        try: # Find sample's vector file
            paths = list(self.sampledir.glob('Vector.*'))
            self.vector = system.read_vector(paths)
            self.vector_length = self.vector.length
            length_series = pd.Series(self.vector_length, name=self.name)
            system.save_to_file(length_series, path, 'Length.csv')
        # If vector file not found
        except (FileNotFoundError, IndexError):
            msg = f'Vector-file NOT found for {self.name}'
            lg.logprint(LAM_logger, msg, 'e')
            print(f'ERROR: {msg}')
        except (AttributeError, ValueError): # If vector file is faulty
            msg = f'Faulty vector for {self.name}'
            lg.logprint(LAM_logger, msg, 'c')
            print(f'CRITICAL: {msg}')
def get_vect_data(self, channel):
"""Get channel data that is used for vector creation."""
try:
# Search string:
namer = str("_{}_".format(channel))
namerreg = re.compile(namer, re.I)
# Search found paths with string
dir_path = [self.channelpaths[i] for i, s in enumerate(self.channelpaths)
if namerreg.search(str(s))][0]
vect_path = next(dir_path.glob('*Position.csv'))
vect_data = system.read_data(vect_path, header=Sett.header_row) # Read data
except (FileNotFoundError, IndexError): # If data file not found
msg = 'No valid datafile for vector creation.'
if LAM_logger is not None:
lg.logprint(LAM_logger, msg, 'w')
print('-> {}'.format(msg))
vect_data = None
return vect_data
def create_skeleton(self):
# Extract point coordinates of the vector:
positions = self.vect_data
x, y = positions.loc[:, 'Position X'], positions.loc[:, 'Position Y']
coord_df, bin_array, skeleton = self.binarize_coords(x, y, Sett.SkeletonResize,
Sett.BDiter, Sett.SigmaGauss)
line_df = self.skeleton_vector(coord_df)
if line_df is not None and not line_df.empty:
system.save_to_file(line_df, self.sampledir, 'Vector.csv', append=False)
pfunc.skeleton_plot(self.sampledir, self.name, bin_array, skeleton)
def create_median(self):
# Extract point coordinates of the vector:
positions = self.vect_data
x, y = positions.loc[:, 'Position X'], positions.loc[:, 'Position Y']
line_df = self.median_vector(x, y, Sett.medianBins)
if line_df is not None and not line_df.empty:
system.save_to_file(line_df, self.sampledir, 'Vector.csv', append=False)
    def binarize_coords(self, x_values, y_values, resize: float, bd_iter: int, sigma_gauss: float):
        """Create a binary image from cell coordinates and skeletonize it.

        Returns a tuple of (DataFrame of skeleton pixel coordinates with
        'Y'/'X' columns mapped back to the original coordinate space, the
        filled boolean sample image, and the skeleton array).
        """
        def _binarize():
            """Transform XY into binary image and perform operations on it."""
            # Create DF indices (X&Y-coords) with a buffer for operations:
            buffer = 1000 * resize
            # Get needed axis related variables:
            x_max, x_min = round(max(x_values) + buffer), round(min(x_values) - buffer)
            y_max, y_min = round(max(y_values) + buffer), round(min(y_values) - buffer)
            y_size = round(y_max - y_min)
            x_size = round(x_max - x_min)
            # Create binary array
            binary_arr = np.zeros((y_size, x_size))
            for coord in coords: # Set cell locations in array to True
                binary_arr[round(coord[1] - y_min),
                round(coord[0] - x_min)] = 1
            if resize != 1:
                # Downscale, then re-threshold the interpolated values.
                y_size = round(y_size * resize)
                x_size = round(x_size * resize)
                binary_arr = resize_arr(binary_arr, (y_size, x_size))
                binary_arr = np.where(binary_arr > 0, 1, 0)
            # Create Series to store real coordinate labels
            x_lbl = pd.Series(np.linspace(x_min, x_max, x_size), index=pd.RangeIndex(binary_arr.shape[1]))
            y_lbl = pd.Series(np.linspace(y_min, y_max, y_size), index=pd.RangeIndex(binary_arr.shape[0]))
            # BINARY DILATION
            if bd_iter > 0:
                struct = mp.generate_binary_structure(2, 2)
                binary_arr = mp.binary_dilation(binary_arr, iterations=int(bd_iter), structure=struct)
            # SMOOTHING
            if sigma_gauss > 0: # Gaussian smoothing
                binary_arr = gaussian(binary_arr, sigma=sigma_gauss)
            # FIND CONTOURS
            contours = find_contours(binary_arr, 0.5)
            continuous = contours
            try:
                pol = gm.MultiPolygon(polygonize(continuous))
            except (TypeError, NotImplementedError):
                pol = gm.Polygon(continuous)
            # CREATE ARRAY WITH FILLED SAMPLE OUTLINE
            segm = _intersection(binary_arr.shape, pol)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=UserWarning)
                bool_bin_arr = mp.binary_fill_holes(segm)
            return bool_bin_arr, y_lbl, x_lbl
        def _intersection(shape: tuple, pols: gm.multipolygon) -> np.ndarray:
            """Create binary array by filling given polygons."""
            segment_array = np.zeros(shape)
            # Loop rows of array
            for ind in np.arange(shape[0]+1):
                # Create LineString from row and see where it intersects with polygons
                row = gm.LineString([(ind, 0), (ind, shape[1]+1)])
                section = row.intersection(pols)
                if section.is_empty:
                    continue
                # Test the datatype of intersection:
                if isinstance(section, gm.LineString):
                    _, miny, _, maxy = section.bounds
                    # Assign True to elements that fall within the polygons
                    segment_array[ind, round(miny):round(maxy)] = 1
                # If the results gave collections of objects:
                elif isinstance(section, (gm.MultiLineString, gm.collection.GeometryCollection)):
                    # Assign values from each object
                    for geom in section.geoms:
                        _, miny, _, maxy = geom.bounds
                        segment_array[ind, round(miny):round(maxy)] = 1
            return segment_array
        coords = list(zip(x_values, y_values))
        # Transform to binary
        bin_array, bin_arr_ind, bin_arr_cols = _binarize()
        # Make skeleton and get coordinates of skeleton pixels
        skeleton = medial_axis(bin_array)
        # Map skeleton pixel indices back to real-world coordinates.
        skel_values = [(bin_arr_ind.iat[y], bin_arr_cols.iat[x]) for y, x in zip(*np.where(skeleton == 1))]
        # Dataframe from skeleton coords
        coord_df = pd.DataFrame(skel_values, columns=['Y', 'X']).infer_objects()
        return coord_df, bin_array, skeleton
def skeleton_vector(self, coord_df):
    """Create vector by skeletonization of image-transformed positions.

    Walks from the low-X end of the skeleton, scoring nearby pixels by
    direction and distance, and appends the best candidate until no pixels
    remain within reach.

    Args:
        coord_df: DataFrame of skeleton pixel coordinates ('X', 'Y').
            Consumed in place - used pixels are dropped as the walk proceeds.

    Returns:
        DataFrame with the simplified vector's 'X' and 'Y' coordinates.
    """
    def _score_nearest(test_point):
        """Score nearby pixels by direction and distance from the vector tip."""
        # DataFrame for storing relevant info on pixel coordinates
        score = pd.DataFrame(np.zeros((nearest.size, 6)), index=nearest,
                             columns=['rads', 'dist_test', 'dist_vect', 'penalty', 'X', 'Y'])
        score.X, score.Y = coord_df.X, coord_df.Y
        # Get direction of test point
        test_x, test_y = test_point.x - last_point.x, test_point.y - last_point.y
        test_rad = math.atan2(test_y, test_x)
        # Calculate scoring variables
        shifts = pd.DataFrame(data=(score.X - last_point.x, score.Y - last_point.y), columns=nearest).T
        score.rads = shifts.apply(lambda p, r=test_rad: abs(math.atan2(p.iat[1], p.iat[0]) - r), axis=1)
        score.dist_vect = score.apply(lambda p, t=last_point: t.distance(gm.Point(p.X, p.Y)), axis=1)
        score.dist_test = score.apply(lambda p, t=test_point: t.distance(gm.Point(p.X, p.Y)), axis=1)
        # Lower penalty is better; angular deviation is weighted heavily.
        score.penalty = score.dist_vect + score.dist_test + (score.rads * 10)
        # Drop values that would turn the vector to move backwards
        score.penalty.loc[score.rads > 1.9] = np.nan
        return score

    def _find_pixel(line, s_x, s_y, flag=False):
        """Pick the best-scoring pixel, extend the vector, prune used pixels."""
        # Establish the vector's direction and project a forward point:
        test_point = define_scoring_point(line, s_x, s_y)
        # Calculate scoring of pixels
        scores = _score_nearest(test_point)
        # Get the pixels that are behind current vector coord.
        # FIX: parenthesize the angle comparison - the original
        # `... & scores.rads > 1.3` bound `&` before `>`; and use isna()
        # instead of `== np.nan`, which is always False and never matched.
        forfeit = scores.loc[((scores.dist_test > scores.dist_vect) & (scores.rads > 1.3)) |
                             (scores.penalty.isna())].index
        # Find the pixel with the smallest penalty and add to vector:
        try:
            best = scores.penalty.idxmin()
            x_2, y_2 = coord_df.X.at[best], coord_df.Y.at[best]
            # Drop used pixel and pixels falling behind vector
            forfeit = forfeit.append(pd.Index([best], dtype='int64'))
            coord_df.drop(forfeit, inplace=True)
            # Set found pixel for the next loop
            line.append((x_2, y_2))
            s_x, s_y = x_2, y_2
        except (ValueError, KeyError):
            # No valid candidate remained; signal the caller to stop.
            flag = True
        return line, s_x, s_y, flag

    # BEGIN CREATION OF VECTOR FROM SKELETON COORDS
    finder = Sett.find_dist  # Distance for detection of nearby XY
    line = []  # For storing vector
    # Start from mean coordinates of pixels with smallest x-coords
    start = coord_df.nsmallest(5, 'X').idxmin()
    s_x, s_y = coord_df.loc[start, 'X'].mean(), coord_df.loc[start, 'Y'].mean()
    line.append((s_x, s_y))
    coord_df.drop(start, inplace=True)  # Drop the start coordinates from data
    s_x, s_y = s_x + finder / 4, s_y
    # Continue finding next pixels until flagged ready:
    flag = False
    while not flag:
        # Current end point of line:
        last_point = gm.Point(line[-1])
        # Find pixels near to the current coordinate
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            nearest = coord_df[(abs(coord_df.X - s_x) <= finder) &
                               (abs(coord_df.Y - s_y) <= finder)].index
        if nearest.size >= 1:
            line, s_x, s_y, flag = _find_pixel(line, s_x, s_y)
        else:
            flag = True
    # Create LineString-object from finished vector:
    try:
        xy_coord = gm.LineString(line).simplify(Sett.simplifyTol).xy
        linedf = pd.DataFrame(data=list(zip(xy_coord[0], xy_coord[1])), columns=['X', 'Y'])
    # If something went wrong with creation, warn
    except (ValueError, AttributeError):
        linedf = pd.DataFrame().assign(X=[line[0][0]], Y=[line[0][1]])
        msg = 'Faulty vector for {}'.format(self.name)
        if LAM_logger is not None:
            lg.logprint(LAM_logger, msg, 'e')
        print("WARNING: Faulty vector. Try different settings")
    return linedf
def median_vector(self, x_values, y_values, creation_bins):
    """Create vector by calculating center Y-coordinates of X-axis bins.

    NOTE(review): despite the name, bins after the first use the midrange
    (min + (max - min) / 2) of the bin's Y-values, not the median, and the
    first bin uses the mean - confirm intent before renaming.

    Args:
        x_values: Feature X-coordinates.
        y_values: Feature Y-coordinates.
        creation_bins: Number of equidistant X-axis bins to use.

    Returns:
        DataFrame with the simplified vector's 'X' and 'Y' coordinates.
    """
    # Divide sample to equidistant points between min & max X-coord:
    bins = np.linspace(x_values.min(), x_values.max(), creation_bins)
    idx = np.digitize(x_values, bins, right=True)
    y_median = np.zeros(creation_bins)
    # Find center Y-coord at first bin (mean; may be NaN for an empty bin):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=RuntimeWarning)
        y_median[0] = np.nanmean(y_values[(idx == 1)])
    # Then find the center coordinate for the rest of the bins
    for b in range(1, creation_bins):
        cells = y_values[idx == b]
        if cells.size == 0:  # If no cells at bin, copy previous Y-coord
            y_median[b] = y_median[b - 1]
        else:
            # Reuse the already-selected bin values instead of re-slicing
            # y_values three times as the original did.
            y_median[b] = cells.min() + (cells.max() - cells.min()) / 2
    # Change bins and their center values into XY-coordinates, dropping NaNs
    xy_median = [p for p in tuple(np.stack((bins, y_median), axis=1)) if ~np.isnan(p).any()]
    # Create LineString-object from finished vector, simplify, and get new coords
    xy_coord = gm.LineString(xy_median).simplify(Sett.simplifyTol).xy
    linedf = pd.DataFrame(data=list(zip(xy_coord[0], xy_coord[1])), columns=['X', 'Y'])
    return linedf
def get_mps(self, mp_name: str, use_mp: bool, datadir: pl.Path) -> pd.Series:
    """Collect MPs for sample anchoring.

    Args:
        mp_name: Name of the measurement point (MP) channel.
        use_mp: Whether MPs are used for anchoring.
        datadir: Directory where the combined MP file is saved.

    Returns:
        Series of the sample's projected MP bin, a zero Series when MPs are
        not used, or None when MP data is missing or empty.
    """
    if use_mp:
        # FIX: default to None up front - the original left mp_bin unbound
        # when the MP file was found but contained no rows, raising
        # UnboundLocalError at the return statement (outside the try).
        mp_bin = None
        try:  # Get measurement point for anchoring
            mp_dir_path = next(self.channelpaths.pop(i) for i, s in enumerate(self.channelpaths) if
                               str('_' + mp_name + '_') in str(s))
            mp_path = next(mp_dir_path.glob("*Position.csv"))
            mp_data = system.read_data(mp_path, header=Sett.header_row, test=False)
            mp_data = mp_data.loc[:, ['Position X', 'Position Y']]
            if not mp_data.empty:
                # Project the MP onto the vector and persist per-sample copy.
                mp_bin = self.project_mps(mp_data, datadir, filename="MPs.csv")
                mp_df = pd.DataFrame({'MP': mp_bin.values})
                mp_df.to_csv(self.sampledir.joinpath("MPs.csv"), index=False)
        except (StopIteration, ValueError, UnboundLocalError):
            mp_bin = None
            msg = f'could not find MP position for {self.name}'
            lg.logprint(LAM_logger, msg, 'e')
            print(" -> Failed to find MP position data.")
    else:  # Sets measurement point values to zero when MP's are not used
        mp_bin = pd.Series(0, name=self.name)
        system.save_to_file(mp_bin, datadir, "MPs.csv")
        system.save_to_file(mp_bin, self.sampledir, "MPs.csv", append=False)
    return mp_bin
def project_mps(self, positions, datadir, filename="some.csv"):
    """For the projection of spot coordinates onto the vector.

    NOTE: mutates the caller's `positions` DataFrame in place (adds the
    'NormDist' and 'DistBin' columns) and assigns it to self.data.

    Args:
        positions: DataFrame with 'Position X' / 'Position Y' columns.
        datadir: Directory where the combined MP file is saved.
        filename: Name of the combined output file.

    Returns:
        Series of the projected MP bin(s), named after the sample.
    """
    xy_positions = list(zip(positions['Position X'], positions['Position Y']))
    points = (gm.Point(c) for c in xy_positions)
    # Find points of projection and respective normalized linear reference along vector
    projected = [self.vector.interpolate(self.vector.project(p)) for p in points]
    positions["NormDist"] = [self.vector.project(p, normalized=True) for p in projected]
    # Get bins for the projected MPs
    positions["DistBin"] = pd.cut(positions.loc[:, "NormDist"], bins=np.linspace(0, 1, Sett.projBins+1),
                                  labels=np.arange(0, Sett.projBins), include_lowest=True)
    # Format output and sanity check on results:
    self.data = positions
    self.test_projection(Sett.MPname)
    mp_bin = pd.Series(positions.loc[:, "DistBin"], name=self.name)
    system.save_to_file(mp_bin.astype(int), datadir, filename)
    return mp_bin
def project_channel(self, channel):
    """For projecting coordinates onto the vector.

    Args:
        channel: GetChannel instance whose feature positions are projected.

    Returns:
        The channel DataFrame with projection columns added
        ('NormDist', 'DistBin', 'VectPoint', 'ProjDist').
    """
    data = channel.data
    xy_positions = list(zip(data['Position X'], data['Position Y']))
    points = gm.MultiPoint(xy_positions)
    # Get linear references of projections, then interpolate coordinates and get normalized references.
    proj_vector_dist = [self.vector.project(p) for p in points.geoms]
    proj_points = [self.vector.interpolate(d) for d in proj_vector_dist]
    data["NormDist"] = [d / self.vector_length for d in proj_vector_dist]
    # Based on projection, assign bins for features on the channel.
    # NOTE(review): .astype('int') raises if any NormDist falls outside
    # [0, 1] (pd.cut yields NaN there) - presumably projection guarantees
    # the range; confirm.
    data["DistBin"] = pd.cut(data.loc[:, "NormDist"], labels=np.arange(0, Sett.projBins), bins=np.linspace(0, 1, Sett.projBins+1), include_lowest=True).astype('int')
    # Format output, plus a sanity check on results
    data["VectPoint"] = [(round(p.x, 3), round(p.y, 3)) for p in proj_points]
    data["ProjDist"] = [v.distance(p) for (v, p) in zip(points.geoms, proj_points)]
    system.save_to_file(data, self.sampledir, f'{channel.name}.csv', append=False)
    self.data = data
    self.test_projection(channel.name)
    return data
def find_counts(self, channel_name, datadir):
    """Count projected features per bin and save the per-sample counts."""
    # Tally how many features landed in each projection bin.
    bin_counts = pd.Series(
        np.nan_to_num(np.bincount(self.data['DistBin'], minlength=Sett.projBins)),
        name=self.name)
    system.save_to_file(bin_counts, datadir, f'All_{channel_name}.csv')
    # Sanity check the channel that was used for vector creation.
    if channel_name == Sett.vectChannel:
        test_count_projection(bin_counts, self.name)
def test_projection(self, name):
    """Warn when some features did not receive a projection bin."""
    # A NaN bin means the feature fell outside the vector's bin range.
    if self.data["DistBin"].isna().any():
        print(f" -> {name}: All features were not projected. Check vector and data.")
class VectorError(Exception):
    """Raised when one or more samples are missing their vector files."""

    def __init__(self, samples, message='CRITICAL: vectors not found for all samples.'):
        # Keep the offending sample names so callers can report them.
        self.samples = samples
        self.message = message
        super().__init__(message)
class GetChannel:
    """Find and read channel data plus additional data.

    Reads the channel's Position.csv, merges requested additional data
    files (e.g. intensities) onto the feature IDs, discards invariant
    columns, and optionally renames file-derived column identifiers.

    Attributes:
        sample: The parent sample object.
        datafail: Names of additional-data columns discarded as invariant.
        datadir: The analysis data directory.
        name: Channel name parsed from the directory name.
        path: Path to the channel's directory.
        pospath: Path to the channel's Position.csv file.
        data: The merged channel DataFrame.
    """

    def __init__(self, path, sample, data_keys, datadir):
        self.sample = sample
        self.datafail = []
        self.datadir = datadir
        # Channel name is the second-to-last '_'-separated token of the dir name.
        self.name = str(path.stem).split('_')[-2]
        self.path = path
        self.pospath = next(self.path.glob("*Position.csv"))
        self.data = self.read_channel(self.pospath)
        self.read_additional(data_keys)
        # Remember channels that carry clustering information.
        if 'ClusterID' in self.data.columns:
            Store.clusterPaths.append(self.path)

    def read_channel(self, path):
        """Read channel data into a dataframe."""
        try:
            data = system.read_data(str(path), header=Sett.header_row)
            channel = self.name
            # Register the channel name once, skipping the MP channel.
            if channel.lower() not in [c.lower() for c in Store.channels] and channel.lower() != Sett.MPname.lower():
                Store.channels.append(self.name)
            return data
        except ValueError:
            # NOTE(review): on failure this returns None, which will raise
            # downstream when self.data is used - confirm intended handling.
            lg.logprint(LAM_logger, 'Cannot read channel path {}'.format(path),
                        'ex')

    def read_additional(self, data_keys):
        """Read relevant additional data of channel."""
        def _test_variance(data):
            """Test if additional data column contains variance."""
            for column in data.columns.difference(['ID']):
                test = data.loc[:, column].dropna()
                # Normalize before testing spread; near-zero std means the
                # column carries no usable information.
                test = (test - test.min()) / test.max()
                if test.std() < 0.01:
                    self.datafail.append(column)
                    data.loc[:, column] = np.nan
            return data

        def _rename_id(data):
            """Rename filename identification of channel."""
            # I.e. as defined by settings.channelID
            for column in data.columns:
                id_str = str(column).split('_')[-1]
                if id_str in Sett.channelID.keys():
                    new_id = Sett.channelID.get(id_str)
                    data.rename(columns={column: column.replace(f'_{id_str}', f'-{new_id}')}, inplace=True)
            return data

        # Start from the feature IDs and merge each data type onto them.
        add_data = pd.DataFrame(self.data.loc[:, 'ID'])
        for key, values in data_keys.items():
            paths = list(self.path.glob(f'*{values[0]}*'))
            if not paths:
                print(f"-> {self.name} {key} file not found")
                continue
            if len(paths) == 1:
                namer = re.compile(f'^{key}', re.I)
                # Data already present in Position.csv needs no merging.
                if paths[0] == self.pospath and any(self.data.columns.str.contains(namer)):
                    continue
                if paths[0] == self.pospath and not any(self.data.columns.str.contains(namer)):
                    print(f"'{key}' not in {self.pospath.name} of {self.sample.name} on channel {self.name}")
                temp_data = system.read_data(str(paths[0]), header=Sett.header_row)
                # Keep only the matching data columns plus the ID column.
                cols = temp_data.columns.map(lambda x, matcher=namer: bool(re.match(matcher, x)) or x == 'ID')
                temp_data = temp_data.loc[:, cols]
                add_data = pd.merge(add_data, temp_data, on='ID')
            else:  # If multiple files, e.g. intensity, get all
                for path in paths:
                    # Search identifier for column from filename
                    strings = str(path.stem).split(f'{values[0]}_')
                    id_string = strings[1].split('_')[0]
                    # Locate columns
                    temp_data = system.read_data(str(path), header=Sett.header_row)
                    temp_data = temp_data.loc[:, [key, 'ID']]
                    # Suffix the data column with the file's identifier so
                    # columns from different files stay distinct.
                    for col in [c for c in temp_data.columns if c != 'ID']:
                        rename = str(col + '_' + id_string)
                        temp_data.rename(columns={key: rename}, inplace=True)
                    add_data = pd.merge(add_data, temp_data, on='ID')
        # Drop invariant data
        add_data = _test_variance(add_data)
        if Sett.replaceID:
            add_data = _rename_id(add_data)
        self.data = pd.merge(self.data, add_data, on='ID')
class Normalize:
    """Anchor sample data into dataframe with all samples.

    Attributes:
        path: Path to the combined channel count file ('All_<channel>.csv').
        channel: Channel name parsed from the file name.
        counts: Per-sample bin counts read from the file.
        starts: Per-sample anchoring start indices; set by the caller from
            normalize_samples().
    """

    def __init__(self, path):
        self.path = pl.Path(path)
        # Channel name is embedded in the file name, e.g. 'All_<channel>.csv'.
        self.channel = str(self.path.stem).split('_')[1]
        self.counts = system.read_data(path, header=0, test=False)
        self.starts = None

    def averages(self, norm_counts: pd.DataFrame):
        """Find bin averages of channels."""
        # Find groups of each sample based on samplenames
        samples = norm_counts.columns.tolist()
        groups = set({s.casefold(): s.split('_')[0] for s in samples}.values())
        averages = pd.DataFrame(index=norm_counts.index)
        for grp in groups:  # For each group found in data
            namer = "{}_".format(grp)
            group_data = norm_counts.loc[:, norm_counts.columns.str.startswith(namer)]
            # Calculate group averages
            averages.loc[:, "{}_All".format(grp)] = group_data.mean(axis=1)
        # Save average data
        filename = str('ChanAvg_{}.csv'.format(self.channel))
        system.save_to_file(averages, self.path.parent, filename, append=False)

    def avg_add_data(self, paths: system.Paths, data_names: dict, total_len: int):
        """Find bin averages of additional data.

        Args:
            paths: Project path container.
            data_names: Mapping of additional data types to search for.
            total_len: Length of the full anchored matrix.
        """
        samples = self.starts.index
        for sample in samples:
            sample_dir = paths.samplesdir.joinpath(sample)
            data_file = sample_dir.glob(str(self.channel + '.csv'))
            data = system.read_data(next(data_file), header=0)
            for data_type in data_names.keys():
                # Columns belonging to this additional data type.
                sample_data = data.loc[:, data.columns.str.contains(str(data_type))]
                if sample_data.empty:
                    continue
                binned_data = data.loc[:, 'DistBin']
                bins = np.arange(0, Sett.projBins)
                for col in sample_data:
                    # Series spanning the full anchored matrix; bins outside
                    # this sample's span remain NaN.
                    avg_s = pd.Series(np.full(total_len, np.nan), name=sample)
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore', category=RuntimeWarning)
                        insert = [np.nanmean(sample_data.loc[binned_data == i, col]) for i in bins]
                        insert = [0 if np.isnan(v) else v for v in insert]
                    # Anchor the per-bin means at the sample's start index.
                    start = int(self.starts.at[sample])
                    end = int(start + Sett.projBins)
                    avg_s[start:end] = insert
                    filename = str('Avg_{}_{}.csv'.format(self.channel, col))
                    system.save_to_file(avg_s, paths.datadir, filename)

    def normalize_samples(self, mps, array_length, center, name=None):
        """For inserting sample data into larger matrix, centered with MP.

        Args:
            mps: DataFrame with each sample's anchor bin on row 0.
            array_length: Length of the anchored output matrix.
            center: Row index to which all anchor bins are aligned.
            name: Optional output filename stem (default 'Norm_<channel>').

        Returns:
            Tuple of (per-sample start indices, anchored DataFrame).
        """
        # Create empty data array => insert in DF
        cols = self.counts.columns
        arr = np.full((array_length, len(cols)), np.nan)
        data = pd.DataFrame(arr, columns=cols)
        # Create empty series for holding each sample's starting index
        sample_start = pd.Series(np.full(len(cols), np.nan), index=cols)
        for col in self.counts.columns:
            handle = self.counts[col].values
            mp_bin = mps.at[0, col]
            # Insert sample's count data into larger, anchored dataframe:
            insert, insx = relate_data(handle, mp_bin, center, array_length)
            data[col] = insert
            # Save starting index of the sample
            sample_start.at[col] = insx
        # Warn about samples whose anchoring deviates strongly.
        check_anchor_quality(sample_start)
        # Save anchored data
        if name is None:
            name = f'Norm_{self.channel}'
        filename = f'{name}.csv'
        data = data.sort_index(axis=1)
        system.save_to_file(data, self.path.parent, filename, append=False)
        return sample_start, data
class DefineWidths:
    """Find widths of samples along the vector.

    Determines for each projected feature which side of the vector it lies
    on, then approximates the sample's width per vector segment from the
    feature distances on both sides.
    """

    def __init__(self, data, vector, path, datadir):
        self.name = path.name
        self.sampledir = path
        self.data = data
        self.vector = vector
        # Determine width:
        self.data = self.point_handedness()
        self.average_width(datadir)

    def point_handedness(self):
        """
        Find handedness of projected points compared to vector.

        Returns DF with added column 'hand', with possible values [-1, 0, 1]
        that correspond to [right side, on vector, left side] respectively.
        """
        def _get_sign(arr, p1_x, p1_y, p2_x, p2_y):
            """Find which side of vector a feature is."""
            x_val, y_val = arr[0], arr[1]
            # Sign of the 2D cross product of (p2 - p1) and (point - p1).
            val = math.copysign(1, (p2_x - p1_x) * (y_val - p1_y) - (p2_y - p1_y) * (x_val - p1_x))
            return val

        # Define bin edges
        edges, edge_points = self.get_vector_edges(multip=2)
        data = self.data.sort_values(by='NormDist')
        # Find features in every bin and define hand-side
        for ind, point1 in enumerate(edge_points[:-1]):
            point2 = edge_points[ind+1]
            p1x, p1y = point1.x, point1.y
            p2x, p2y = point2.x, point2.y
            d_index = data.loc[(data.NormDist >= edges[ind]) & (data.NormDist < edges[ind+1])].index
            points = data.loc[d_index, ['Position X', 'Position Y']]
            # Assign hand-side of features
            data.loc[d_index, 'hand'] = points.apply(_get_sign, args=(p1x, p1y, p2x, p2y), axis=1, raw=True
                                                     ).replace(np.nan, 0)
        data = data.sort_index()
        # Save calculated data
        channel_string = str('{}.csv'.format(Sett.vectChannel))
        system.save_to_file(data, self.sampledir, channel_string, append=False)
        return data

    def get_vector_edges(self, multip=1, points=True):
        """Divide vector to segments.

        Args:
            multip: Multiplier on the number of bins.
            points: Whether to also interpolate the edge points on the vector.

        Returns:
            Edges array, and edge points when points=True.
        """
        edges = np.linspace(0, 1, Sett.projBins*multip)
        if points:
            edge_points = [self.vector.interpolate(d, normalized=True) for d in edges]
            return edges, edge_points
        return edges

    def average_width(self, datadir):
        """Calculate width based on feature distance and side."""
        def _get_approx_width(sub_data):
            """Approximate sample's width at bin."""
            width = 0
            # Sum the far-edge distance (mean of the farthest decile) of
            # features on both sides of the vector.
            for val in [-1, 1]:
                distances = sub_data.loc[(sub_data.hand == val)].ProjDist
                if not distances.empty:
                    temp = distances.groupby(pd.qcut(distances, 10, duplicates='drop')).mean()
                    if not temp.empty:
                        width += temp.tolist()[-1]
            return width

        edges = self.get_vector_edges(multip=2, points=False)
        cols = ['NormDist', 'ProjDist', 'hand']
        data = self.data.sort_values(by='NormDist').loc[:, cols]
        # Create series to hold width results.
        # NOTE(review): res allocates len(edges) slots but the loop fills
        # len(edges)-1 segments; the final slot remains NaN.
        res = pd.Series(name=self.name, index=pd.RangeIndex(stop=len(edges)), dtype=np.float64)
        # Loop segments and get widths:
        for ind, _ in enumerate(edges[:-1]):
            d_index = data.loc[(data.NormDist >= edges[ind]) & (data.NormDist < edges[ind+1])].index
            res.iat[ind] = _get_approx_width(data.loc[d_index, :])
        filename = 'Sample_widths.csv'
        system.save_to_file(res, datadir, filename)
def create_samples(paths: system.Paths):
    """Create projection vectors for every sample in the work directory."""
    lg.logprint(LAM_logger, 'Begin vector creation.', 'i')
    print("---Processing samples---")
    # Skeletonization requires the resize setting to be in steps of 0.1:
    if Sett.SkeletonVector:
        check_resize_step(Sett.SkeletonResize)
    # Every sub-directory except the analysis output is a sample.
    sample_paths = (p for p in Sett.workdir.iterdir()
                    if p.is_dir() and p.stem != 'Analysis Data')
    for sample_path in sample_paths:
        sample = GetSample(sample_path, paths)
        print("{} ...".format(sample.name))
        sample.vect_data = sample.get_vect_data(Sett.vectChannel)
        # Build the vector with the configured method.
        if Sett.SkeletonVector:
            sample.create_skeleton()
        else:
            sample.create_median()
    # Plot the resulting vectors for visual inspection.
    sample_dirs = [p for p in paths.samplesdir.iterdir() if p.is_dir()]
    pfunc.create_vector_plots(Sett.workdir, paths.samplesdir, sample_dirs)
    lg.logprint(LAM_logger, 'Vectors created.', 'i')
def find_existing(paths: system.Paths):
    """Get MPs and count old projections when not projecting during 'Count'.

    Collects each sample's measurement point and re-derives channel bin
    counts from previously saved projection files.

    Raises:
        SystemExit: If MP data is required (Sett.useMP) but missing.
    """
    msg = 'Collecting pre-existing data.'
    print(msg)
    lg.logprint(LAM_logger, msg, 'i')
    mps = pd.DataFrame(columns=Store.samples)
    for smpl in Store.samples:
        smplpath = paths.samplesdir.joinpath(smpl)
        # FIND MP
        if Sett.useMP:
            try:
                # The sample's anchor bin was stored as the first value.
                mp_df = pd.read_csv(smplpath.joinpath('MPs.csv'))
                mp_bin = mp_df.iat[0, 0]
            except FileNotFoundError:
                msg = "MP-data not found."
                add = "Provide MP-data or set useMP to False."
                print(f"ERROR: {msg}\n{add}")
                raise SystemExit
        else:
            # MPs unused: anchor every sample at bin zero.
            mp_bin = 0
        mps.loc[0, smpl] = mp_bin
        # FIND CHANNEL COUNTS from every saved projection file.
        for path in [p for p in smplpath.iterdir() if p.suffix == '.csv' and p.stem not in ['Vector', 'MPs',
                     Sett.MPname]]:
            data = pd.read_csv(path)
            try:
                counts = np.bincount(data['DistBin'], minlength=Sett.projBins)
                counts = pd.Series(np.nan_to_num(counts), name=smpl)
                channel_string = str(f'All_{path.stem}.csv')
                system.save_to_file(counts, paths.datadir, channel_string)
            except ValueError:  # If channel has not been projected
                print(f"Missing projection data: {path.stem} - {smpl}")
                print("-> Set project=True and perform Count")
                continue
    mps.to_csv(paths.datadir.joinpath('MPs.csv'))
    # Derive sample groups (prefix before '_') for later analysis stages.
    samples = mps.columns.tolist()
    groups = set({s.casefold(): s.split('_')[0] for s in samples}.values())
    Store.samplegroups = sorted(groups)
def get_counts(paths):
    """Handle data to anchor samples and find cell counts.

    Reads the collected MPs, determines the size of the anchored data
    matrix, and (when counting is enabled) normalizes every channel's
    counts plus, optionally, the sample widths.

    Raises:
        SystemExit: If MPs.csv is missing.
    """
    try:  # Test that MPs are found for the sample
        mps = system.read_data(next(paths.datadir.glob('MPs.csv')), header=0, test=False)
    except (FileNotFoundError, StopIteration):
        msg = "MPs.csv NOT found!"
        print("ERROR: {}".format(msg))
        lg.logprint(LAM_logger, msg, 'c')
        msg = "-> Perform 'Count' before continuing.\n"
        print("{}".format(msg))
        lg.logprint(LAM_logger, msg, 'i')
        raise SystemExit
    # Find the smallest and largest anchor bin-number of the dataset
    mp_max, mp_min = mps.max(axis=1).values[0], mps.min(axis=1).values[0]
    # Store the bin number of the row onto which samples are anchored to
    Store.center = mp_max
    # Find the size of needed dataframe, i.e. so that all anchored samples fit
    mp_diff = mp_max - mp_min
    if not any([Sett.process_counts, Sett.process_samples]):
        # Find all sample groups in the analysis from the found MPs.
        found_samples = [p for p in paths.samplesdir.iterdir() if p.is_dir()]
        samples = mps.columns.tolist()
        if len(found_samples) != len(samples):  # Test whether sample numbers match
            msg = "Mismatch of sample N between MPs.csv and sample folders"
            print('WARNING: {}'.format(msg))
            lg.logprint(LAM_logger, msg, 'w')
        groups = set({s.casefold(): s.split('_')[0] for s in samples}.values())
        Store.samplegroups = sorted(groups)
        Store.channels = [c.stem.split('_')[1] for c in paths.datadir.glob("All_*.csv")]
        try:  # If required lengths of matrices haven't been defined because
            # Process and Count are both False, get the sizes from files.
            chan = Sett.vectChannel
            path = paths.datadir.joinpath("Norm_{}.csv".format(chan))
            temp = system.read_data(path, test=False, header=0)
            Store.totalLength = temp.shape[0]  # Length of anchored matrices
            path = paths.datadir.joinpath("All_{}.csv".format(chan))
            temp = system.read_data(path, test=False, header=0)
            Sett.projBins = temp.shape[0]
        except AttributeError:
            msg = "Cannot determine length of sample matrix\n-> Must perform 'Count' before continuing."
            lg.logprint(LAM_logger, msg, 'c')
            print("ERROR: {}".format(msg))
        # Neither processing nor counting requested - nothing more to do.
        return
    # The total length of needed matrix when using 'Count'
    Store.totalLength = int(Sett.projBins + mp_diff)
    # Counting and anchoring of data:
    if Sett.process_counts:
        lg.logprint(LAM_logger, 'Begin normalization of channels.', 'i')
        print('\n---Normalizing sample data---')
        # Get combined channel files of all samples
        countpaths = paths.datadir.glob('All_*')
        for path in countpaths:
            name = str(path.stem).split('_')[1]
            print(' {} ...'.format(name))
            # Anchor sample's data to the full data matrix
            ch_counts = Normalize(path)
            ch_counts.starts, norm_counts = ch_counts.normalize_samples(mps, Store.totalLength, Store.center)
            # Get average bin counts
            ch_counts.averages(norm_counts)
            # Get averages of additional data per bin
            ch_counts.avg_add_data(paths, Sett.AddData, Store.totalLength)
        # Approximate width of sample
        if Sett.measure_width:
            print(' Width ...')
            width_path = paths.datadir.joinpath('Sample_widths.csv')
            width_counts = Normalize(width_path)
            # Widths use doubled bins, hence the doubled anchoring values.
            _, _ = width_counts.normalize_samples(mps * 2, Store.totalLength * 2, Store.center * 2,
                                                  name='Sample_widths_norm')
        lg.logprint(LAM_logger, 'Channels normalized.', 'i')
def project(paths):
    """Project features onto the vector.

    For each sample: finds its measurement point, reads every channel
    (except the MP channel), projects the channel's features onto the
    sample vector, and counts features per bin.
    """
    lg.logprint(LAM_logger, 'Begin channel projection and counting.', 'i')
    print("\n---Projecting and counting channels---")
    # Loop through all directories in the root directory
    for path in [p for p in Sett.workdir.iterdir() if p.is_dir() and p.stem != 'Analysis Data']:
        # Initialize sample variables
        sample = GetSample(path, paths, process=False, projection=True)
        print(f" {sample.name} ...")
        # Find anchoring point of the sample
        sample.MP = sample.get_mps(Sett.MPname, Sett.useMP, paths.datadir)
        # Collection of data for each channel of the sample (skip MP channel)
        for path2 in [p for p in sample.channelpaths if Sett.MPname.lower() != str(p).split('_')[-2].lower()]:
            channel = GetChannel(path2, sample, Sett.AddData, paths.datadir)
            # If no variance in found additional data, it is discarded.
            if channel.datafail:
                datatypes = ', '.join(channel.datafail)
                info = "Invariant data discarded"
                msg = f" -> {info} - {channel.name}: {datatypes}"
                print(msg)
            # Project features of channel onto vector
            sample.data = sample.project_channel(channel)
            # Widths are measured from the vector-creation channel only.
            if channel.name == Sett.vectChannel and Sett.measure_width:
                DefineWidths(sample.data, sample.vector, sample.sampledir, paths.datadir)
            # Count occurrences in each bin
            if channel.name not in ["MPs"]:
                sample.find_counts(channel.name, paths.datadir)
    lg.logprint(LAM_logger, 'All channels projected and counted.', 'i')
def relate_data(data, mp_bin=0, center=50, total_length=100):
    """Place sample data in context of all data, i.e. anchoring.

    Args:
        data: 1D sequence/array of the sample's bin values.
        mp_bin: The sample's anchor bin (measurement point).
        center: Bin index of the full matrix that all anchors align to.
        total_length: Length of the full (anchored) matrix.

    Returns:
        Tuple of (anchored array of length total_length, insertion index).

    Raises:
        SystemExit: If mp_bin is NaN or the data does not fit the matrix.
    """
    try:
        length = data.shape[0]
    except AttributeError:  # Plain sequences have no .shape
        length = len(data)
    if np.isnan(mp_bin):
        msg = "Missing MP-projection(s). See 'Analysis Data/MPs.csv'."
        print(f"CRITICAL: {msg}")
        lg.logprint(LAM_logger, msg, 'c')
        raise SystemExit
    # Insert smaller input data into larger DF defined by TotalLength
    insx = int(center - mp_bin)
    end = int(insx + length)
    insert = np.full(total_length, np.nan)  # Bins outside input data are NaN
    # Set all NaN in input to 0. FIX: the original tested `data == np.nan`,
    # which is always False (NaN never compares equal), so NaNs were never
    # replaced; np.isnan performs the intended check.
    data = np.where(np.isnan(np.asarray(data, dtype=float)), 0, data)
    try:  # Insertion
        insert[insx:end] = data
    except ValueError:
        msg = "relate_data() call from {} line {}".format(inspect.stack()[1][1], inspect.stack()[1][2])
        print('ERROR: {}'.format(msg))
        lg.logprint(LAM_logger, f'Failed {msg}\n', 'ex')
        msg = "If not using MPs, remove MPs.csv from 'Data Files'."
        if insert[insx:end].size - length == mp_bin:
            lg.logprint(LAM_logger, msg, 'i')
        raise SystemExit
    return insert, insx
def vector_test(path):
    """Raise VectorError if any sample directory lacks a vector file."""
    missing = []
    for sample_dir in (p for p in path.iterdir() if p.is_dir()):
        # A sample is valid if it contains any file named Vector.* .
        if next(sample_dir.glob("Vector.*"), None) is None:
            missing.append(sample_dir.name)
    if missing:
        raise VectorError(missing)
def test_count_projection(counts, name):
if (counts == 0).sum() > counts.size / 3:
print("\n")
print('WARNING: Uneven projection <- vector may be faulty!')
print("\n")
print('\a')
lg.logprint(LAM_logger, f'Uneven projection for {name}. Check vector quality.', 'w')
def check_resize_step(resize, log=True):
    """Verify the skeleton resize setting is a multiple of 0.1; round down if not."""
    off_step = Decimal(str(resize)) % Decimal(str(0.10)) != Decimal('0.0')
    if Sett.SkeletonVector and off_step:
        warning = 'Resizing not in step of 0.1'
        print(f"WARNING: {warning}")
        # Round the setting down to the nearest 0.1.
        Sett.SkeletonResize = math.floor(resize * 10) / 10
        info = f'SkeletonResize changed to {Sett.SkeletonResize}'
        print(f"-> {info}")
        if log:
            lg.logprint(LAM_logger, warning, 'w')
            lg.logprint(LAM_logger, info, 'i')
def define_scoring_point(line, s_x, s_y):
    """Project a forward test point used to score candidate skeleton pixels.

    Extrapolates the vector's current direction one step past (s_x, s_y).

    Args:
        line: List of (x, y) tuples forming the vector so far.
        s_x: Current X search coordinate.
        s_y: Current Y search coordinate.

    Returns:
        A point one direction-step ahead of the current search coordinate.
    """
    # NOTE(review): line[-3:-2] yields at most ONE element, so the final
    # else-branch (two-point averaging, which expects objects with .x/.y
    # rather than the tuples stored in `line`) is unreachable as written -
    # confirm whether line[-3:-1] was intended.
    points, point2 = line[-3:-2], line[-1]
    if not points:
        point1 = (s_x, s_y)
    elif len(points) == 1:
        point1 = points[0]
    else:
        p1, p2 = points[0], points[1]
        point1 = ((p1.x + p2.x) / 2, (p1.y + p2.y) / 2)
    # Create a test point (used to score nearby pixels)
    shiftx = point2[0] - point1[0]  # shift in x for test point
    shifty = point2[1] - point1[1]  # shift in y for test point
    return gm.Point(s_x + shiftx, s_y + shifty)
def check_anchor_quality(sample_start):
    """Warn about samples whose anchoring start index deviates strongly."""
    starts = sample_start.astype('float')
    mean = np.mean(starts)
    if mean == 0:
        return
    # Flag samples more than 2.5 standard deviations from the mean start bin.
    threshold = 2.5 * np.std(starts)
    outliers = sample_start[np.abs(sample_start - mean) >= threshold]
    if not outliers.empty:
        print(f"WARNING: Samples with outlying anchoring. Check anchoring and vector of:" +
              f"\n - {', '.join(outliers.index)}")
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/codemirror/js/addon/dialog/dialog.js |
// Open simple dialogs on top of an editor. Relies on dialog.css.
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
// Build the dialog container inside the editor's wrapper element.
// `template` may be an HTML string or a detached DOM element.
function dialogDiv(cm, template, bottom) {
  var wrap = cm.getWrapperElement();
  var dialog = wrap.appendChild(document.createElement("div"));
  dialog.className = bottom
    ? "CodeMirror-dialog CodeMirror-dialog-bottom"
    : "CodeMirror-dialog CodeMirror-dialog-top";
  if (typeof template == "string") {
    dialog.innerHTML = template;
  } else { // Assuming it's a detached DOM element.
    dialog.appendChild(template);
  }
  return dialog;
}
function closeNotification(cm, newVal) {
if (cm.state.currentNotificationClose)
cm.state.currentNotificationClose();
cm.state.currentNotificationClose = newVal;
}
// Open a dialog on top of the editor. `template` is an HTML string or a
// detached DOM element; `callback` receives the input's value on Enter.
// Returns a function that closes the dialog.
CodeMirror.defineExtension("openDialog", function(template, callback, options) {
  // Only one notification/dialog may be active at a time.
  closeNotification(this, null);
  var dialog = dialogDiv(this, template, options && options.bottom);
  var closed = false, me = this;
  function close() {
    if (closed) return;
    closed = true;
    dialog.parentNode.removeChild(dialog);
  }
  var inp = dialog.getElementsByTagName("input")[0], button;
  if (inp) {
    if (options && options.value) inp.value = options.value;
    CodeMirror.on(inp, "keydown", function(e) {
      // A consumer-supplied onKeyDown may swallow the event entirely.
      if (options && options.onKeyDown && options.onKeyDown(e, inp.value, close)) { return; }
      // Enter (13) commits, Escape (27) cancels; both close the dialog.
      if (e.keyCode == 13 || e.keyCode == 27) {
        inp.blur();
        CodeMirror.e_stop(e);
        close();
        me.focus();
        if (e.keyCode == 13) callback(inp.value);
      }
    });
    if (options && options.onKeyUp) {
      CodeMirror.on(inp, "keyup", function(e) {options.onKeyUp(e, inp.value, close);});
    }
    // NOTE(review): duplicate of the options.value assignment above.
    if (options && options.value) inp.value = options.value;
    inp.focus();
    CodeMirror.on(inp, "blur", close);
  } else if (button = dialog.getElementsByTagName("button")[0]) {
    // Dialogs without an input close via their first button instead.
    CodeMirror.on(button, "click", function() {
      close();
      me.focus();
    });
    button.focus();
    CodeMirror.on(button, "blur", close);
  }
  return close;
});
// Open a confirm dialog with one callback per button. The `blurring`
// counter keeps the dialog alive while focus moves between its buttons,
// and closes it shortly after focus leaves them all.
CodeMirror.defineExtension("openConfirm", function(template, callbacks, options) {
  closeNotification(this, null);
  var dialog = dialogDiv(this, template, options && options.bottom);
  var buttons = dialog.getElementsByTagName("button");
  var closed = false, me = this, blurring = 1;
  function close() {
    if (closed) return;
    closed = true;
    dialog.parentNode.removeChild(dialog);
    me.focus();
  }
  buttons[0].focus();
  for (var i = 0; i < buttons.length; ++i) {
    var b = buttons[i];
    // IIFE captures the per-button callback (avoids the late-binding loop trap).
    (function(callback) {
      CodeMirror.on(b, "click", function(e) {
        CodeMirror.e_preventDefault(e);
        close();
        if (callback) callback(me);
      });
    })(callbacks[i]);
    // Delay the blur-close so a focus shift to a sibling button cancels it.
    CodeMirror.on(b, "blur", function() {
      --blurring;
      setTimeout(function() { if (blurring <= 0) close(); }, 200);
    });
    CodeMirror.on(b, "focus", function() { ++blurring; });
  }
});
/*
 * openNotification
 * Opens a notification, that can be closed with an optional timer
 * (default 5000ms timer) and always closes on click.
 *
 * If a notification is opened while another is opened, it will close the
 * currently opened one and open the new one immediately.
 */
CodeMirror.defineExtension("openNotification", function(template, options) {
  closeNotification(this, close);
  var dialog = dialogDiv(this, template, options && options.bottom);
  // Default to a 5000ms timer; passing duration: 0 disables auto-close.
  var duration = options && (options.duration === undefined ? 5000 : options.duration);
  var closed = false, doneTimer;
  function close() {
    if (closed) return;
    closed = true;
    clearTimeout(doneTimer);
    dialog.parentNode.removeChild(dialog);
  }
  CodeMirror.on(dialog, 'click', function(e) {
    CodeMirror.e_preventDefault(e);
    close();
  });
  if (duration)
    // FIX: schedule with the computed `duration` (which carries the 5000ms
    // default). The original passed `options.duration`, which is undefined
    // when the caller omits it, so the notification closed immediately
    // instead of after the documented 5 seconds.
    doneTimer = setTimeout(close, duration);
});
}); | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/plugins/clipboard/dialogs/paste.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.dialog.add("paste",function(c){function h(a){var b=new CKEDITOR.dom.document(a.document),f=b.getBody(),d=b.getById("cke_actscrpt");d&&d.remove();f.setAttribute("contenteditable",!0);if(CKEDITOR.env.ie&&8>CKEDITOR.env.version)b.getWindow().on("blur",function(){b.$.selection.empty()});b.on("keydown",function(a){var a=a.data,b;switch(a.getKeystroke()){case 27:this.hide();b=1;break;case 9:case CKEDITOR.SHIFT+9:this.changeFocus(1),b=1}b&&a.preventDefault()},this);c.fire("ariaWidget",new CKEDITOR.dom.element(a.frameElement));
b.getWindow().getFrame().removeCustomData("pendingFocus")&&f.focus()}var e=c.lang.clipboard;c.on("pasteDialogCommit",function(a){a.data&&c.fire("paste",{type:"auto",dataValue:a.data})},null,null,1E3);return{title:e.title,minWidth:CKEDITOR.env.ie&&CKEDITOR.env.quirks?370:350,minHeight:CKEDITOR.env.quirks?250:245,onShow:function(){this.parts.dialog.$.offsetHeight;this.setupContent();this.parts.title.setHtml(this.customTitle||e.title);this.customTitle=null},onLoad:function(){(CKEDITOR.env.ie7Compat||
CKEDITOR.env.ie6Compat)&&"rtl"==c.lang.dir&&this.parts.contents.setStyle("overflow","hidden")},onOk:function(){this.commitContent()},contents:[{id:"general",label:c.lang.common.generalTab,elements:[{type:"html",id:"securityMsg",html:'<div style="white-space:normal;width:340px">'+e.securityMsg+"</div>"},{type:"html",id:"pasteMsg",html:'<div style="white-space:normal;width:340px">'+e.pasteMsg+"</div>"},{type:"html",id:"editing_area",style:"width:100%;height:100%",html:"",focus:function(){var a=this.getInputElement(),
b=a.getFrameDocument().getBody();!b||b.isReadOnly()?a.setCustomData("pendingFocus",1):b.focus()},setup:function(){var a=this.getDialog(),b='<html dir="'+c.config.contentsLangDirection+'" lang="'+(c.config.contentsLanguage||c.langCode)+'"><head><style>body{margin:3px;height:95%}</style></head><body><script id="cke_actscrpt" type="text/javascript">window.parent.CKEDITOR.tools.callFunction('+CKEDITOR.tools.addFunction(h,a)+",this);<\/script></body></html>",f=CKEDITOR.env.air?"javascript:void(0)":CKEDITOR.env.ie?
"javascript:void((function(){"+encodeURIComponent("document.open();("+CKEDITOR.tools.fixDomain+")();document.close();")+'})())"':"",d=CKEDITOR.dom.element.createFromHtml('<iframe class="cke_pasteframe" frameborder="0" allowTransparency="true" src="'+f+'" role="region" aria-label="'+e.pasteArea+'" aria-describedby="'+a.getContentElement("general","pasteMsg").domId+'" aria-multiple="true"></iframe>');d.on("load",function(a){a.removeListener();a=d.getFrameDocument();a.write(b);c.focusManager.add(a.getBody());
CKEDITOR.env.air&&h.call(this,a.getWindow().$)},a);d.setCustomData("dialog",a);a=this.getElement();a.setHtml("");a.append(d);if(CKEDITOR.env.ie){var g=CKEDITOR.dom.element.createFromHtml('<span tabindex="-1" style="position:absolute" role="presentation"></span>');g.on("focus",function(){setTimeout(function(){d.$.contentWindow.focus()})});a.append(g);this.focus=function(){g.focus();this.fire("focus")}}this.getInputElement=function(){return d};CKEDITOR.env.ie&&(a.setStyle("display","block"),a.setStyle("height",
d.$.offsetHeight+2+"px"))},commit:function(){var a=this.getDialog().getParentEditor(),b=this.getInputElement().getFrameDocument().getBody(),c=b.getBogus(),d;c&&c.remove();d=b.getHtml();setTimeout(function(){a.fire("pasteDialogCommit",d)},0)}}]}]}}); | PypiClean |
/BNIAJFI_VitalSigns-0.0.13.tar.gz/BNIAJFI_VitalSigns-0.0.13/VitalSigns/rbintel.py |
__all__ = ['vsDom', 'cashsa', 'reosa']
# Cell
import numpy as np
import pandas as pd
# import geopandas
import geopandas as gpd
# from geopandas import GeoDataFrame
# Cell
def vsDom(df, yr):
    """Compute indicator 30-dom: median Days on Market per CSA.

    Args:
        df (pandas.DataFrame): record-level sales data; must contain the
            columns 'DaysonMark' and 'CSA2010'.
        yr (str): two-digit data vintage, e.g. '20'.

    Returns:
        pandas.DataFrame: one row per CSA (row 55 is Baltimore City) with
        the final '30-dom<yr>' column plus change columns vs. the previous
        vintage when available.

    Note:
        Relies on the module-level ``csa`` GeoDataFrame (CSA reference
        layer) being loaded elsewhere in this package.
    """
    # FIX: the body referenced an undefined name ``year`` (NameError at
    # runtime); bind it to the ``yr`` parameter. Signature is unchanged.
    year = yr
    indicator_id = '30'  # renamed from ``id`` to avoid shadowing the builtin
    shortname = 'dom'
    fincol = indicator_id + '-' + shortname + year
    # Numerator: records with a positive days-on-market value.
    numer = df[['DaysonMark', 'CSA2010']].copy()
    numer = numer[numer['DaysonMark'] > 0]
    print(numer.shape[0])
    # Baltimore City value: median over ALL records, not a median of the
    # community medians.
    bCityVal = numer.median(numeric_only=True)['DaysonMark']
    # Per-CSA medians (DOM is defined as a median, not a mean).
    numer = numer.groupby('CSA2010').median(numeric_only=True)
    # Make sure ALL CSAs and Baltimore City are included and sorted.
    numer = csa.merge(numer, left_on='CSA2010', right_on='CSA2010', how='outer')
    numer.drop(columns=['geometry', 'Shape__Length', 'Shape__Area', 'OBJECTID', 'tpop10'], inplace=True)
    # Row 55 is Baltimore City; overwrite with the city-wide median computed
    # above (the group-by would otherwise leave a wrong value there).
    numer.at[55, 'DaysonMark'] = bCityVal
    # Final indicator column.
    numer[fincol] = numer['DaysonMark']
    compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/"+shortname.capitalize()+"/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson")
    # Vintage gaps: '19 compares to '17, '20 compares to '17, else previous year.
    goback = 2 if year == '19' else 3 if year == '20' else 1
    prevYear = shortname + str(int(year) - goback)
    if prevYear in compareYears.columns:
        numer = numer.merge(compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer')
        numer['change'] = numer[fincol] - numer[prevYear]
        numer['percentChange'] = numer['change'] / numer[prevYear] * 100
        numer['change'] = numer['change'].apply(lambda x: "{:.2f}".format(x))
    print('Records Matching Query: ', numer.size / len(numer.columns))
    return numer
# Cell
def cashsa(df, yr):
    """Compute indicator 38-cashsa: percent of home sales purchased with cash, per CSA.

    Args:
        df (pandas.DataFrame): record-level sales data; must contain the
            columns 'BuyerFinan' and 'CSA2010'.
        yr (str): two-digit data vintage, e.g. '20'.

    Returns:
        pandas.DataFrame: one row per CSA (row 55 is Baltimore City) with
        the final '38-cashsa<yr>' column plus change columns vs. the
        previous vintage when available.

    Note:
        Relies on the module-level ``csa`` GeoDataFrame being loaded
        elsewhere in this package.
    """
    # FIX: the body referenced an undefined name ``year`` (NameError at
    # runtime); bind it to the ``yr`` parameter. Signature is unchanged.
    year = yr
    indicator_id = '38'  # renamed from ``id`` to avoid shadowing the builtin
    shortname = 'cashsa'
    fincol = indicator_id + '-' + shortname + year
    # Numerator: cash sales; denominator: all sales.
    numer = df[['BuyerFinan', 'CSA2010']].copy()
    numer['count'] = 1
    denom = numer.copy()
    # NOTE(review): '.' is a regex wildcard, so this pattern reduces to
    # "contains 'Cash'"; kept verbatim to preserve behavior.
    numer = numer[numer['BuyerFinan'].str.contains('.Cash.|.Cash|Cash.|Cash', regex=True, na=False)]
    print("LENGTH AFTER FILTER: ", len(numer))
    # City-wide totals, taken before the group-by.
    bCityVal = numer.sum(numeric_only=True)['count']
    bCityValDenom = denom.sum(numeric_only=True)['count']
    # Per-CSA totals.
    numer = numer.groupby('CSA2010').sum(numeric_only=True)
    denom = denom.groupby('CSA2010').sum(numeric_only=True)
    # Make sure ALL CSAs and Baltimore City are included and sorted.
    numer = csa.merge(numer, left_on='CSA2010', right_on='CSA2010', how='outer')
    numer.drop(columns=['geometry', 'Shape__Length', 'Shape__Area', 'OBJECTID', 'tpop10'], inplace=True)
    denom = csa.merge(denom, left_on='CSA2010', right_on='CSA2010', how='outer')
    denom.drop(columns=['geometry', 'Shape__Length', 'Shape__Area', 'OBJECTID', 'tpop10'], inplace=True)
    # Row 55 is Baltimore City; overwrite with the city-wide totals.
    numer.at[55, 'count'] = bCityVal
    denom.at[55, 'count'] = bCityValDenom
    # Final indicator: percentage of cash sales.
    numer[fincol] = numer['count'] / denom['count'] * 100
    compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/"+shortname.capitalize()+"/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson")
    # Vintage gaps: '19 compares to '17, '20 compares to '17, else previous year.
    goback = 2 if year == '19' else 3 if year == '20' else 1
    prevYear = shortname + str(int(year) - goback)
    if prevYear in compareYears.columns:
        numer = numer.merge(compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer')
        numer['change'] = numer[fincol] - numer[prevYear]
        numer['percentChange'] = numer['change'] / numer[prevYear] * 100
        numer['change'] = numer['change'].apply(lambda x: "{:.2f}".format(x))
    print('Records Matching Query: ', numer.size / len(numer.columns))
    return numer
# Cell
def reosa(df, yr):
    """Compute indicator 39-reosa: percent of home sales that were foreclosure (REO) sales, per CSA.

    Args:
        df (pandas.DataFrame): record-level sales data; must contain the
            columns 'Foreclosur' and 'CSA2010'.
        yr (str): two-digit data vintage, e.g. '20'.

    Returns:
        pandas.DataFrame: one row per CSA (row 55 is Baltimore City) with
        the final '39-reosa<yr>' column, the intermediate 'denomCount'
        column, plus change columns vs. the previous vintage when available.

    Note:
        Relies on the module-level ``csa`` GeoDataFrame being loaded
        elsewhere in this package.
    """
    # FIX: the body referenced an undefined name ``year`` (NameError at
    # runtime); bind it to the ``yr`` parameter. Signature is unchanged.
    year = yr
    indicator_id = '39'  # renamed from ``id`` to avoid shadowing the builtin
    shortname = 'reosa'
    fincol = indicator_id + '-' + shortname + year
    # Numerator: foreclosure sales; denominator: all sales.
    numer = df[['Foreclosur', 'CSA2010']].copy()
    numer['count'] = 1
    denom = numer.copy()  # already carries count == 1; no re-assignment needed
    # NOTE(review): '.' is a regex wildcard, so this pattern reduces to
    # "contains 'Y'"; kept verbatim to preserve behavior.
    numer = numer[numer['Foreclosur'].str.contains('.Y.|.Y|Y.|Y', regex=True, na=False)]
    print(numer['Foreclosur'].value_counts())
    # City-wide totals, taken before the group-by.
    bCityVal = numer.sum(numeric_only=True)['count']
    bCityValDenom = denom.sum(numeric_only=True)['count']
    # Per-CSA totals.
    numer = numer.groupby('CSA2010').sum(numeric_only=True)
    denom = denom.groupby('CSA2010').sum(numeric_only=True)
    # Make sure ALL CSAs and Baltimore City are included and sorted.
    numer = csa.merge(numer, left_on='CSA2010', right_on='CSA2010', how='outer')
    numer.drop(columns=['geometry', 'Shape__Length', 'Shape__Area', 'OBJECTID', 'tpop10'], inplace=True)
    denom = csa.merge(denom, left_on='CSA2010', right_on='CSA2010', how='outer')
    denom.drop(columns=['geometry', 'Shape__Length', 'Shape__Area', 'OBJECTID', 'tpop10'], inplace=True)
    # Row 55 is Baltimore City; overwrite with the city-wide totals.
    numer.at[55, 'count'] = bCityVal
    denom.at[55, 'count'] = bCityValDenom
    # Final indicator: percentage of foreclosure sales.
    numer['denomCount'] = denom['count']
    numer[fincol] = numer['count'] / numer['denomCount'] * 100
    compareYears = gpd.read_file("https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/"+shortname.capitalize()+"/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson")
    # Vintage gaps: '19 compares to '17, '20 compares to '17, else previous year.
    goback = 2 if year == '19' else 3 if year == '20' else 1
    prevYear = shortname + str(int(year) - goback)
    if prevYear in compareYears.columns:
        numer = numer.merge(compareYears[['CSA2010', prevYear]], left_on='CSA2010', right_on='CSA2010', how='outer')
        numer['change'] = numer[fincol] - numer[prevYear]
        numer['percentChange'] = numer['change'] / numer[prevYear] * 100
        numer['change'] = numer['change'].apply(lambda x: "{:.2f}".format(x))
    print('Records Matching Query: ', numer.size / len(numer.columns))
    return numer
/GReNaDIne-0.0.21.tar.gz/GReNaDIne-0.0.21/grenadine/Preprocessing/rnaseq_normalization.py | import pandas as pd
import numpy as np
try:
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
from rpy2.robjects import Formula
from rpy2.robjects import pandas2ri
deseq2 = importr('DESeq2')
base = importr('base')
biogeneric = importr('BiocGenerics')
sum_exp = importr('SummarizedExperiment')
pandas2ri.activate()
except:
print("Warning: could not import R components. DEseq2() function cannot be called.")
__author__ = "Sergio Peignier"
__copyright__ = "Copyright 2019, The GReNaDIne Project"
__credits__ = ["Nicolas Parisot"]
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "Sergio Peignier"
__email__ = "sergio.peignier@insa-lyon.fr"
__status__ = "pre-alpha"
def RPM(raw_counts):
    """Reads Per Million normalization.

    Scales each column (condition) so counts are expressed per million
    mapped reads in that condition.

    Args:
        raw_counts (pandas.DataFrame): raw RNA-seq counts where rows are
            genes and columns are conditions.

    Returns:
        pandas.DataFrame: RPM-normalized counts.
    """
    # Library size of each condition, expressed in millions of reads.
    per_million_factor = raw_counts.sum(axis=0) / 1e6
    return raw_counts / per_million_factor
def RPK(raw_counts, seq_lengths, seq_in_kb=False):
    """Reads Per Kilobase normalization.

    Divides each gene's counts by that gene's sequence length expressed in
    kilobases. Missing lengths are imputed with the mean of the aligned
    lengths.

    Args:
        raw_counts (pandas.DataFrame): raw RNA-seq counts where rows are
            genes and columns are conditions.
        seq_lengths (pandas.Series): DNA sequence lengths, indexed by gene.
        seq_in_kb (bool): True if ``seq_lengths`` is already in kilobases.

    Returns:
        pandas.DataFrame: RPK-normalized counts.
    """
    # Align lengths with the count matrix and impute missing values.
    aligned = seq_lengths[raw_counts.index]
    aligned = aligned.fillna(aligned.mean())
    kb_lengths = aligned if seq_in_kb else aligned / 1e3
    # Row-wise division: every condition of a gene by that gene's kb length.
    return raw_counts.div(kb_lengths, axis=0)
def RPKM(raw_counts, seq_lengths, seq_in_kb=False):
    """Reads Per Kilobase Million (also known as FPKM).

    Equivalent to RPM normalization followed by a per-kilobase correction.

    Args:
        raw_counts (pandas.DataFrame): raw RNAseq counts where rows are genes
            and columns are conditions.
        seq_lengths (pandas.Series): sequence DNA lengths, indexed by gene.
        seq_in_kb (bool): True if lengths are already in kb, False otherwise.

    Returns:
        pandas.DataFrame: Normalized counts.
    """
    # reads per million
    rpm = RPM(raw_counts=raw_counts)
    # FIX: ``seq_in_kb`` was previously hard-coded to False here, so lengths
    # supplied in kilobases were wrongly divided by 1e3 a second time.
    # Forward the caller's flag, as the sibling TPM() function already does.
    rpkm = RPK(rpm, seq_lengths, seq_in_kb=seq_in_kb)
    return rpkm
def TPM(raw_counts, seq_lengths, seq_in_kb=False):
    """Transcript Per Million normalization.

    Applies the per-kilobase correction first, then scales each condition
    to a library size of one million (the reverse order of RPKM).

    Args:
        raw_counts (pandas.DataFrame): raw RNAseq counts where rows are
            genes and columns are conditions.
        seq_lengths (pandas.Series): sequence DNA lengths, indexed by gene.
        seq_in_kb (bool): True if lengths are already in kb, False otherwise.

    Returns:
        pandas.DataFrame: TPM-normalized counts.
    """
    return RPM(RPK(raw_counts, seq_lengths, seq_in_kb))
def DEseq2(raw_counts,col_data,rlog=True):
    """
    Apply R DEseq2 normalization.
    Args:
        raw_counts (pandas.DataFrame): raw RNAseq counts where rows are genes
        and columns are conditions
        col_data (pandas.DataFrame): Two columns, one corresponding to ids of
        each condition (individuals), and one with the experiment id
        (if many repetitions)
        rlog (bool): if True return the regularized-log transformed counts,
        otherwise the size-factor normalized counts
    Returns:
        pandas.DataFrame: Normalized counts
    Example:
        >>> import pandas as pd
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> raw_counts = pd.DataFrame(np.random.randint(0,1000,(20,10)),
                                      columns = ["Z"+str(i) for i in range(10)])
        >>> col_data = pd.DataFrame([["Z0","1"],
                                     ["Z1","2"],
                                     ["Z2","3"],
                                     ["Z3","4"],
                                     ["Z4","5"],
                                     ["Z5","6"],
                                     ["Z6","7"],
                                     ["Z7","8"],
                                     ["Z8","9"],
                                     ["Z9","10"]
                                    ],columns=["individuals","conditions"])
        >>> raw_counts.columns = col_data["individuals"]
        >>> col_data.index = col_data['individuals']
        >>> DEseq2(raw_counts,col_data,rlog=False)
        individuals          X0          X1  ...          X8          X9
        0            408.025477  382.991634  ...    7.745300  611.474516
        1            165.238388  516.593367  ...  270.224902  596.251084
        2            289.912839  377.510537  ...  727.197585   60.893728
        3            463.502625  627.585575  ...  385.543809  718.884285
        4             59.056319  674.174898  ...  364.029087  243.574911
        5            573.263865  181.561329  ...  129.948918  570.878697
        6            304.229522  314.477925  ...  802.068816   44.824550
        7            537.472156  376.825400  ...   36.144732  373.819828
        8            323.914962  608.401737  ...  748.712307  100.643800
        9            464.695682  294.608949  ...  781.414683  535.357356
        10           559.543710   57.551516  ...  112.737140  822.065324
        11           517.786716  123.324676  ...  618.763389  768.783312
        12           222.505121  584.421939  ...   81.755942  166.612005
        13           361.496256  175.395095  ...  333.047888  515.905193
        14           330.476775  666.638390  ...  779.693506  312.926101
        15           331.073304  653.620785  ...  493.978005  787.389729
        16           437.851901   84.271862  ...  483.650938  347.601696
        17           466.485268   28.090621  ...  750.433484    9.303208
        18           459.326926  210.337087  ...  149.742461  468.543405
        19           221.312064  126.065225  ...  662.653421  435.559302
    """
    # Sanity check: col_data must describe exactly one row per count column.
    # NOTE(review): on mismatch this returns the int 1 rather than raising —
    # callers should check the return type; kept as-is to preserve behavior.
    if len(col_data.index) != len(raw_counts.columns):
        print('each raw of col_data should correspond to each column of raw_counts ... dataframes shapes mismatches')
        return(1)
    # Re-align col_data's index on the count columns if they disagree.
    if (col_data.index!=raw_counts.columns).any():
        col_data.index = raw_counts.columns
    # Build the DESeq2 dataset in R (design: ~ conditions) and run DESeq.
    dds = deseq2.DESeqDataSetFromMatrix(raw_counts,col_data,Formula('~ conditions'))
    dds = deseq2.DESeq(dds)
    # NOTE(review): ``res`` is computed but never used below; presumably kept
    # for its side effects on the R session — confirm before removing.
    res = deseq2.results(dds)
    if rlog:
        # Regularized-log transform, extracted via SummarizedExperiment::assay.
        rlog_counts = deseq2.rlog(dds)
        normed_counts = sum_exp.assay(rlog_counts)
    else:
        # Size-factor normalized counts (BiocGenerics::counts, normalized=TRUE).
        normed_counts = biogeneric.counts(dds,True)
    # Convert the R matrix back to a pandas DataFrame with original labels.
    normed_counts = pd.DataFrame(normed_counts)
    normed_counts.index = raw_counts.index
    normed_counts.columns = raw_counts.columns
    return(normed_counts)
def log(X, base=10, pseudocount=1):
    """Shifted logarithm transform.

    Adds ``pseudocount`` to every entry (so zeros remain finite) and applies
    a base-``base`` logarithm via the change-of-base formula.

    Args:
        X (pandas.DataFrame or numpy.array): gene expression matrix.
        base (float): logarithm base.
        pseudocount (float): value added before taking the logarithm.

    Returns:
        pandas.DataFrame or numpy.array: log-transformed expression matrix.
    """
    shifted = X + pseudocount
    return np.log(shifted) / np.log(base)
/BIP-0.6.16.tar.gz/BIP-0.6.16/distribute_setup.py | import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# Define python_cmd(): run the current interpreter with the given arguments
# and report success. Prefer subprocess; fall back to os.spawnl on very old
# interpreters (historical Python 2.3) that lack the subprocess module.
try:
    import subprocess
    def python_cmd(*args):
        """Run ``sys.executable`` with *args*; return True on exit code 0."""
        args = (sys.executable,) + args
        return subprocess.call(args) == 0
except ImportError:
    # will be used for python 2.3
    def python_cmd(*args):
        """Fallback via os.spawnl; return True on exit code 0."""
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.3"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: 0.6c9
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
"""
def _install(tarball):
    """Extract *tarball* into a temp directory and run ``setup.py install`` there."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        extractall(tar)  # module-level backport of TarFile.extractall
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        assert python_cmd('setup.py', 'install')
    finally:
        # always restore the caller's working directory
        os.chdir(old_wd)
def _build_egg(tarball, to_dir):
    """Extract *tarball*, build a distribute egg and return the egg's path in *to_dir*."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        extractall(tar)  # module-level backport of TarFile.extractall
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
        # returning the result: locate the freshly built egg file
        for file in os.listdir(to_dir):
            if fnmatch.fnmatch(file, 'distribute-%s*.egg' % DEFAULT_VERSION):
                return os.path.join(to_dir, file)
        raise IOError('Could not build the egg.')
    finally:
        # always restore the caller's working directory
        os.chdir(old_wd)
def _do_download(version, download_base, to_dir, download_delay):
    """Download the distribute tarball, build an egg and make it importable."""
    tarball = download_setuptools(version, download_base,
                                  to_dir, download_delay)
    egg = _build_egg(tarball, to_dir)
    # Put the freshly built egg first on sys.path so this import uses it.
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15):
    """Ensure distribute >= *version* is importable, downloading it if needed.

    Note: this module is Python 2 bootstrap code (``except X, e`` syntax).
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Remember whether pkg_resources/setuptools were imported before we ran:
    # if so, a conflicting version cannot be safely replaced in-process.
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        import pkg_resources
        # Only distribute's pkg_resources defines ``_distribute``; plain
        # setuptools does not, so treat it as absent and download.
        if not hasattr(pkg_resources, '_distribute'):
            raise ImportError
    except ImportError:
        return _do_download(version, download_base, to_dir, download_delay)
    try:
        pkg_resources.require("distribute>="+version)
        return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            print >>sys.stderr, (
            "The required version of distribute (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U distribute'."
            "\n\n(Currently using %r)") % (version, e.args[0])
            sys.exit(2)
        else:
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return _do_download(version, download_base, to_dir, download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir, download_delay)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename
    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    import urllib2  # Python 2 only; this is legacy bootstrap code
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            # Close whichever handles were actually opened.
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _patch_file(path, content):
    """Back up the file at *path*, then overwrite it with *content*.

    Returns False when the file already holds *content* (nothing to do),
    True after a successful patch.
    """
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Keep the original aside as <path>.OLD.<timestamp> before rewriting.
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True
def _same_content(path, content):
    """Return True if the file at *path* holds exactly *content*.

    FIX: close the file handle deterministically (the previous
    ``open(path).read()`` relied on garbage collection to release it).
    """
    f = open(path)
    try:
        return f.read() == content
    finally:
        f.close()
def _rename_path(path):
    """Rename *path* to ``<path>.OLD.<timestamp>`` and return the new name."""
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, new_name)
    # If setuptools' sandbox is active (e.g. running under easy_install),
    # neutralize its violation hook so the rename below is permitted.
    try:
        from setuptools.sandbox import DirectorySandbox
        def _violation(*args):
            pass
        DirectorySandbox._violation = _violation
    except ImportError:
        pass
    os.rename(path, new_name)
    return new_name
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools install living in *placeholder*.

    Patches the ``setuptools*.egg-info`` metadata and renames the installed
    modules out of the way. Returns False when nothing needed patching.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    # Look for flat setuptools metadata in the site-packages directory.
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
    else:
        log.warn('Removing elements out of the way...')
        pkg_info = os.path.join(placeholder, file)
        # egg-info may be a directory (newer layouts) or a single file.
        if os.path.isdir(pkg_info):
            patched = _patch_egg_dir(pkg_info)
        else:
            patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
        if not patched:
            log.warn('%s already patched.', pkg_info)
            return False
        # now let's move the files out of the way
        for element in ('setuptools', 'pkg_resources.py', 'site.py'):
            element = os.path.join(placeholder, element)
            if os.path.exists(element):
                _rename_path(element)
            else:
                log.warn('Could not find the %s element of the '
                         'Setuptools distribution', element)
    return True
def after_install(dist):
    """Post-install hook: drop fake setuptools metadata next to the install.

    Writes a ``setuptools-0.6c9-py<ver>.egg-info`` file and a
    ``setuptools.pth`` entry into the install's purelib directory so tools
    believe setuptools is present (distribute provides the actual code).
    """
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-0.6c9-py%s.egg-info' % pyver
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    # Point the .pth file at the fake egg-info entry just created.
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a stub carrying fake PKG-INFO.

    Returns False when the directory already carries the fake metadata,
    True after patching.
    """
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # Move the real egg aside, then recreate an empty shell with fake metadata.
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
def before_install():
    """Pre-install hook: neutralize any existing setuptools installation."""
    log.warn('Before install bootstrap.')
    fake_setuptools()
def _under_prefix(location):
    """Return True if *location* falls under the install target requested on
    the command line (``--root``/``--prefix``/``--user``), or if no explicit
    target was requested at all.
    """
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install')+1:]
    for index, arg in enumerate(args):
        # --user installs go to the per-user site directory.
        # FIX: the previous code compared the *option* name against '--user'
        # inside a loop over ('--root', '--prefix'), so this branch was dead.
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                # FIX: was ``arg.split('root=')``, which mangled --prefix=...;
                # split on the first '=' to get the value for either option.
                top_dir = arg.split('=', 1)[1]
                return location.startswith(top_dir)
            elif arg == option:
                # Value passed as a separate argument.
                # FIX: was ``len(args) > index`` (always true here), which
                # could raise IndexError when the option is the last argument.
                if len(args) > index + 1:
                    top_dir = args[index+1]
                    return location.startswith(top_dir)
    return True
def fake_setuptools():
    """Detect an installed setuptools and replace it with fake metadata so
    distribute can take its place. Relaunches the current process after a
    successful patch.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --preix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        # Skip when the egg already carries our fake PKG-INFO.
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    # Restart so the patched state is picked up by a clean interpreter.
    _relaunch()
def _relaunch():
    """Re-execute the current command in a fresh interpreter and exit with
    its return code."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall for interpreters that lack it; called
    as ``extractall(tar)``. Python 2 syntax (octal 0700, ``except X, e``).
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    # Depth-first (reverse-sorted) so children are finalized before parents.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError, e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # NOTE(review): ``argv`` and ``version`` are accepted but ignored here;
    # download_setuptools() runs with its module-level defaults.
    tarball = download_setuptools()
    _install(tarball)
# Script entry point: forward everything after the script name to main().
if __name__ == '__main__':
    main(sys.argv[1:])
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/address/en_IN/__init__.py | from .. import Provider as AddressProvider
class Provider(AddressProvider):
# City and States names taken from wikipedia
# Street format taken from some common famous places in India
# Link for cities: https://en.wikipedia.org/wiki/List_of_cities_in_India_by_population
# Link for States: https://en.wikipedia.org/wiki/States_and_union_territories_of_India
# Links for street name formats: https://www.mumbai77.com/city/3313/travel/old-new-street-names/
city_formats = ("{{city_name}}",)
street_name_formats = (
"{{last_name}} Nagar",
"{{last_name}} Zila",
"{{last_name}} Street",
"{{last_name}} Ganj",
"{{last_name}} Road",
"{{last_name}} Path",
"{{last_name}} Marg",
"{{last_name}} Chowk",
"{{last_name}} Circle",
"{{last_name}}",
)
street_address_formats = (
"{{building_number}}, {{street_name}}",
"{{building_number}}\n{{street_name}}",
)
address_formats = (
"{{street_address}}\n{{city}} {{postcode}}",
"{{street_address}}\n{{city}}-{{postcode}}",
"{{street_address}}, {{city}} {{postcode}}",
"{{street_address}}, {{city}}-{{postcode}}",
)
building_number_formats = ("H.No. ###", "###", "H.No. ##", "##", "##/##", "##/###")
postcode_formats = ("######",)
cities = (
"Mumbai",
"Delhi",
"Kolkata",
"Chennai",
"Bangalore",
"Hyderabad",
"Ahmedabad",
"Kanpur",
"Pune",
"Surat",
"Jaipur",
"Lucknow",
"Nagpur",
"Indore",
"Bhopal",
"Ludhiana",
"Patna",
"Visakhapatnam",
"Vadodara",
"Agra",
"Thane",
"Kalyan-Dombivli",
"Varanasi",
"Ranchi",
"Nashik",
"Dhanbad",
"Faridabad",
"Meerut",
"Pimpri-Chinchwad",
"Howrah",
"Allahabad",
"Ghaziabad",
"Rajkot",
"Amritsar",
"Jabalpur",
"Coimbatore",
"Madurai",
"Srinagar",
"Aurangabad",
"Solapur",
"Vijayawada",
"Jodhpur",
"Gwalior",
"Guwahati",
"Chandigarh",
"Hubli–Dharwad",
"Mysore",
"Tiruchirappalli",
"Bareilly",
"Jalandhar",
"Navi Mumbai",
"Salem",
"Kota",
"Vasai-Virar",
"Aligarh",
"Moradabad",
"Bhubaneswar",
"Gorakhpur",
"Raipur",
"Bhiwandi",
"Kochi",
"Jamshedpur",
"Bhilai",
"Amravati",
"Cuttack",
"Warangal",
"Bikaner",
"Mira-Bhayandar",
"Guntur",
"Bhavnagar",
"Durgapur",
"Kolhapur",
"Ajmer",
"Asansol",
"Ulhasnagar",
"Siliguri",
"Jalgaon",
"Saharanpur",
"Jamnagar",
"Bhatpara",
"Sangli-Miraj & Kupwad",
"Kozhikode",
"Nanded",
"Ujjain",
"Dehradun",
"Rourkela",
"Gulbarga",
"Tirunelveli",
"Malegaon",
"Akola",
"Belgaum",
"Mangalore",
"Bokaro",
"South Dumdum",
"Udaipur",
"Gaya",
"Maheshtala",
"Jhansi",
"Nellore",
"Jammu",
"Thiruvananthapuram",
"Davanagere",
"Kollam",
"Panihati",
"Kurnool",
"Tiruppur",
"Dhule",
"Bhagalpur",
"Rajpur Sonarpur",
"Kakinada",
"Thrissur",
"Bellary",
"Muzaffarnagar",
"Korba",
"Rajahmundry",
"Kamarhati",
"Ambattur",
"Berhampur",
"Ahmednagar",
"Muzaffarpur",
"Noida",
"Patiala",
"Mathura",
"New Delhi",
"Latur",
"Sambalpur",
"Shahjahanpur",
"Kulti",
"Chandrapur",
"Nizamabad",
"Rohtak",
"Bardhaman",
"Rampur",
"Bhilwara",
"Firozabad",
"Bilaspur",
"Shimoga",
"Agartala",
"Gopalpur",
"Darbhanga",
"Panipat",
"Bally",
"Alwar",
"Parbhani",
"Ichalkaranji",
"Anantapuram",
"Baranagar",
"Tumkur",
"Ramagundam",
"Jalna",
"Durg",
"Sagar",
"Bihar Sharif",
"Dewas",
"Barasat",
"Avadi",
"Farrukhabad",
"Aizawl",
"Tirupati",
"Bijapur",
"Satara",
"Satna",
"Ratlam",
"Imphal",
"Pondicherry",
"North Dumdum",
"Anantapur",
"Khammam",
"Ozhukarai",
"Bathinda",
"Thoothukudi",
"Thanjavur",
"Naihati",
"Sonipat",
"Mau",
"Tiruvottiyur",
"Hapur",
"Sri Ganganagar",
"Karnal",
"Etawah",
"Nagercoil",
"Raichur",
"Raurkela Industrial Township",
"Secunderabad",
"Karimnagar",
"Mirzapur",
"Bharatpur",
"Ambarnath",
"Arrah",
"Uluberia",
"Serampore",
"Dindigul",
"Gandhinagar",
"Burhanpur",
"Nadiad",
"Eluru",
"Yamunanagar",
"Kharagpur",
"Munger",
"Pali",
"Katni",
"Singrauli",
"Tenali",
"Sikar",
"Silchar",
"Rewa",
"Sambhal",
"Machilipatnam",
"Vellore",
"Alappuzha",
"Bulandshahr",
"Haridwar",
"Vijayanagaram",
"Erode",
"Gurgaon",
"Bidar",
"Bhusawal",
"Khandwa",
"Purnia",
"Haldia",
"Chinsurah",
"Bhiwani",
"Raebareli",
"Junagadh",
"Bahraich",
"Gandhidham",
"Mango",
"Raiganj",
"Amroha",
"Sultan Pur Majra",
"Hospet",
"Bidhannagar",
"Malda",
"Sirsa",
"Berhampore",
"Jaunpur",
"Surendranagar Dudhrej",
"Madhyamgram",
"Kirari Suleman Nagar",
"Bhind",
"Nandyal",
"Chittoor",
"Bhalswa Jahangir Pur",
"Fatehpur",
"Morena",
"Nangloi Jat",
"Ongole",
"Karawal Nagar",
"Shivpuri",
"Morbi",
"Unnao",
"Pallavaram",
"Kumbakonam",
"Shimla",
"Mehsana",
"Panchkula",
"Orai",
"Ambala",
"Dibrugarh",
"Guna",
"Danapur",
"Sasaram",
"Anand",
"Kottayam",
"Hazaribagh",
"Kadapa",
"Saharsa",
"Nagaon",
"Loni",
"Hajipur",
"Dehri",
"Bettiah",
"Katihar",
"Deoghar",
"Jorhat",
"Siwan",
"Panvel",
"Hosur",
"Tinsukia",
"Bongaigaon",
"Motihari",
"Jamalpur",
"Suryapet",
"Begusarai",
"Miryalaguda",
"Proddatur",
"Karaikudi",
"Kishanganj",
"Phusro",
"Buxar",
"Tezpur",
"Jehanabad",
"Aurangabad",
"Chapra",
"Ramgarh",
"Gangtok",
"Adoni",
"Amaravati",
"Ballia",
"Bhimavaram",
"Dharmavaram",
"Giridih",
"Gudivada",
"Guntakal",
"Hindupur",
"Kavali",
"Khora ",
"Ghaziabad",
"Madanapalle",
"Mahbubnagar",
"Medininagar",
"Narasaraopet",
"Phagwara",
"Pudukkottai",
"Srikakulam",
"Tadepalligudem",
"Tadipatri",
"Udupi",
)
states = (
"Andhra Pradesh",
"Arunachal Pradesh",
"Assam",
"Bihar",
"Chhattisgarh",
"Goa",
"Gujarat",
"Haryana",
"Himachal Pradesh",
"Jharkhand",
"Karnataka",
"Kerala",
"Madhya Pradesh",
"Maharashtra",
"Manipur",
"Meghalaya",
"Mizoram",
"Nagaland",
"Odisha",
"Punjab",
"Rajasthan",
"Sikkim",
"Tamil Nadu",
"Telangana",
"Tripura",
"Uttar Pradesh",
"Uttarakhand",
"West Bengal",
)
    def city_name(self) -> str:
        """Return a random Indian city name drawn from the ``cities`` tuple."""
        return self.random_element(self.cities)
    def administrative_unit(self) -> str:
        """Return a random Indian state name drawn from the ``states`` tuple."""
        return self.random_element(self.states)
    # Faker convention: ``state`` is an alias of ``administrative_unit``.
    state = administrative_unit
/DMS_APP-0.2.1-py3-none-any.whl/dms_app/resources/truckMaster/check_list_master.py | from flask import request
from flask_restx import Resource, fields, reqparse
from ...db.db_connection import database_access
from ...namespace import api
from ...response_helper import get_response
import logging
import json
from bson import json_util
from bson.objectid import ObjectId
from ..login_register.login import token_required, token_required_get
# Query-string parser for the DELETE endpoint: requires the Mongo document _id.
delete_checklist_master = reqparse.RequestParser()
delete_checklist_master.add_argument("_id", type=str, required=True)
# Swagger/request-body model for POST (create a checklist master record).
# "stages" lists where the checklist applies (stage + movement entries);
# "checklist_details" carries the checklist metadata and its parameter rows.
post_checklist_master = api.model("CheckListMasterAdd", {
    "stages": fields.Raw(
        [],
        required="true",
        example=[
            {
                "applicable_movement": [
                    {
                        "value": "inbound",
                        "label": "Inbound",
                    }
                ],
                "enable": "enable",
                "applicable_stage": "yard1",
                "mandatory": "yes",
            },
            {
                "applicable_movement": [
                    {
                        "value": "outbound",
                        "label": "Outbound",
                    }
                ],
                "enable": "enable",
                "applicable_stage": "gate1",
                "mandatory": "no",
            },
        ]
    ),
    "checklist_details": fields.Raw(
        [],
        required="true",
        example=[
            {
                "checklist_name": "nikhil35",
                "created_by": "super_admin",
                "created_on": "5/24/2022",
                "status": "enabled",
                "checklist_data":
                    [{
                        "parameter_name": "sdf",
                        "data_type": "string",
                        "mandatory": "true",
                        "critical": "",
                        "pass_condition": "",
                        "negative_value": "",
                        "positive_value": "",
                        "include_na": ""},
                     {
                        "parameter_name": "sdf",
                        "data_type": "string[]",
                        "mandatory": "true",
                        "critical": "yes",
                        "pass_condition": "negative",
                        "negative_value": "no",
                        "positive_value": "good",
                        "include_na": "NA"
                     }
                     ]}
        ])
})
# Swagger/request-body model for PUT (update a checklist master record).
# Same shape as the create model plus the Mongo "_id" of the document to update.
put_checklist_master = api.model("CheckListMasterUpdate", {
    "_id": fields.String,
    "stages": fields.Raw(
        [],
        required="true",
        example=[
            {
                "applicable_movement": [
                    {
                        "value": "inbound",
                        "label": "Inbound",
                    }
                ],
                "enable": "enable",
                "applicable_stage": "yard1",
                "mandatory": "yes",
            },
            {
                "applicable_movement": [
                    {
                        "value": "outbound",
                        "label": "Outbound",
                    }
                ],
                "enable": "enable",
                "applicable_stage": "gate1",
                "mandatory": "no",
            },
        ]
    ),
    "checklist_details": fields.Raw(
        [],
        required="true",
        example=[
            {
                "checklist_name": "nikhil35",
                "created_by": "super_admin",
                "created_on": "5/24/2022",
                "status": "enabled",
                "checklist_data":
                    [{
                        "parameter_name": "sdf",
                        "data_type": "string",
                        "mandatory": "true",
                        "critical": "",
                        "pass_condition": "",
                        "negative_value": "",
                        "positive_value": "",
                        "include_na": ""},
                     {
                        "parameter_name": "sdf",
                        "data_type": "string[]",
                        "mandatory": "true",
                        "critical": "yes",
                        "pass_condition": "negative",
                        "negative_value": "no",
                        "positive_value": "good",
                        "include_na": "NA"
                     }
                     ]},
        ])
})
# Query-string parser for the list endpoint: mandatory pagination parameters
# plus optional status / created_by filters.
get_checklist_master = reqparse.RequestParser()
get_checklist_master.add_argument("page_no", type=int, required=True, help="Page number")
get_checklist_master.add_argument("page_limit", type=int, required=True, help="limit ")
get_checklist_master.add_argument("status", type=str, help="Status")
get_checklist_master.add_argument("created_by", type=str, help="Created By")
class CheckListMaster(Resource):
    """CRUD endpoints for checklist master documents in ``dms_checklist_master``."""

    @token_required_get
    @api.expect(get_checklist_master)
    def get(self, *args):
        """Return a paginated list of checklists, optionally filtered by
        ``status`` and/or ``created_by``.

        Responds 200 with ``data``/``count``/``total_count`` on a match,
        404 with an empty ``data`` list when nothing matches or on error.
        """
        try:
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            args = get_checklist_master.parse_args()
            # Build one Mongo filter from the optional query parameters instead
            # of duplicating the find/count logic for every filter combination
            # (the original ran each query twice and materialized the full
            # result set just to check for emptiness).
            query = {}
            if args["status"]:
                query["checklist_details.status"] = args["status"]
            if args["created_by"]:
                query["checklist_details.created_by"] = args["created_by"]
            count = checklist_master_collection.count_documents(query)
            if count:
                data = checklist_master_collection.find(query).skip(
                    args["page_limit"] * (args["page_no"] - 1)).limit(
                    args["page_limit"])
                _response = get_response(200)
                _response["data"] = json.loads(json_util.dumps(data))
                _response["count"] = count
                _response["total_count"] = checklist_master_collection.count_documents({})
                return _response
            _response = get_response(404)
            _response["data"] = []
            _response["count"] = 0
            return _response
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Get Checklist Data'
            logging.error(e)
            return _response

    @token_required
    @api.expect(post_checklist_master)
    def post(self, *args):
        """Create a checklist; responds 409 when the checklist name already exists."""
        try:
            args = request.get_json()
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            # Enforce checklist-name uniqueness before inserting.
            data = checklist_master_collection.find_one(
                {"checklist_details.checklist_name": args["checklist_details"]["checklist_name"]})
            if not data:
                checklist_master_collection.insert_one(args)
                return get_response(200)
            return get_response(409)
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Store Checklist Data'
            logging.error(e)
            return _response

    @token_required
    @api.expect(put_checklist_master)
    def put(self, *args):
        """Replace the ``stages`` and ``checklist_details`` of an existing checklist.

        Responds 404 when no document with the given ``_id`` exists.
        """
        try:
            args = request.get_json()
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            data = checklist_master_collection.find_one({"_id": ObjectId(args["_id"])})
            if data:
                checklist_master_collection.update_one(
                    {"_id": ObjectId(args["_id"])},
                    {'$set': {"stages": args["stages"], "checklist_details": args["checklist_details"]}})
                logging.info(get_response(200))
                return get_response(200)
            logging.info(get_response(404))
            return get_response(404)
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Update Checklist Data'
            logging.error(e)
            return _response

    @token_required_get
    @api.expect(delete_checklist_master)
    def delete(self, *args):
        """Delete a checklist by its Mongo ``_id``; responds 404 when absent."""
        try:
            _id = request.args.get("_id")
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            data = checklist_master_collection.find_one({"_id": ObjectId(_id)})
            if data:
                checklist_master_collection.delete_one({"_id": ObjectId(_id)})
                logging.info(get_response(200))
                return get_response(200)
            logging.info(get_response(404))
            return get_response(404)
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Delete checklist data'
            logging.error(e)
            return _response
# Query-string parser for the fetch-by-id endpoint: requires the Mongo document _id.
checklist_byid = reqparse.RequestParser()
checklist_byid.add_argument("_id", type=str, required=True, help="_id")
class GetCheckListById(Resource):
    """Fetch a single checklist master document by its Mongo ``_id``."""

    @token_required_get
    @api.expect(checklist_byid)
    def get(self, *args):
        """Return the matching document (200) or an empty payload (404)."""
        try:
            _id = request.args.get("_id")
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            # Single round-trip: the original queried the same document twice.
            data = checklist_master_collection.find_one({"_id": ObjectId(_id)})
            if data:
                _response = get_response(200)
                _response["data"] = json.loads(json_util.dumps(data))
                return _response
            _response = get_response(404)
            _response["data"] = []
            _response["count"] = 0
            return _response
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Get Checklist Data'
            logging.error(e)
            return _response
# Query-string parser for the fetch-by-stage endpoint.
checklist_master_by_stages = reqparse.RequestParser()
checklist_master_by_stages.add_argument("applicable_stage", type=str, required=True, help="Applicable Stage")
# Fix copy-pasted help text: this argument is the movement value, not an _id.
checklist_master_by_stages.add_argument("applicable_movement", type=str, required=True, help="Applicable Movement")
class GetCheckListByStages(Resource):
    """Fetch enabled checklists applicable to a given stage and movement."""

    @token_required_get
    @api.expect(checklist_master_by_stages)
    def get(self, *args):
        """Return all enabled checklists whose ``stages`` entry matches both
        the requested stage and movement value (200), or 404 when none match.
        """
        try:
            applicable_stage = request.args.get("applicable_stage")
            applicable_movement = request.args.get("applicable_movement")
            database_connection = database_access()
            checklist_master_collection = database_connection["dms_checklist_master"]
            # Build the filter once so find() and the reported count cannot
            # drift apart (the original duplicated this literal in two calls).
            query = {
                "stages": {
                    "$elemMatch": {
                        "applicable_stage": applicable_stage,
                        "applicable_movement": {"$elemMatch": {"value": applicable_movement}},
                    }
                },
                "checklist_details.status": "enabled",
            }
            data = list(checklist_master_collection.find(query))
            if data:
                _response = get_response(200)
                _response["data"] = json.loads(json_util.dumps(data))
                # The list is fully materialized, so its length is the count.
                _response["count"] = len(data)
                _response["total_count"] = checklist_master_collection.count_documents({})
                return _response
            _response = get_response(404)
            _response["data"] = []
            _response["count"] = 0
            return _response
        except Exception as e:
            _response = get_response(404)
            _response['message'] = 'Failed to Get Checklist Data'
            logging.error(e)
            return _response
/Oasis_Optimization-1.0.2-py3-none-any.whl/Oasis/__init__.py | __author__ = 'Mostafa Farrag'
__version__ = '1.0.2'
__docformat__ = 'restructuredtext'
# import os,sys
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
# def configuration(parent_package, top_path):
# config = Configuration('Oasis', parent_package, top_path)
# # need: auto add_subpackage from source availability
# config.add_subpackage('NSGA')
# return config
# setup(**configuration(top_path='').todict())
def configuration(parent_package='', top_path=None):
    """Build the ``numpy.distutils`` configuration for the Oasis package.

    Parameters
    ----------
    parent_package : str
        Name of the parent package (empty for a top-level build).
    top_path : str or None
        Path to the top of the source tree.

    Returns
    -------
    numpy.distutils.misc_util.Configuration
        Configuration with the ``HS`` subpackage registered.
    """
    # ``Configuration`` is already imported at module level; the original
    # re-imported it here redundantly.
    config = Configuration(None, parent_package, top_path)
    config.set_options(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True,
    )
    config.add_subpackage('HS')
    return config
import Oasis.constraint as constraint
import Oasis.objective as objective
import Oasis.variable as variable
import Oasis.parameter as parameter
import Oasis.history as history
import Oasis.gradient as gradient
import Oasis.optimization as optimization
import Oasis.optimizer as optimizer
# import Oasis.hsapi as hsapi
# from Oasis.HS import *
import Oasis.harmonysearch as harmonysearch
#import Oasis.ga as ga
# from Oasis.NSGA import *
# Module-level doc-string.
__doc__ = """
Oasis is a Harmony Search (HS) optimization library. HS performs a stochastic
random search guided by two factors: the harmony memory consideration rate
(HMCR) and the pitch adjusting rate (PAR). The main difference between GA and
HS is that GA evaluates many solutions simultaneously, which may lead to
convergence on a local minimum, whereas HS evaluates only one solution at each
iteration, which enables the algorithm to search broadly and avoid convergence
to local minima. HS generates a new offspring after considering all of the
existing population, whereas GA considers only the two parents when generating
a new offspring (Lee and Geem 2005).
"""
/HyFetch-1.4.10.tar.gz/HyFetch-1.4.10/README.md | # HyFetch
neofetch with pride flags <3
<img alt="screenshot" src="https://user-images.githubusercontent.com/22280294/197708447-ddee6db2-1017-48f2-b507-8ddf85acef0d.png">
### Running Updated Original Neofetch
This repo also serves as an updated version of the original `neofetch` since the upstream [dylanaraps/neofetch](https://github.com/dylanaraps/neofetch) doesn't seem to be maintained anymore (as of Jul 30, 2022, the original repo hasn't merged a pull request for 6 months). If you only want to use the updated neofetch without pride flags, you can use the `neofetch` script from this repo. To prevent command name conflict, I call it `neowofetch` :)
* Method 1: `pip install -U hyfetch` then run `neowofetch`
* Method 2: `npx neowofetch`
* Method 3: `P="$HOME/.local/bin/neowofetch" curl -L nf.hydev.org -o $P && chmod +x $P`
* Method 4: Run without install `bash <(curl -sL nf.hydev.org)`
## Installation
### Method 1: Install using Python pip (Recommended)
Install Python >= 3.7 first. Then, just do:
```sh
pip install -U hyfetch
```
### Method 2: Install using system package manager
Currently, these distributions have existing packages for HyFetch:
* Arch Linux: `sudo pacman -S hyfetch` (Thanks to [@Aleksana](https://github.com/Aleksanaa) and [@Antiz96](https://github.com/Antiz96))
* Nix: `nix-env -i hyfetch` (Thanks to [@YisuiDenghua](https://github.com/YisuiDenghua))
* Guix: `guix install hyfetch` (Thanks to [@WammKD](https://github.com/WammKD))
* Slackware: `sbopkg -b hyfetch` ([SlackBuild](https://slackbuilds.org/repository/15.0/desktop/hyfetch/?search=hyfetch)) (Thanks to [@bittin](https://github.com/bittin) and Urchlay)
* Nix profile: `nix profile install nixpkgs#hyfetch`
### Method 3: Install the latest developmental version using git
Install Python >= 3.7 first. Then run the following commands:
```sh
git clone https://github.com/hykilpikonna/hyfetch.git
cd hyfetch
pip install .
```
## Usage
When you run `hyfetch` for the first time, it will prompt you to choose a color system and a preset. Just follow the prompt, and everything should work (hopefully). If something doesn't work, feel free to submit an issue!
If you want to use the updated `neofetch` without LGBTQ flags, check out [this section](https://github.com/hykilpikonna/hyfetch#running-updated-original-neofetch)
## Questions and answers
#### Q: How do I change my config?
A: Use `hyfetch -c`
#### Q: What do I do if the color is too dark/light for my theme?
A: You can try setting the colors' "lightness" in the configuration menu. The value should be between 0 and 1. For example, if you are using dark theme and the rainbow flag is too dark to display, you can set lightness to 0.7.
Feel free to experiment with it!

## Contributing
To make changes to our codebase, you first need to create a fork by clicking the "Fork" button on the top right. Then, you can clone your fork of the source code using `git clone https://github.com/{your username}/hyfetch.git`.
After making changes to the source code, you can run `python -m hyfetch` in the root directory of your repo to test out your changes.
If they work correctly, you can commit and push these changes using git command or Github Desktop. Then, you can create a pull request on Github so that it can go into our next release!
You can also install your version locally by running `pip install .` in the repo root.
## Change Log
### About Notation
Updates to HyFetch begin with the emoji 🌈
Updates to `neowofetch` begin with the emoji 🖼️
### TODO
* [ ] (Important!) Refactor flag storage & coloring to support non-stride patterns
* [ ] Config menu: Allow left-right arrow keys for pagination
### Nightly
Note: You can install the latest nightly version by using:
```sh
pip install git+https://github.com/hykilpikonna/hyfetch.git@master
```
<!-- CHANGELOG STARTS HERE --->
### 1.4.10
* 🌈 Add support for qwqfetch backend ([#148](https://github.com/hykilpikonna/hyfetch/pull/148))
* 🌈 Add nonhuman-unity flag ([#139](https://github.com/hykilpikonna/hyfetch/pull/139))
* 🌈 Add gynesexual, androsexual flags ([#157](https://github.com/hykilpikonna/hyfetch/pull/157))
* 🌈 Add option to disable pride month animation ([#134](https://github.com/hykilpikonna/hyfetch/pull/134))
* 🌈 Make ^C error message less aggressive (?)
* 🌈 Fix: Should not assume ~/.config is writable ([#136](https://github.com/hykilpikonna/hyfetch/pull/136))
* 🌈 Fix: Foreground-background arrangement not detected ([#154](https://github.com/hykilpikonna/hyfetch/pull/154))
* 🖼 OS - Update macOS version name list ([#140](https://github.com/hykilpikonna/hyfetch/pull/140))
* 🖼 Ascii - Improve color removal ([#161](https://github.com/hykilpikonna/hyfetch/pull/161))
* 🖼 Ascii - Fix reset character performance ([#158](https://github.com/hykilpikonna/hyfetch/pull/158))
* 🖼 Distro - Smoothen the Tumbleweed logo ([dylanaraps#2342](https://github.com/dylanaraps/neofetch/pull/2342))
* 🖼 Distro - Update RebornOS logo ([dylanaraps#2358](https://github.com/dylanaraps/neofetch/pull/2358))
* 🖼 Distro - Update Venom Linux logo ([#166](https://github.com/hykilpikonna/hyfetch/pull/166))
* 🖼 Distro - Add Windows 95 ASCII logo ([dylanaraps#2346](https://github.com/dylanaraps/neofetch/pull/2346))
* 🖼 Distro - Add ParchLinux ([dylanaraps#2045](https://github.com/dylanaraps/neofetch/pull/2045))
* 🖼 Distro - Add OpenKylin ([dylanaraps#2341](https://github.com/dylanaraps/neofetch/pull/2341))
* 🖼 Distro - Add EvolutionOS ([dylanaraps#2350](https://github.com/dylanaraps/neofetch/pull/2350))
* 🖼 Distro - Add Salix ([dylanaraps#2357](https://github.com/dylanaraps/neofetch/pull/2357))
* 🖼 Distro - Add Panwah ([dylanaraps#2355](https://github.com/dylanaraps/neofetch/pull/2355))
* 🖼 Distro - Add PhyOS ([#142](https://github.com/hykilpikonna/hyfetch/pull/142))
* 🖼 Distro - Add Athena OS ([#130](https://github.com/hykilpikonna/hyfetch/pull/130))
* 🖼 Distro - Add Meowix ([#159](https://github.com/hykilpikonna/hyfetch/pull/159))
* 🖼 Distro - Add Slackel ([#167](https://github.com/hykilpikonna/hyfetch/pull/167))
* 🖼 Distro - Support *Wrt variants ([dylanaraps#2352](https://github.com/dylanaraps/neofetch/pull/2352))
* 🖼 Version - Fix a typo that broke OS detection on WSL ([#155](https://github.com/hykilpikonna/hyfetch/pull/155))
* 🖼 Packages - Implement --package_separate flag ([#135](https://github.com/hykilpikonna/hyfetch/pull/135))
* 🖼 Packages - Separate flatpak-system and flatpak-user ([#135](https://github.com/hykilpikonna/hyfetch/pull/135))
* 🖼 Packages - Add steam as a package manager ([#152](https://github.com/hykilpikonna/hyfetch/pull/152))
* 🖼 Packages - Add squirrel package manager ([#153](https://github.com/hykilpikonna/hyfetch/pull/153))
* 🖼 Packages - Make cargo run on all systems ([#146](https://github.com/hykilpikonna/hyfetch/pull/146))
* 🖼 Packages - Fix cargo package count ([#144](https://github.com/hykilpikonna/hyfetch/pull/144))
* 🖼 Packages - Add Devbox package manager ([#137](https://github.com/hykilpikonna/hyfetch/pull/137))
* 🖼 Packages - Fix phantom package when pm command fails ([#145](https://github.com/hykilpikonna/hyfetch/pull/145))
* 🖼 Packages - Update scratch package manager ([#165](https://github.com/hykilpikonna/hyfetch/pull/165))
* 🖼 Editor - Better version detection
* 🖼 Resolution - Improve macOS resolution detection ([dylanaraps#2356](https://github.com/dylanaraps/neofetch/pull/2356))
* 🖼 Resolution - Add resolution lookup for iOS ([#164](https://github.com/hykilpikonna/hyfetch/pull/164))
* 🖼 Desktop - Display global KDE Plasma theme ([#163](https://github.com/hykilpikonna/hyfetch/pull/163))
* 🖼 IP - Improve macOS local IP detection ([dylanaraps#2362](https://github.com/dylanaraps/neofetch/pull/2362))
* 🖼 IP - Fix macOS route hangs on reverse DNS lookup
* 🖼 Config - Allow specifying default config to copy to ~/.config ([#133](https://github.com/hykilpikonna/hyfetch/pull/133))
### 1.4.9
* 🌈 Add pride month easter-egg animation! ⭐️
* 🌈 Autocompletion for bash, zsh, tcsh ([#96](https://github.com/hykilpikonna/hyfetch/pull/96))
* 🌈 Add nix profile installation method ([#114](https://github.com/hykilpikonna/hyfetch/pull/114))
* 🌈 Add experimental color overlay function for more accurate lightness adjusting (using `--c-overlay`)
* 🌈 Allow neofetch argument passthrough (using `--args='...'`)
* 🌈 Show recommended terminal size ([#129](https://github.com/hykilpikonna/hyfetch/pull/129))
* 🌈 Update issue & pull request templates
* 🖼 Hostname - Fix FQDN substitution
* 🖼 Version - Fix Windows version detection ([dylanaraps#2309](https://github.com/dylanaraps/neofetch/pull/2309))
* 🖼 Packages - Fix winget stuck on agreement ([#82](https://github.com/hykilpikonna/hyfetch/pull/82))
* 🖼 Distro - Fix Windows text encoding ([#115](https://github.com/hykilpikonna/hyfetch/pull/115))
* 🖼 Distro - Add Astra Linux ([dylanaraps#2313](https://github.com/dylanaraps/neofetch/pull/2313))
* 🖼 Distro - Add FemboyOS ([#121](https://github.com/hykilpikonna/hyfetch/pull/121))
* 🖼 Distro - Add Nobara Linux ([dylanaraps#2326](https://github.com/dylanaraps/neofetch/pull/2326))
* 🖼 Font - Fix Konsole ppid detection ([#116](https://github.com/hykilpikonna/hyfetch/pull/116))
* 🖼 Font - Fix Konsole terminal font detection ([#127](https://github.com/hykilpikonna/hyfetch/pull/127))
* 🖼 Image - Optimize sixel image display ([dylanaraps#2316](https://github.com/dylanaraps/neofetch/pull/2316))
### 1.4.8
* 🌈 Improved Windows git bash detection ([#99](https://github.com/hykilpikonna/hyfetch/pull/99))
* 🌈 Improved color formatting codes ([#101](https://github.com/hykilpikonna/hyfetch/pull/101))
* 🌈 Allow specifying distro in config ([#102](https://github.com/hykilpikonna/hyfetch/pull/102))
* 🌈 Allow specifying custom ascii files ([#104](https://github.com/hykilpikonna/hyfetch/pull/104))
* 🌈 Add omniromantic and pangender flags ([#106](https://github.com/hykilpikonna/hyfetch/pull/106))
* 🌈 Now officially packaged for ArchLinux Community & Slackware! ([#112](https://github.com/hykilpikonna/hyfetch/pull/112) | [#109](https://github.com/hykilpikonna/hyfetch/pull/109))
* 🖼 Host - Update Apple device model detection. ([#111](https://github.com/hykilpikonna/hyfetch/pull/111))
* 🖼 Distro - Add Salient OS. ([dylanaraps#2301](https://github.com/dylanaraps/neofetch/pull/2301))
* 🖼 Distro - Add PikaOS. ([#105](https://github.com/hykilpikonna/hyfetch/pull/105))
* 🖼 Distro - Add Archcraft minimal variant. ([#108](https://github.com/hykilpikonna/hyfetch/pull/108))
* 🖼 Distro - Update Vanilla logo. ([#98](https://github.com/hykilpikonna/hyfetch/pull/98))
* 🖼 Distro - ChromeOS version improvements. ([dylanaraps#2305](https://github.com/dylanaraps/neofetch/pull/2305))
* 🖼 CPU - Improved multi-cpu ARM chip detection. ([#97](https://github.com/hykilpikonna/hyfetch/pull/97))
* 🖼 Packages - Support pipx package manager. ([#107](https://github.com/hykilpikonna/hyfetch/pull/107))
### 1.4.7
* 🌈 Better Windows compatibility ([#45](https://github.com/hykilpikonna/hyfetch/pull/45), [#84](https://github.com/hykilpikonna/hyfetch/pull/84), [#87](https://github.com/hykilpikonna/hyfetch/pull/87), [#89](https://github.com/hykilpikonna/hyfetch/pull/89))
* 🌈 Add gendervoid flags ([#81](https://github.com/hykilpikonna/hyfetch/pull/81))
* 🌈 Fix ASCII extractor escape sequence generation ([#90](https://github.com/hykilpikonna/hyfetch/pull/90), [#91](https://github.com/hykilpikonna/hyfetch/pull/91))
* 🖼 Distro - Add CuteOS ([dylanaraps#2291](https://github.com/dylanaraps/neofetch/pull/2291))
* 🖼 Distro - Add Floflis ([dylanaraps#2289](https://github.com/dylanaraps/neofetch/pull/2289))
* 🖼 Distro - Add ArseLinux ([dylanaraps#2295](https://github.com/dylanaraps/neofetch/pull/2295))
* 🖼 Distro - Better Solaris support ([dylanaraps#2293](https://github.com/dylanaraps/neofetch/pull/2293))
* 🖼 Packages - Fix scoop package manager for Windows ([#93](https://github.com/hykilpikonna/hyfetch/pull/93))
* 🖼 Packages - Add Evox package manager for Stock Linux ([#95](https://github.com/hykilpikonna/hyfetch/pull/95))
* 🖼 WM - Fix false positive wm process name detection ([#88](https://github.com/hykilpikonna/hyfetch/pull/88), [#94](https://github.com/hykilpikonna/hyfetch/pull/94))
* 🖼 Misc - Added BIOS and bluetooth detection
### 1.4.6
* 🌈 Add compatibility for FastFetch version `>1.8.0` ([#62](https://github.com/hykilpikonna/hyfetch/pull/62))
* 🖼 Distro - Add Aperture Science ascii art ([#61](https://github.com/hykilpikonna/hyfetch/pull/61))
* 🖼 Distro - Add RhaymOS ([dylanaraps#2274](https://github.com/dylanaraps/neofetch/pull/2274))
* 🖼 Editor - Add editor information detection ([dylanaraps#2271](https://github.com/dylanaraps/neofetch/pull/2271))
* 🖼 Packages - Fix empty cargo directory ([#58](https://github.com/hykilpikonna/hyfetch/pull/58))
* 🖼 Terminal - Display gnome-console instead of kgx ([dylanaraps#2277](https://github.com/dylanaraps/neofetch/pull/2277))
* 🖼 Terminal - Fix terminal detection with new get_process_name function
* 🖼 CPU - Detect ISA string on RISC-V CPUs ([#60](https://github.com/hykilpikonna/hyfetch/pull/60))
* 🖼 Song - Fix CMUS player song detection on macOS ([#55](https://github.com/hykilpikonna/hyfetch/pull/55))
* 🖼 Network - Fix macOS network detection ([#56](https://github.com/hykilpikonna/hyfetch/pull/56))
* 🖼 Misc - Change LICENSE year to 2023 ([#59](https://github.com/hykilpikonna/hyfetch/pull/59))
### 1.4.5
* 🌈 **Support using FastFetch as a HyFetch backend** (`hyfetch -b fastfetch`)
* 🌈 Add config file argument ([#48](https://github.com/hykilpikonna/hyfetch/pull/48))
* 🌈 Fix problems caused by color detection on Windows ([#16](https://github.com/hykilpikonna/hyfetch/pull/16))
* 🌈 Support pure-python distro detection for FastFetch
* 🖼️ Distro - Add Aster Linux ([dylanaraps#2251](https://github.com/dylanaraps/neofetch/pull/2251))
* 🖼️ Distro - Add Hybrid Linux ([dylanaraps#2239](https://github.com/dylanaraps/neofetch/pull/2239))
* 🖼️ Distro - Add UrukOS ([dylanaraps#2258](https://github.com/dylanaraps/neofetch/pull/2258))
* 🖼️ Distro - Add Project Sasanqua ([dylanaraps#2264](https://github.com/dylanaraps/neofetch/pull/2264))
* 🖼️ Distro - Add Kali small variant ([dylanaraps#2242](https://github.com/dylanaraps/neofetch/pull/2242))
* 🖼️ Distro - Fix CachyOS matching ([dylanaraps#2026](https://github.com/dylanaraps/neofetch/pull/2026))
* 🖼 WM - Fix wm detection with `fuser` ([#39](https://github.com/hykilpikonna/hyfetch/pull/39))
* 🖼️ Memory - Make memory unit decimal calculation more accurate ([#52](https://github.com/hykilpikonna/hyfetch/pull/52))
* 🖼 Packages - Fix squirrel (Stock Linux) package count detection ([#39](https://github.com/hykilpikonna/hyfetch/pull/39))
* 🖼 Packages - Support cargo bin environment variable ([#49](https://github.com/hykilpikonna/hyfetch/pull/49))
* 🖼 Packages - Add tea.xyz package manager (issue [dylanaraps#2235](https://github.com/dylanaraps/neofetch/pull/2235))
### 1.4.4
* 🌈 Fix Python 3.11 compatibility ([#35](https://github.com/hykilpikonna/hyfetch/pull/35))
* 🌈 Fix many overflow problems when screen is too small
* 🖼️ Distro - Add Enso ([dylanaraps#2233](https://github.com/dylanaraps/neofetch/pull/2233))
* 🖼️ Memory - Optimize and fix memory unit conversion ([dylanaraps#2225](https://github.com/dylanaraps/neofetch/pull/2225))
* 🖼️ DE - Add dwl window manager ([dylanaraps#2234](https://github.com/dylanaraps/neofetch/pull/2234))
* 🖼️ DE - Fix XDG session detection for X11 ([dylanaraps#2232](https://github.com/dylanaraps/neofetch/pull/2232))
* 🖼️ CPU - Fix model detection for loongson ([#34](https://github.com/hykilpikonna/hyfetch/pull/34))
### 1.4.3
* 🌈 **Auto detect terminal background color & rgb support**
* 🌈 **Optimize experience on light-themed terminals**
* 🌈 Fix bugs with lightness and light-mode config not applying
* 🌈 Fix color alignment for distros with first color ≠ `${c1}` (e.g. Ubuntu Budgie)
* 🌈 Add unlabeled flags ([#25](https://github.com/hykilpikonna/hyfetch/pull/25))
* 🌈 Add gender nonconforming & femboy & tomboy flags ([#32](https://github.com/hykilpikonna/hyfetch/pull/32))
* 🌈 Fix jailbreak iOS shell `killed: 9` issue caused by ld signature check.
* 🖼️ Distro - Add garuda_small ([dylanaraps#2215](https://github.com/dylanaraps/neofetch/pull/2215))
* 🖼️ Distro - Add Cobalt Linux ([dylanaraps#2213](https://github.com/dylanaraps/neofetch/pull/2213))
* 🖼️ Distro - Add VanillaOS ([dylanaraps#2222](https://github.com/dylanaraps/neofetch/pull/2222))
* 🖼️ Distro - Surround macOS build number in parentheses ([#28](https://github.com/hykilpikonna/hyfetch/pull/28))
* 🖼️ Misc - Auto select stdout mode based on tty detection ([#31](https://github.com/hykilpikonna/hyfetch/pull/31))
* 🖼️ Bug Fix - Fix cols coloring reset for bash 3.2 ([#24](https://github.com/hykilpikonna/hyfetch/pull/24))
### 1.4.2
* 🌈 Detailed runnning/contributing instructions in README.md ([#21](https://github.com/hykilpikonna/hyfetch/pull/21))
* 🖼️ Distro - Add Stock Linux ([#23](https://github.com/hykilpikonna/hyfetch/pull/23))
* 🖼️ Distro - Add DietPi ([dylanaraps#1706](https://github.com/dylanaraps/neofetch/pull/1706))
* 🖼️ Distro - Add OmniOS illumos ([dylanaraps#2196](https://github.com/dylanaraps/neofetch/pull/2196))
* 🖼️ Distro - Add Droidian ([dylanaraps#2201](https://github.com/dylanaraps/neofetch/pull/2201))
* 🖼️ Distro - Add HamoniKR ([dylanaraps#2210](https://github.com/dylanaraps/neofetch/pull/2210))
* 🖼️ Song - Add support for TIDAL HiFi ([#22](https://github.com/hykilpikonna/hyfetch/pull/22))
* 🖼️ CPU - Detect multiple CPU models for ARM
* 🖼️ Misc - Better defaults: Show RAM in GiB, RAM percentage, CPU speed rounding, refresh rate
* 🖼️ Bug Fix - Fix bash 5.2 column cut off issue ([#24](https://github.com/hykilpikonna/hyfetch/pull/24))
### 1.4.1
* 🌈 Paginate flags ([#14](https://github.com/hykilpikonna/hyfetch/pull/14))
* 🌈 Add release workflow ([#15](https://github.com/hykilpikonna/hyfetch/pull/15))
* 🌈 Create automatic release script
* 🌈 Config page - Give warning when terminal size is too small
* 🌈 Config page - Optimize color arrangement selection on small screens
* 🌈 Add experimental Windows support (very unstable at the moment)
* 🖼️ Distro - Add ravynOS ([dylanaraps#2182](https://github.com/dylanaraps/neofetch/pull/2182))
* 🖼️ Distro - Add ChonkySealOS ([dylanaraps#2180](https://github.com/dylanaraps/neofetch/pull/2180))
* 🖼️ Distro - Add GhostBSD ([TheSudoer#18](https://github.com/hykilpikonna/hyfetch/pull/18))
* 🖼️ Distro - Add NekOS ([dylanaraps#2186](https://github.com/dylanaraps/neofetch/pull/2186))
* 🖼️ Distro - Add astOS ([dylanaraps#2185](https://github.com/dylanaraps/neofetch/pull/2185))
* 🖼️ Distro - Fix ChromeOS identification ([dylanaraps#1949](https://github.com/dylanaraps/neofetch/pull/1949))
* 🖼️ WM - Add Hyprland to the list of wayland wms ([dylanaraps#2190](https://github.com/dylanaraps/neofetch/pull/2190))
* 🖼️ Env - Add Java, Python, Node version detection (can be enabled in config)
* 🖼️ Bug Fix - Fix hostname detection when `inetutils` is not installed
* 🖼️ Bug Fix - Fix empty brackets displayed when no theme is found ([dylanaraps#1713](https://github.com/dylanaraps/neofetch/pull/1713))
* 🖼️ Bug Fix - Fix `$` escape bug in `strip_escape_codes()` ([dylanaraps#1543](https://github.com/dylanaraps/neofetch/pull/1543))
* 🖼️ Bug Fix - Fix backslash escape bug in `strip_escape_codes()` ([dylanaraps#1543](https://github.com/dylanaraps/neofetch/pull/1543))
* 🖼️ Bug Fix - Fix CPU detection on ARM QEMU
### 1.4.0
* 🌈 Add finsexual flag ([#12](https://github.com/hykilpikonna/hyfetch/pull/12))
* 🚀 Addressed a total of 128 currently open pull requests from neofetch
<details>
<summary>🖼️ Meta Changes</summary>
* Meta - Fixed shellcheck warnings in `neowofetch`
* Meta - Moved shellcheck from travis to GitHub Actions
* Meta - Created a script to automatically generate distro list
* Colors - Allow RGB colors in neofetch `--ascii_colors` argument ([dylanaraps#1726](https://github.com/dylanaraps/neofetch/pull/1726))
</details>
<details>
<summary>🖼️ Distro/OS Support Changes</summary>
* Distro - Update Ubuntu logo ([dylanaraps#2125](https://github.com/dylanaraps/neofetch/pull/2125))
* Distro - Add Exodia OS Predator ([dylanaraps#2174](https://github.com/dylanaraps/neofetch/pull/2174))
* Distro - Add Parch ([dylanaraps#2045](https://github.com/dylanaraps/neofetch/pull/2045))
* Distro - Add VzLinux ([dylanaraps#1971](https://github.com/dylanaraps/neofetch/pull/1971))
* Distro - Add Twister OS ([dylanaraps#1890](https://github.com/dylanaraps/neofetch/pull/1890))
* Distro - Add BlackPantherOS ([dylanaraps#1761](https://github.com/dylanaraps/neofetch/pull/1761))
* Distro - Add TorizonCore ([dylanaraps#1744](https://github.com/dylanaraps/neofetch/pull/1744))
* Distro - Add KrassOS ([dylanaraps#1631](https://github.com/dylanaraps/neofetch/pull/1631))
* Distro - Add Synology DSM ([dylanaraps#1666](https://github.com/dylanaraps/neofetch/pull/1666))
* Distro - Add MatuusOS ([dylanaraps#1902](https://github.com/dylanaraps/neofetch/pull/1902))
* Distro - Add HarDClanZ Linux ([dylanaraps#1797](https://github.com/dylanaraps/neofetch/pull/1797))
</details>
<details>
<summary>🖼️ Device Support Changes</summary>
* Host - Identify iMac, Mac mini, Mac Pro Models ([dylanaraps#1944](https://github.com/dylanaraps/neofetch/pull/1944))
* Host - Identify FreeBSD host model ([dylanaraps#1588](https://github.com/dylanaraps/neofetch/pull/1588))
* Font - Better font matching for st ([dylanaraps#1877](https://github.com/dylanaraps/neofetch/pull/1877))
* Theme - Use XSETTINGS to get theme without a DE ([dylanaraps#1831](https://github.com/dylanaraps/neofetch/pull/1831))
* Theme - Add QT theme detection ([dylanaraps#1713](https://github.com/dylanaraps/neofetch/pull/1713))
* Theme - Add LeftWM theme detection ([dylanaraps#1963](https://github.com/dylanaraps/neofetch/pull/1963))
* Cursor - Add cursor theme detection ([dylanaraps#1149](https://github.com/dylanaraps/neofetch/pull/1149))
* Terminal - Improve NixOS terminal identification ([dylanaraps#1134](https://github.com/dylanaraps/neofetch/pull/1134))
* Terminal - Use `/proc/.../cmdline` instead of `.../comm` ([dylanaraps#2034](https://github.com/dylanaraps/neofetch/pull/2034))
* Packages - Improve scoop/choco package count ([dylanaraps#1642](https://github.com/dylanaraps/neofetch/pull/1642))
</details>
<details>
<summary>🖼️ Bug Fixes</summary>
* Bug Fix - Fix prepended `=` for kitty ([dylanaraps#2116](https://github.com/dylanaraps/neofetch/pull/2116))
* Bug Fix - Hide domain in hostname by default ([dylanaraps#2095](https://github.com/dylanaraps/neofetch/pull/2095))
* Bug Fix - Respect TMPDIR if it exists ([dylanaraps#1891](https://github.com/dylanaraps/neofetch/pull/1891))
* Bug Fix - Fix terminal size over slow connection ([dylanaraps#1895](https://github.com/dylanaraps/neofetch/pull/1895))
* Bug Fix - Fix GPU detection for bumblebee dual-GPU ([dylanaraps#1131](https://github.com/dylanaraps/neofetch/pull/1131))
* Bug Fix - Strip colors in ASCII length calculation ([dylanaraps#1543](https://github.com/dylanaraps/neofetch/pull/1543))
</details>
### 1.3.0
<details>
<summary>🖼️ Ascii Art Changes</summary>
* Ascii - Improve Trisquel ([dylanaraps#1946](https://github.com/dylanaraps/neofetch/pull/1946))
* Ascii - Improve LangitKetujuh ([dylanaraps#1948](https://github.com/dylanaraps/neofetch/pull/1948))
* Ascii - Improve Artix small ([dylanaraps#1872](https://github.com/dylanaraps/neofetch/pull/1872))
* Ascii - Update Archcraft ([dylanaraps#1919](https://github.com/dylanaraps/neofetch/pull/1919))
</details>
<details>
<summary>🖼️ Distro/OS Support Changes</summary>
* OS - Support Old macOS 10.4 and 10.5 ([dylanaraps#2151](https://github.com/dylanaraps/neofetch/pull/2151))
* OS - Identify Hackintosh VM ([dylanaraps#2005](https://github.com/dylanaraps/neofetch/pull/2005))
* Distro - Fix model detection for Ubuntu Touch ([dylanaraps#2167](https://github.com/dylanaraps/neofetch/pull/2167))
* Distro - Add EncryptOS ([dylanaraps#2158](https://github.com/dylanaraps/neofetch/pull/2158))
* Distro - Add BigLinux ([dylanaraps#2061](https://github.com/dylanaraps/neofetch/pull/2061))
* Distro - Add AmogOS ([dylanaraps#1904](https://github.com/dylanaraps/neofetch/pull/1904))
* Distro - Add CutefishOS ([dylanaraps#2054](https://github.com/dylanaraps/neofetch/pull/2054))
* Distro - Add PearOS ([dylanaraps#2049](https://github.com/dylanaraps/neofetch/pull/2049))
* Distro - Add FusionX ([dylanaraps#2011](https://github.com/dylanaraps/neofetch/pull/2011))
* Distro - Add Q4OS ([dylanaraps#1973](https://github.com/dylanaraps/neofetch/pull/1973))
* Distro - Add CachyOS ([dylanaraps#2026](https://github.com/dylanaraps/neofetch/pull/2026))
* Distro - Add Soda Linux ([dylanaraps#2023](https://github.com/dylanaraps/neofetch/pull/2023))
* Distro - Add Elive Linux ([dylanaraps#1957](https://github.com/dylanaraps/neofetch/pull/1957))
* Distro - Add Uos ([dylanaraps#1991](https://github.com/dylanaraps/neofetch/pull/1991))
* Distro - Add MassOS ([dylanaraps#1947](https://github.com/dylanaraps/neofetch/pull/1947))
* Distro - Add CalinixOS ([dylanaraps#1988](https://github.com/dylanaraps/neofetch/pull/1988))
* Distro - Add Kaisen Linux ([dylanaraps#1958](https://github.com/dylanaraps/neofetch/pull/1958))
* Distro - Add yiffOS ([dylanaraps#1920](https://github.com/dylanaraps/neofetch/pull/1920))
* Distro - Add Sulin ([dylanaraps#1896](https://github.com/dylanaraps/neofetch/pull/1896))
* Distro - Add Wii Linux ([dylanaraps#1929](https://github.com/dylanaraps/neofetch/pull/1929))
* Distro - Add Linspire ([dylanaraps#1905](https://github.com/dylanaraps/neofetch/pull/1905))
* Distro - Add Ubuntu Kylin ([dylanaraps#1974](https://github.com/dylanaraps/neofetch/pull/1974))
* Distro - Add OPNsense ([dylanaraps#1055](https://github.com/dylanaraps/neofetch/pull/1055))
* Distro - Improve BSD machine arch detection ([dylanaraps#2015](https://github.com/dylanaraps/neofetch/pull/2015))
* Distro - Improve Manjaro version detection ([dylanaraps#1879](https://github.com/dylanaraps/neofetch/pull/1879))
</details>
<details>
<summary>🖼️ Device Support Changes</summary>
* Terminal - Add Fig ([dylanaraps#2077](https://github.com/dylanaraps/neofetch/pull/2077))
* Terminal - Identify font for Apple Terminal ([dylanaraps#2017](https://github.com/dylanaraps/neofetch/pull/2017))
* CPU - Identify core count for Apple M1 ([dylanaraps#2038](https://github.com/dylanaraps/neofetch/pull/2038))
* GPU - Identify OpenCL GPU without PCIe ([dylanaraps#1928](https://github.com/dylanaraps/neofetch/pull/1928))
* Host - Identify MacBook & Update iDevice models ([dylanaraps#1944](https://github.com/dylanaraps/neofetch/pull/1944))
* Battery - Identify power adapter for MacBooks ([dylanaraps#1945](https://github.com/dylanaraps/neofetch/pull/1945))
* DE - Identify KF5 and Qt versions for Plasma ([dylanaraps#2019](https://github.com/dylanaraps/neofetch/pull/2019))
* Packages - Improve GUIX package detection ([dylanaraps#2021](https://github.com/dylanaraps/neofetch/pull/2021))
* Packages - Add `pm` and `cargo` ([dylanaraps#1876](https://github.com/dylanaraps/neofetch/pull/1876))
* Network - Identify network capabilities ([dylanaraps#1511](https://github.com/dylanaraps/neofetch/pull/1511))
</details>
<details>
<summary>🖼️ Bug Fixes</summary>
* Bug Fix - Fix `col_offset` ([dylanaraps#2042](https://github.com/dylanaraps/neofetch/pull/2042))
* Bug Fix - Prioritize `/etc/os-release` ([dylanaraps#2067](https://github.com/dylanaraps/neofetch/pull/2067))
* Bug Fix - Ignore case when counting `.appimage` ([dylanaraps#2006](https://github.com/dylanaraps/neofetch/pull/2006))
* Bug Fix - Fix BSD freezing if pkg is not bootstrapped ([dylanaraps#2014](https://github.com/dylanaraps/neofetch/pull/2014))
* Bug Fix - Fix wrong icon theme ([dylanaraps#1873](https://github.com/dylanaraps/neofetch/pull/1873))
</details>
### 1.2.0
* 🚀 Take over `neofetch` with `neowofetch`
<details>
<summary>🖼️ Ascii Art Changes</summary>
* Ascii - Add uwuntu ([#9](https://github.com/hykilpikonna/hyfetch/pull/9)) (use it with `hyfetch --test-distro uwuntu` or `neowofetch --ascii_distro uwuntu`)
* Ascii - Better Void ascii art ([#10](https://github.com/hykilpikonna/hyfetch/pull/10))
* Ascii - Update old NixOS logo for compatibility ([dylanaraps#2114](https://github.com/dylanaraps/neofetch/pull/2114))
</details>
<details>
<summary>🖼️ Distro/OS Support Changes</summary>
* OS - Identify macOS 13 Ventura ([#8](https://github.com/hykilpikonna/hyfetch/pull/8))
* OS - Windows 11 Fluent ([dylanaraps#2109](https://github.com/dylanaraps/neofetch/pull/2109))
* Distro - Add Asahi Linux ([dylanaraps#2079](https://github.com/dylanaraps/neofetch/pull/2079))
* Distro - Add CenterOS ([dylanaraps#2097](https://github.com/dylanaraps/neofetch/pull/2097))
* Distro - Add Finnix ([dylanaraps#2099](https://github.com/dylanaraps/neofetch/pull/2099))
* Distro - Add Miracle Linux ([dylanaraps#2085](https://github.com/dylanaraps/neofetch/pull/2085))
* Distro - Add Univalent ([dylanaraps#2162](https://github.com/dylanaraps/neofetch/pull/2162))
* Distro - Add NomadBSD ([dylanaraps#2147](https://github.com/dylanaraps/neofetch/pull/2147))
* Distro - Add GrapheneOS ([dylanaraps#2146](https://github.com/dylanaraps/neofetch/pull/2146))
* Distro - Add ShastraOS ([dylanaraps#2149](https://github.com/dylanaraps/neofetch/pull/2149))
* Distro - Add Ubuntu Touch ([dylanaraps#2167](https://github.com/dylanaraps/neofetch/pull/2167))
* Distro - Add Ubuntu Sway ([dylanaraps#2136](https://github.com/dylanaraps/neofetch/pull/2136))
* Distro - Add Orchid Linux ([dylanaraps#2144](https://github.com/dylanaraps/neofetch/pull/2144))
* Distro - Add AOSC OS/Retro ([dylanaraps#2124](https://github.com/dylanaraps/neofetch/pull/2124))
* Distro - Add Ultramarine Linux ([dylanaraps#2115](https://github.com/dylanaraps/neofetch/pull/2115))
* Distro - Improve NixOS version detection ([dylanaraps#2157](https://github.com/dylanaraps/neofetch/pull/2157))
</details>
<details>
<summary>🖼️ Device/Program Support Changes</summary>
* Terminal - Add Termux ([dylanaraps#1923](https://github.com/dylanaraps/neofetch/pull/1923))
* CPU - Add loongarch64 ([dylanaraps#2140](https://github.com/dylanaraps/neofetch/pull/2140))
* CPU - Identify CPU name for ARM / RISCV ([dylanaraps#2139](https://github.com/dylanaraps/neofetch/pull/2139))
* Battery - Fix file not found ([dylanaraps#2130](https://github.com/dylanaraps/neofetch/pull/2130))
* GPU - Identify open-kernel Nvidia driver version ([dylanaraps#2128](https://github.com/dylanaraps/neofetch/pull/2128))
</details>
<details>
<summary>🖼️ Bug Fixes</summary>
* Bug Fix - Fix broken fedora output ([dylanaraps#2084](https://github.com/dylanaraps/neofetch/pull/2084))
</details>
<img width="200px" src="https://user-images.githubusercontent.com/22280294/181790059-47aa6f80-be99-4e67-8fa5-5c02b02842c6.png" align="right">
### 1.1.3rc1
* 🌈 Add foreground-background color arrangement to make Fedora and Ubuntu look nicer
* 🌈 Allow typing abbreviations in flag selection
* 🌈 Fix: Duplicate random color arrangements are appearing in selection screen
* 🌈 Fix: Inconsistent color arrangement when saved to config file
### 1.1.2
* Add more flags ([#5](https://github.com/hykilpikonna/hyfetch/pull/5))
* Removed `numpy` dependency that was used in 1.1.0
<img width="200px" src="https://user-images.githubusercontent.com/22280294/180901539-014f036e-c926-4470-ac72-a6d6dcf30672.png" align="right">
### 1.1.0
* Refactored a lot of things
* Added Beiyang flag xD
* Added interactive configurator for brightness adjustment
* Added dark/light mode selection
* Added color bar preview for RGB/8bit mode selection
* Added random color arrangement feature (for NixOS)
### 1.0.7
* Fix: Make config path not on init but when it's actually needed.
### 1.0.6
* Remove `hypy_utils` dependency to make packaging easier.
### 1.0.5
* Fix terminal emulator detection ([PR #2](https://github.com/hykilpikonna/hyfetch/pull/2))
### 1.0.4
* Add more flags ([PR #1](https://github.com/hykilpikonna/hyfetch/pull/1))
### 1.0.3
* Fix missing dependency for setuptools
### 1.0.2
* Implement RGB to 8bit conversion
* Add support for Python 3.7 and 3.8
### 1.0.1
* Included 11 flag presets
* Ability to lighten colors with `--c-set-l <lightness>`
* Command-line flag chooser
* Supports Python >= 3.9
## More Screenshots


## Original Readme from Neofetch Below
<h3 align="center"><img src="https://i.imgur.com/ZQI2EYz.png" alt="logo" height="100px"></h3>
<p align="center">A command-line system information tool written in bash 3.2+</p>
<p align="center">
<a href="./LICENSE.md"><img src="https://img.shields.io/badge/license-MIT-blue.svg"></a>
<a href="https://github.com/dylanaraps/neofetch/releases"><img src="https://img.shields.io/github/release/dylanaraps/neofetch.svg"></a>
<a href="https://repology.org/metapackage/neofetch"><img src="https://repology.org/badge/tiny-repos/neofetch.svg" alt="Packaging status"></a>
</p>
<img src="https://i.imgur.com/GFmC5Ad.png" alt="neofetch" align="right" height="240px">
Neofetch is a command-line system information tool written in `bash 3.2+`. Neofetch displays information about your operating system, software and hardware in an aesthetic and visually pleasing way.
The overall purpose of Neofetch is to be used in screen-shots of your system. Neofetch shows the information other people want to see. There are other tools available for proper system statistic/diagnostics.
The information by default is displayed alongside your operating system's logo. You can further configure Neofetch to instead use an image, a custom ASCII file, your wallpaper or nothing at all.
<img src="https://i.imgur.com/lUrkQBN.png" alt="neofetch" align="right" height="240px">
You can further configure Neofetch to display exactly what you want it to. Through the use of command-line flags and the configuration file you can change existing information outputs or add your own custom ones.
Neofetch supports almost 150 different operating systems. From Linux to Windows, all the way to more obscure operating systems like Minix, AIX and Haiku. If your favourite operating system is unsupported: Open up an issue and support will be added.
### More: \[[Dependencies](https://github.com/dylanaraps/neofetch/wiki/Dependencies)\] \[[Installation](https://github.com/dylanaraps/neofetch/wiki/Installation)\] \[[Wiki](https://github.com/dylanaraps/neofetch/wiki)\]
| PypiClean |
/CommandLineApp-3.0.7.tar.gz/CommandLineApp-3.0.7/docs/source/history.rst | #######
History
#######
3.0.7
- Repackage the documentation
3.0.6
- Bug fix from Cezary Statkiewicz for handling default arguments.
3.0.5
- Fixed packaging problems that prevented installation with easy_install and pip.
3.0.4
- Switched to sphinx for documentation.
3.0.3
- Updated the build to work with Mercurial and migrated the source to bitbucket host. No code changes.
3.0.2
- source file encoding patch from Ben Finney
3.0.1
- replace the test script missing from the 3.0 release
3.0
- `Ben Finney <http://benfinney.id.au/>`_ provided a patch to convert the names of the module, method, etc. to be PEP8-compliant. Thanks, Ben!
These changes are obviously backwards incompatible.
2.6
- Add initialization hooks to make application setup easier without overriding ``__init__()``.
2.5
- Updated to handle Unicode status messages more reliably.
2.4
- Code clean up and error handling changes.
2.3
- Refine help output a little more.
2.2
- Handle missing docstrings for main() and the class.
2.1
- Add automatic detection and validation of main function arguments, including help text generation. Also includes the main function docstring in :option:`--help` output.
2.0
- Substantial rewrite using inspect and with modified API.
1.0
- This is the old version, which was developed with and works under Python 1.5.4-2.5.
| PypiClean |
/Hikka_TL_New-2.0.4-py3-none-any.whl/hikkatl/tl/functions/channels.py | from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
if TYPE_CHECKING:
from ...tl.types import TypeChannelAdminLogEventsFilter, TypeChannelParticipantsFilter, TypeChatAdminRights, TypeChatBannedRights, TypeInputChannel, TypeInputChatPhoto, TypeInputCheckPasswordSRP, TypeInputGeoPoint, TypeInputMessage, TypeInputPeer, TypeInputStickerSet, TypeInputUser
class CheckUsernameRequest(TLRequest):
    """``channels`` request asking whether *username* may be assigned to *channel*.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0x10e6bd2c  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0xf5b399ac  # id of the abstract Bool result type
    def __init__(self, channel: 'TypeInputChannel', username: str):
        """
        :param channel: channel the username would belong to.
        :param username: username to check for availability.
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.username = username
    async def resolve(self, client, utils):
        # Convert whatever the caller passed (id, username, entity) into
        # the InputChannel object the raw API requires.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'CheckUsernameRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'username': self.username
        }
    def _bytes(self):
        # Wire format: constructor id (little-endian) followed by the fields in order.
        return b''.join((
            b',\xbd\xe6\x10',
            self.channel._bytes(),
            self.serialize_bytes(self.username),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed by the caller.
        _channel = reader.tgread_object()
        _username = reader.tgread_string()
        return cls(channel=_channel, username=_username)
class ConvertToGigagroupRequest(TLRequest):
    """``channels`` request converting the given channel to a gigagroup.

    Auto-generated TL binding; the wire format is fixed by the schema.
    """
    CONSTRUCTOR_ID = 0xb290c69  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)
    def to_dict(self):
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'ConvertToGigagroupRequest', 'channel': channel}
    def _bytes(self):
        # Constructor id (little-endian) followed by the serialized channel.
        return b'i\x0c)\x0b' + self.channel._bytes()
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        return cls(channel=reader.tgread_object())
class CreateChannelRequest(TLRequest):
    """``channels`` request creating a new channel/supergroup.

    Auto-generated TL binding; the serialization format must not be
    edited by hand.
    """
    CONSTRUCTOR_ID = 0x91006707  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, title: str, about: str, broadcast: Optional[bool]=None, megagroup: Optional[bool]=None, for_import: Optional[bool]=None, forum: Optional[bool]=None, geo_point: Optional['TypeInputGeoPoint']=None, address: Optional[str]=None, ttl_period: Optional[int]=None):
        """
        :param title: title of the new channel.
        :param about: description of the new channel.
        :param broadcast: create a broadcast channel (flag bit 1).
        :param megagroup: create a supergroup (flag bit 2).
        :param for_import: mark the group as created for chat import (flag bit 8).
        :param forum: create a forum (flag bit 32).
        :param geo_point: location of a geo-chat; must be given together with ``address``.
        :param address: textual address of a geo-chat; must be given together with ``geo_point``.
        :param ttl_period: auto-delete period for messages (flag bit 16).

        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.title = title
        self.about = about
        self.broadcast = broadcast
        self.megagroup = megagroup
        self.for_import = for_import
        self.forum = forum
        self.geo_point = geo_point
        self.address = address
        self.ttl_period = ttl_period
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'CreateChannelRequest',
            'title': self.title,
            'about': self.about,
            'broadcast': self.broadcast,
            'megagroup': self.megagroup,
            'for_import': self.for_import,
            'forum': self.forum,
            'geo_point': self.geo_point.to_dict() if isinstance(self.geo_point, TLObject) else self.geo_point,
            'address': self.address,
            'ttl_period': self.ttl_period
        }
    def _bytes(self):
        # geo_point and address share flag bit 4 on the wire, so they must be
        # provided (or omitted) together.  (Fixed typo in the message: "all me" -> "all be".)
        assert ((self.geo_point or self.geo_point is not None) and (self.address or self.address is not None)) or ((self.geo_point is None or self.geo_point is False) and (self.address is None or self.address is False)), 'geo_point, address parameters must all be False-y (like None) or all be True-y'
        # Wire format: constructor id, flags bitmask, then fields in schema order;
        # optional fields are emitted only when their flag bit is set.
        return b''.join((
            b'\x07g\x00\x91',
            struct.pack('<I', (0 if self.broadcast is None or self.broadcast is False else 1) | (0 if self.megagroup is None or self.megagroup is False else 2) | (0 if self.for_import is None or self.for_import is False else 8) | (0 if self.forum is None or self.forum is False else 32) | (0 if self.geo_point is None or self.geo_point is False else 4) | (0 if self.address is None or self.address is False else 4) | (0 if self.ttl_period is None or self.ttl_period is False else 16)),
            self.serialize_bytes(self.title),
            self.serialize_bytes(self.about),
            b'' if self.geo_point is None or self.geo_point is False else (self.geo_point._bytes()),
            b'' if self.address is None or self.address is False else (self.serialize_bytes(self.address)),
            b'' if self.ttl_period is None or self.ttl_period is False else (struct.pack('<i', self.ttl_period)),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        flags = reader.read_int()
        _broadcast = bool(flags & 1)
        _megagroup = bool(flags & 2)
        _for_import = bool(flags & 8)
        _forum = bool(flags & 32)
        _title = reader.tgread_string()
        _about = reader.tgread_string()
        if flags & 4:
            _geo_point = reader.tgread_object()
        else:
            _geo_point = None
        if flags & 4:
            _address = reader.tgread_string()
        else:
            _address = None
        if flags & 16:
            _ttl_period = reader.read_int()
        else:
            _ttl_period = None
        return cls(title=_title, about=_about, broadcast=_broadcast, megagroup=_megagroup, for_import=_for_import, forum=_forum, geo_point=_geo_point, address=_address, ttl_period=_ttl_period)
class CreateForumTopicRequest(TLRequest):
    """``channels`` request creating a new topic in a forum channel.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0xf40c0224  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel', title: str, icon_color: Optional[int]=None, icon_emoji_id: Optional[int]=None, random_id: Optional[int]=None, send_as: Optional['TypePutPeer' if False else 'TypeInputPeer']=None):
        """
        :param channel: forum channel to create the topic in.
        :param title: title of the topic.
        :param icon_color: RGB color of the topic icon (flag bit 1).
        :param icon_emoji_id: custom emoji used as topic icon (flag bit 8).
        :param random_id: client-side dedup id; a random 64-bit value is
            generated when omitted.
        :param send_as: peer to post as (flag bit 4).

        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.title = title
        self.icon_color = icon_color
        self.icon_emoji_id = icon_emoji_id
        # Generate a signed 64-bit random id when none is supplied.
        self.random_id = random_id if random_id is not None else int.from_bytes(os.urandom(8), 'big', signed=True)
        self.send_as = send_as
    async def resolve(self, client, utils):
        # Normalize caller-supplied references into Input* objects.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        if self.send_as:
            self.send_as = utils.get_input_peer(await client.get_input_entity(self.send_as))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'CreateForumTopicRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'title': self.title,
            'icon_color': self.icon_color,
            'icon_emoji_id': self.icon_emoji_id,
            'random_id': self.random_id,
            'send_as': self.send_as.to_dict() if isinstance(self.send_as, TLObject) else self.send_as
        }
    def _bytes(self):
        # Wire format: constructor id, flags bitmask, then fields in schema order;
        # optional fields are emitted only when their flag bit is set.
        return b''.join((
            b'$\x02\x0c\xf4',
            struct.pack('<I', (0 if self.icon_color is None or self.icon_color is False else 1) | (0 if self.icon_emoji_id is None or self.icon_emoji_id is False else 8) | (0 if self.send_as is None or self.send_as is False else 4)),
            self.channel._bytes(),
            self.serialize_bytes(self.title),
            b'' if self.icon_color is None or self.icon_color is False else (struct.pack('<i', self.icon_color)),
            b'' if self.icon_emoji_id is None or self.icon_emoji_id is False else (struct.pack('<q', self.icon_emoji_id)),
            struct.pack('<q', self.random_id),
            b'' if self.send_as is None or self.send_as is False else (self.send_as._bytes()),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        flags = reader.read_int()
        _channel = reader.tgread_object()
        _title = reader.tgread_string()
        if flags & 1:
            _icon_color = reader.read_int()
        else:
            _icon_color = None
        if flags & 8:
            _icon_emoji_id = reader.read_long()
        else:
            _icon_emoji_id = None
        _random_id = reader.read_long()
        if flags & 4:
            _send_as = reader.tgread_object()
        else:
            _send_as = None
        return cls(channel=_channel, title=_title, icon_color=_icon_color, icon_emoji_id=_icon_emoji_id, random_id=_random_id, send_as=_send_as)
class DeactivateAllUsernamesRequest(TLRequest):
    """``channels`` request deactivating all usernames of the given channel.

    Auto-generated TL binding; the wire format is fixed by the schema.
    """
    CONSTRUCTOR_ID = 0xa245dd3  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0xf5b399ac  # id of the abstract Bool result type
    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)
    def to_dict(self):
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'DeactivateAllUsernamesRequest', 'channel': channel}
    def _bytes(self):
        # Constructor id (little-endian) followed by the serialized channel.
        return b'\xd3]$\n' + self.channel._bytes()
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        return cls(channel=reader.tgread_object())
class DeleteChannelRequest(TLRequest):
    """``channels`` request deleting the given channel.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0xc0111fe3  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel'):
        """
        :param channel: the channel to delete.
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'DeleteChannelRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel
        }
    def _bytes(self):
        # Constructor id (little-endian) followed by the serialized channel.
        return b''.join((
            b'\xe3\x1f\x11\xc0',
            self.channel._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _channel = reader.tgread_object()
        return cls(channel=_channel)
class DeleteHistoryRequest(TLRequest):
    """``channels`` request deleting channel history up to ``max_id``.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0x9baa9647  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel', max_id: int, for_everyone: Optional[bool]=None):
        """
        :param channel: channel whose history is deleted.
        :param max_id: delete messages with ids up to this value.
        :param for_everyone: delete for all participants (flag bit 1).
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.max_id = max_id
        self.for_everyone = for_everyone
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'DeleteHistoryRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'max_id': self.max_id,
            'for_everyone': self.for_everyone
        }
    def _bytes(self):
        # Wire format: constructor id, flags bitmask, then fields in schema order.
        return b''.join((
            b'G\x96\xaa\x9b',
            struct.pack('<I', (0 if self.for_everyone is None or self.for_everyone is False else 1)),
            self.channel._bytes(),
            struct.pack('<i', self.max_id),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        flags = reader.read_int()
        _for_everyone = bool(flags & 1)
        _channel = reader.tgread_object()
        _max_id = reader.read_int()
        return cls(channel=_channel, max_id=_max_id, for_everyone=_for_everyone)
class DeleteMessagesRequest(TLRequest):
    """``channels`` request deleting the messages with the given ids.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0x84c1fd4e  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0xced3c06e  # id of the abstract messages.AffectedMessages result type
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', id: List[int]):
        """
        :param channel: channel the messages belong to.
        :param id: list of message ids to delete.
        :returns messages.AffectedMessages: Instance of AffectedMessages.
        """
        self.channel = channel
        self.id = id
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        # Plain-dict representation; the id list is shallow-copied.
        return {
            '_': 'DeleteMessagesRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'id': [] if self.id is None else self.id[:]
        }
    def _bytes(self):
        # Wire format: constructor id, serialized channel, then the id list as a
        # TL Vector<int>: vector constructor 0x1cb5c415, element count, elements.
        return b''.join((
            b'N\xfd\xc1\x84',
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor id
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return cls(channel=_channel, id=_id)
class DeleteParticipantHistoryRequest(TLRequest):
    """``channels`` request deleting all messages of a participant in a channel.

    Auto-generated TL binding; the wire format is fixed by the schema.
    """
    CONSTRUCTOR_ID = 0x367544db  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x2c49c116  # id of the abstract messages.AffectedHistory result type
    def __init__(self, channel: 'TypeInputChannel', participant: 'TypeInputPeer'):
        """
        :returns messages.AffectedHistory: Instance of AffectedHistory.
        """
        self.channel = channel
        self.participant = participant
    async def resolve(self, client, utils):
        # Normalize caller-supplied references into Input* objects.
        channel_entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(channel_entity)
        participant_entity = await client.get_input_entity(self.participant)
        self.participant = utils.get_input_peer(participant_entity)
    def to_dict(self):
        def _as_dict(value):
            return value.to_dict() if isinstance(value, TLObject) else value
        return {
            '_': 'DeleteParticipantHistoryRequest',
            'channel': _as_dict(self.channel),
            'participant': _as_dict(self.participant)
        }
    def _bytes(self):
        # Constructor id (little-endian) followed by the fields in schema order.
        return b'\xdbDu6' + self.channel._bytes() + self.participant._bytes()
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        return cls(channel=reader.tgread_object(), participant=reader.tgread_object())
class DeleteTopicHistoryRequest(TLRequest):
    """``channels`` request deleting the history of a forum topic.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0x34435f2d  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x2c49c116  # id of the abstract messages.AffectedHistory result type
    def __init__(self, channel: 'TypeInputChannel', top_msg_id: int):
        """
        :param channel: forum channel containing the topic.
        :param top_msg_id: id of the topic's root message.
        :returns messages.AffectedHistory: Instance of AffectedHistory.
        """
        self.channel = channel
        self.top_msg_id = top_msg_id
    async def resolve(self, client, utils):
        # Normalize the caller-supplied reference into an InputChannel.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'DeleteTopicHistoryRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'top_msg_id': self.top_msg_id
        }
    def _bytes(self):
        # Constructor id (little-endian) followed by the fields in schema order.
        return b''.join((
            b'-_C4',
            self.channel._bytes(),
            struct.pack('<i', self.top_msg_id),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _channel = reader.tgread_object()
        _top_msg_id = reader.read_int()
        return cls(channel=_channel, top_msg_id=_top_msg_id)
class EditAdminRequest(TLRequest):
    """``channels`` request changing a user's admin rights and rank in a channel.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0xd33c8902  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel', user_id: 'TypeInputUser', admin_rights: 'TypeChatAdminRights', rank: str):
        """
        :param channel: channel to edit rights in.
        :param user_id: user whose rights are edited.
        :param admin_rights: the new admin rights.
        :param rank: custom admin rank (title) to show.
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.user_id = user_id
        self.admin_rights = admin_rights
        self.rank = rank
    async def resolve(self, client, utils):
        # Normalize caller-supplied references into Input* objects.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'EditAdminRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
            'admin_rights': self.admin_rights.to_dict() if isinstance(self.admin_rights, TLObject) else self.admin_rights,
            'rank': self.rank
        }
    def _bytes(self):
        # Constructor id (little-endian) followed by the fields in schema order.
        return b''.join((
            b'\x02\x89<\xd3',
            self.channel._bytes(),
            self.user_id._bytes(),
            self.admin_rights._bytes(),
            self.serialize_bytes(self.rank),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        _admin_rights = reader.tgread_object()
        _rank = reader.tgread_string()
        return cls(channel=_channel, user_id=_user_id, admin_rights=_admin_rights, rank=_rank)
class EditBannedRequest(TLRequest):
    """``channels`` request changing a participant's banned rights in a channel.

    Auto-generated TL binding; the serialization format must not be edited by hand.
    """
    CONSTRUCTOR_ID = 0x96e6cd81  # TL constructor id (written little-endian in _bytes)
    SUBCLASS_OF_ID = 0x8af52aac  # id of the abstract Updates result type
    def __init__(self, channel: 'TypeInputChannel', participant: 'TypeInputPeer', banned_rights: 'TypeChatBannedRights'):
        """
        :param channel: channel to edit rights in.
        :param participant: participant whose rights are restricted.
        :param banned_rights: the new banned rights.
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.participant = participant
        self.banned_rights = banned_rights
    async def resolve(self, client, utils):
        # Normalize caller-supplied references into Input* objects.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.participant = utils.get_input_peer(await client.get_input_entity(self.participant))
    def to_dict(self):
        # Plain-dict representation; nested TLObjects are converted recursively.
        return {
            '_': 'EditBannedRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'participant': self.participant.to_dict() if isinstance(self.participant, TLObject) else self.participant,
            'banned_rights': self.banned_rights.to_dict() if isinstance(self.banned_rights, TLObject) else self.banned_rights
        }
    def _bytes(self):
        # Constructor id (little-endian) followed by the fields in schema order.
        return b''.join((
            b'\x81\xcd\xe6\x96',
            self.channel._bytes(),
            self.participant._bytes(),
            self.banned_rights._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        # Inverse of _bytes(); the constructor id was already consumed.
        _channel = reader.tgread_object()
        _participant = reader.tgread_object()
        _banned_rights = reader.tgread_object()
        return cls(channel=_channel, participant=_participant, banned_rights=_banned_rights)
class EditCreatorRequest(TLRequest):
    """Generated TL request: transfer channel ownership to another user (requires SRP password)."""
    CONSTRUCTOR_ID = 0x8f38cd1f
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', user_id: 'TypeInputUser', password: 'TypeInputCheckPasswordSRP'):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.user_id = user_id
        self.password = password
    async def resolve(self, client, utils):
        # Turn user-supplied entities into proper input types before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'EditCreatorRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'user_id': self.user_id.to_dict() if isinstance(self.user_id, TLObject) else self.user_id,
            'password': self.password.to_dict() if isinstance(self.password, TLObject) else self.password
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\x1f\xcd8\x8f',  # constructor ID 0x8f38cd1f, little-endian
            self.channel._bytes(),
            self.user_id._bytes(),
            self.password._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _user_id = reader.tgread_object()
        _password = reader.tgread_object()
        return cls(channel=_channel, user_id=_user_id, password=_password)
class EditForumTopicRequest(TLRequest):
    """Generated TL request: edit a forum topic's title, icon, closed or hidden state."""
    CONSTRUCTOR_ID = 0xf4dfa185
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', topic_id: int, title: Optional[str]=None, icon_emoji_id: Optional[int]=None, closed: Optional[bool]=None, hidden: Optional[bool]=None):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.topic_id = topic_id
        self.title = title
        self.icon_emoji_id = icon_emoji_id
        self.closed = closed
        self.hidden = hidden
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'EditForumTopicRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'topic_id': self.topic_id,
            'title': self.title,
            'icon_emoji_id': self.icon_emoji_id,
            'closed': self.closed,
            'hidden': self.hidden
        }
    def _bytes(self):
        """Serialize to the TL wire format.

        Flag bits: 1=title, 2=icon_emoji_id, 4=closed, 8=hidden; fields
        whose bit is clear are omitted from the payload entirely.
        """
        return b''.join((
            b'\x85\xa1\xdf\xf4',  # constructor ID 0xf4dfa185, little-endian
            struct.pack('<I', (0 if self.title is None or self.title is False else 1) | (0 if self.icon_emoji_id is None or self.icon_emoji_id is False else 2) | (0 if self.closed is None else 4) | (0 if self.hidden is None else 8)),
            self.channel._bytes(),
            struct.pack('<i', self.topic_id),
            b'' if self.title is None or self.title is False else (self.serialize_bytes(self.title)),
            b'' if self.icon_emoji_id is None or self.icon_emoji_id is False else (struct.pack('<q', self.icon_emoji_id)),
            b'' if self.closed is None else (b'\xb5ur\x99' if self.closed else b'7\x97y\xbc'),  # boolTrue / boolFalse constructors
            b'' if self.hidden is None else (b'\xb5ur\x99' if self.hidden else b'7\x97y\xbc'),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; flag bits decide which optional fields follow."""
        flags = reader.read_int()
        _channel = reader.tgread_object()
        _topic_id = reader.read_int()
        if flags & 1:
            _title = reader.tgread_string()
        else:
            _title = None
        if flags & 2:
            _icon_emoji_id = reader.read_long()
        else:
            _icon_emoji_id = None
        if flags & 4:
            _closed = reader.tgread_bool()
        else:
            _closed = None
        if flags & 8:
            _hidden = reader.tgread_bool()
        else:
            _hidden = None
        return cls(channel=_channel, topic_id=_topic_id, title=_title, icon_emoji_id=_icon_emoji_id, closed=_closed, hidden=_hidden)
class EditLocationRequest(TLRequest):
    """Generated TL request: set a geo-location and address for a channel."""
    CONSTRUCTOR_ID = 0x58e63f6d
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel: 'TypeInputChannel', geo_point: 'TypeInputGeoPoint', address: str):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.geo_point = geo_point
        self.address = address
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'EditLocationRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'geo_point': self.geo_point.to_dict() if isinstance(self.geo_point, TLObject) else self.geo_point,
            'address': self.address
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'm?\xe6X',  # constructor ID 0x58e63f6d, little-endian
            self.channel._bytes(),
            self.geo_point._bytes(),
            self.serialize_bytes(self.address),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _geo_point = reader.tgread_object()
        _address = reader.tgread_string()
        return cls(channel=_channel, geo_point=_geo_point, address=_address)
class EditPhotoRequest(TLRequest):
    """Generated TL request: change the photo of a channel."""
    CONSTRUCTOR_ID = 0xf12e57c9
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', photo: 'TypeInputChatPhoto'):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.photo = photo
    async def resolve(self, client, utils):
        # Normalize both the channel entity and the photo into input types.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.photo = utils.get_input_chat_photo(self.photo)
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'EditPhotoRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'photo': self.photo.to_dict() if isinstance(self.photo, TLObject) else self.photo
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\xc9W.\xf1',  # constructor ID 0xf12e57c9, little-endian
            self.channel._bytes(),
            self.photo._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _photo = reader.tgread_object()
        return cls(channel=_channel, photo=_photo)
class EditTitleRequest(TLRequest):
    """Generated TL request: change the title of a channel."""
    CONSTRUCTOR_ID = 0x566decd0
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', title: str):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.title = title
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'EditTitleRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'title': self.title
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\xd0\xecmV',  # constructor ID 0x566decd0, little-endian
            self.channel._bytes(),
            self.serialize_bytes(self.title),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _title = reader.tgread_string()
        return cls(channel=_channel, title=_title)
class ExportMessageLinkRequest(TLRequest):
    """Generated TL request: get a public link for a channel message."""
    CONSTRUCTOR_ID = 0xe63fadeb
    SUBCLASS_OF_ID = 0xdee644cc
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', id: int, grouped: Optional[bool]=None, thread: Optional[bool]=None):
        """
        :returns ExportedMessageLink: Instance of ExportedMessageLink.
        """
        self.channel = channel
        self.id = id
        self.grouped = grouped
        self.thread = thread
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ExportMessageLinkRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'id': self.id,
            'grouped': self.grouped,
            'thread': self.thread
        }
    def _bytes(self):
        """Serialize to the TL wire format.

        `grouped` (bit 1) and `thread` (bit 2) are flag-only booleans:
        they are encoded in the flags word and carry no payload bytes.
        """
        return b''.join((
            b'\xeb\xad?\xe6',  # constructor ID 0xe63fadeb, little-endian
            struct.pack('<I', (0 if self.grouped is None or self.grouped is False else 1) | (0 if self.thread is None or self.thread is False else 2)),
            self.channel._bytes(),
            struct.pack('<i', self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; booleans are recovered from the flags word."""
        flags = reader.read_int()
        _grouped = bool(flags & 1)
        _thread = bool(flags & 2)
        _channel = reader.tgread_object()
        _id = reader.read_int()
        return cls(channel=_channel, id=_id, grouped=_grouped, thread=_thread)
class GetAdminLogRequest(TLRequest):
    """Generated TL request: fetch the admin event log of a channel, optionally filtered."""
    CONSTRUCTOR_ID = 0x33ddf480
    SUBCLASS_OF_ID = 0x51f076bc
    def __init__(self, channel: 'TypeInputChannel', q: str, max_id: int, min_id: int, limit: int, events_filter: Optional['TypeChannelAdminLogEventsFilter']=None, admins: Optional[List['TypeInputUser']]=None):
        """
        :returns channels.AdminLogResults: Instance of AdminLogResults.
        """
        self.channel = channel
        self.q = q
        self.max_id = max_id
        self.min_id = min_id
        self.limit = limit
        self.events_filter = events_filter
        self.admins = admins
    async def resolve(self, client, utils):
        # Normalize the channel and each admin entity into input types.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        if self.admins:
            _tmp = []
            for _x in self.admins:
                _tmp.append(utils.get_input_user(await client.get_input_entity(_x)))
            self.admins = _tmp
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetAdminLogRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'q': self.q,
            'max_id': self.max_id,
            'min_id': self.min_id,
            'limit': self.limit,
            'events_filter': self.events_filter.to_dict() if isinstance(self.events_filter, TLObject) else self.events_filter,
            'admins': [] if self.admins is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.admins]
        }
    def _bytes(self):
        """Serialize to the TL wire format.

        Flag bits: 1=events_filter, 2=admins. Note the wire order differs
        from the constructor order: flags, channel, q, events_filter,
        admins (as a TL Vector), then max_id, min_id, limit.
        """
        return b''.join((
            b'\x80\xf4\xdd3',  # constructor ID 0x33ddf480, little-endian
            struct.pack('<I', (0 if self.events_filter is None or self.events_filter is False else 1) | (0 if self.admins is None or self.admins is False else 2)),
            self.channel._bytes(),
            self.serialize_bytes(self.q),
            b'' if self.events_filter is None or self.events_filter is False else (self.events_filter._bytes()),
            b'' if self.admins is None or self.admins is False else b''.join((b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.admins)),b''.join(x._bytes() for x in self.admins))),  # Vector constructor + count + items
            struct.pack('<q', self.max_id),
            struct.pack('<q', self.min_id),
            struct.pack('<i', self.limit),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; flag bits decide which optional fields follow."""
        flags = reader.read_int()
        _channel = reader.tgread_object()
        _q = reader.tgread_string()
        if flags & 1:
            _events_filter = reader.tgread_object()
        else:
            _events_filter = None
        if flags & 2:
            reader.read_int()  # skip the Vector constructor ID
            _admins = []
            for _ in range(reader.read_int()):
                _x = reader.tgread_object()
                _admins.append(_x)
        else:
            _admins = None
        _max_id = reader.read_long()
        _min_id = reader.read_long()
        _limit = reader.read_int()
        return cls(channel=_channel, q=_q, max_id=_max_id, min_id=_min_id, limit=_limit, events_filter=_events_filter, admins=_admins)
class GetAdminedPublicChannelsRequest(TLRequest):
    """Generated TL request: list the public channels the current user administers."""
    CONSTRUCTOR_ID = 0xf8b036af
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self, by_location: Optional[bool]=None, check_limit: Optional[bool]=None):
        """
        :returns messages.Chats: Instance of either Chats, ChatsSlice.
        """
        self.by_location = by_location
        self.check_limit = check_limit

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetAdminedPublicChannelsRequest',
            'by_location': self.by_location,
            'check_limit': self.check_limit
        }

    def _bytes(self):
        """Serialize to the TL wire format; both fields are flag-only booleans."""
        flags = 0
        if self.by_location is not None and self.by_location is not False:
            flags |= 1
        if self.check_limit is not None and self.check_limit is not False:
            flags |= 2
        return b'\xaf6\xb0\xf8' + struct.pack('<I', flags)

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; booleans are recovered from the flags word."""
        flags = reader.read_int()
        return cls(by_location=bool(flags & 1), check_limit=bool(flags & 2))
class GetChannelsRequest(TLRequest):
    """Generated TL request: fetch full info for a list of channels."""
    CONSTRUCTOR_ID = 0xa7f6bbb
    SUBCLASS_OF_ID = 0x99d5cb14
    # noinspection PyShadowingBuiltins
    def __init__(self, id: List['TypeInputChannel']):
        """
        :returns messages.Chats: Instance of either Chats, ChatsSlice.
        """
        self.id = id
    async def resolve(self, client, utils):
        # Normalize every entity in the list into a proper input channel.
        _tmp = []
        for _x in self.id:
            _tmp.append(utils.get_input_channel(await client.get_input_entity(_x)))
        self.id = _tmp
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetChannelsRequest',
            'id': [] if self.id is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.id]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + items)."""
        return b''.join((
            b'\xbbk\x7f\n',  # constructor ID 0xa7f6bbb, little-endian
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(x._bytes() for x in self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        reader.read_int()  # skip the Vector constructor ID
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _id.append(_x)
        return cls(id=_id)
class GetForumTopicsRequest(TLRequest):
    """Generated TL request: list forum topics of a channel, with paging offsets."""
    CONSTRUCTOR_ID = 0xde560d1
    SUBCLASS_OF_ID = 0x8e1d3e1e
    def __init__(self, channel: 'TypeInputChannel', offset_date: Optional[datetime], offset_id: int, offset_topic: int, limit: int, q: Optional[str]=None):
        """
        :returns messages.ForumTopics: Instance of ForumTopics.
        """
        self.channel = channel
        self.offset_date = offset_date
        self.offset_id = offset_id
        self.offset_topic = offset_topic
        self.limit = limit
        self.q = q
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetForumTopicsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'offset_date': self.offset_date,
            'offset_id': self.offset_id,
            'offset_topic': self.offset_topic,
            'limit': self.limit,
            'q': self.q
        }
    def _bytes(self):
        """Serialize to the TL wire format.

        `q` (flag bit 1) is written right after the channel, before the
        offsets — the wire order differs from the constructor order.
        """
        return b''.join((
            b'\xd1`\xe5\r',  # constructor ID 0xde560d1, little-endian
            struct.pack('<I', (0 if self.q is None or self.q is False else 1)),
            self.channel._bytes(),
            b'' if self.q is None or self.q is False else (self.serialize_bytes(self.q)),
            self.serialize_datetime(self.offset_date),
            struct.pack('<i', self.offset_id),
            struct.pack('<i', self.offset_topic),
            struct.pack('<i', self.limit),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; flag bit 1 decides whether `q` is present."""
        flags = reader.read_int()
        _channel = reader.tgread_object()
        if flags & 1:
            _q = reader.tgread_string()
        else:
            _q = None
        _offset_date = reader.tgread_date()
        _offset_id = reader.read_int()
        _offset_topic = reader.read_int()
        _limit = reader.read_int()
        return cls(channel=_channel, offset_date=_offset_date, offset_id=_offset_id, offset_topic=_offset_topic, limit=_limit, q=_q)
class GetForumTopicsByIDRequest(TLRequest):
    """Generated TL request: fetch specific forum topics of a channel by their IDs."""
    CONSTRUCTOR_ID = 0xb0831eb9
    SUBCLASS_OF_ID = 0x8e1d3e1e
    def __init__(self, channel: 'TypeInputChannel', topics: List[int]):
        """
        :returns messages.ForumTopics: Instance of ForumTopics.
        """
        self.channel = channel
        self.topics = topics
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetForumTopicsByIDRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'topics': [] if self.topics is None else self.topics[:]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + int items)."""
        return b''.join((
            b'\xb9\x1e\x83\xb0',  # constructor ID 0xb0831eb9, little-endian
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.topics)),b''.join(struct.pack('<i', x) for x in self.topics),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _topics = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _topics.append(_x)
        return cls(channel=_channel, topics=_topics)
class GetFullChannelRequest(TLRequest):
    """Generated TL request: fetch the full information of a single channel."""
    CONSTRUCTOR_ID = 0x8736a09
    SUBCLASS_OF_ID = 0x225a5109

    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns messages.ChatFull: Instance of ChatFull.
        """
        self.channel = channel

    async def resolve(self, client, utils):
        # Normalize the user-supplied entity into an input channel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'GetFullChannelRequest', 'channel': channel}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + channel)."""
        return b'\tjs\x08' + self.channel._bytes()

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(channel=reader.tgread_object())
class GetGroupsForDiscussionRequest(TLRequest):
    """Generated TL request: list groups suitable as a channel's discussion group."""
    CONSTRUCTOR_ID = 0xf5dad378
    SUBCLASS_OF_ID = 0x99d5cb14

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {'_': 'GetGroupsForDiscussionRequest'}

    def _bytes(self):
        """Serialize to the TL wire format; the request carries no fields."""
        return b'x\xd3\xda\xf5'

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the (empty) body."""
        return cls()
class GetInactiveChannelsRequest(TLRequest):
    """Generated TL request: list inactive channels and supergroups."""
    CONSTRUCTOR_ID = 0x11e831ee
    SUBCLASS_OF_ID = 0x8bf3d7d4

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {'_': 'GetInactiveChannelsRequest'}

    def _bytes(self):
        """Serialize to the TL wire format; the request carries no fields."""
        return b'\xee1\xe8\x11'

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the (empty) body."""
        return cls()
class GetLeftChannelsRequest(TLRequest):
    """Generated TL request: page through channels the user has left."""
    CONSTRUCTOR_ID = 0x8341ecc0
    SUBCLASS_OF_ID = 0x99d5cb14

    def __init__(self, offset: int):
        """
        :returns messages.Chats: Instance of either Chats, ChatsSlice.
        """
        self.offset = offset

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {'_': 'GetLeftChannelsRequest', 'offset': self.offset}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + offset int32)."""
        return b'\xc0\xecA\x83' + struct.pack('<i', self.offset)

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(offset=reader.read_int())
class GetMessagesRequest(TLRequest):
    """Generated TL request: fetch specific messages of a channel by ID."""
    CONSTRUCTOR_ID = 0xad8c9a23
    SUBCLASS_OF_ID = 0xd4b40b5e
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', id: List['TypeInputMessage']):
        """
        :returns messages.Messages: Instance of either Messages, MessagesSlice, ChannelMessages, MessagesNotModified.
        """
        self.channel = channel
        self.id = id
    async def resolve(self, client, utils):
        # Normalize the channel and every message reference into input types.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        _tmp = []
        for _x in self.id:
            _tmp.append(utils.get_input_message(_x))
        self.id = _tmp
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetMessagesRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'id': [] if self.id is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.id]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + items)."""
        return b''.join((
            b'#\x9a\x8c\xad',  # constructor ID 0xad8c9a23, little-endian
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(x._bytes() for x in self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _id.append(_x)
        return cls(channel=_channel, id=_id)
class GetParticipantRequest(TLRequest):
    """Generated TL request: fetch info about a single channel participant."""
    CONSTRUCTOR_ID = 0xa0ab6cc6
    SUBCLASS_OF_ID = 0x6658151a
    def __init__(self, channel: 'TypeInputChannel', participant: 'TypeInputPeer'):
        """
        :returns channels.ChannelParticipant: Instance of ChannelParticipant.
        """
        self.channel = channel
        self.participant = participant
    async def resolve(self, client, utils):
        # Normalize both entities into input types before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.participant = utils.get_input_peer(await client.get_input_entity(self.participant))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetParticipantRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'participant': self.participant.to_dict() if isinstance(self.participant, TLObject) else self.participant
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\xc6l\xab\xa0',  # constructor ID 0xa0ab6cc6, little-endian
            self.channel._bytes(),
            self.participant._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _participant = reader.tgread_object()
        return cls(channel=_channel, participant=_participant)
class GetParticipantsRequest(TLRequest):
    """Generated TL request: page through a channel's participants with a filter."""
    CONSTRUCTOR_ID = 0x77ced9d0
    SUBCLASS_OF_ID = 0xe60a6e64
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', filter: 'TypeChannelParticipantsFilter', offset: int, limit: int, hash: int):
        """
        :returns channels.ChannelParticipants: Instance of either ChannelParticipants, ChannelParticipantsNotModified.
        """
        self.channel = channel
        self.filter = filter
        self.offset = offset
        self.limit = limit
        self.hash = hash
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'GetParticipantsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'filter': self.filter.to_dict() if isinstance(self.filter, TLObject) else self.filter,
            'offset': self.offset,
            'limit': self.limit,
            'hash': self.hash
        }
    def _bytes(self):
        """Serialize to the TL wire format; note `hash` is a 64-bit long."""
        return b''.join((
            b'\xd0\xd9\xcew',  # constructor ID 0x77ced9d0, little-endian
            self.channel._bytes(),
            self.filter._bytes(),
            struct.pack('<i', self.offset),
            struct.pack('<i', self.limit),
            struct.pack('<q', self.hash),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _filter = reader.tgread_object()
        _offset = reader.read_int()
        _limit = reader.read_int()
        _hash = reader.read_long()
        return cls(channel=_channel, filter=_filter, offset=_offset, limit=_limit, hash=_hash)
class GetSendAsRequest(TLRequest):
    """Generated TL request: list the peers the user may send messages as."""
    CONSTRUCTOR_ID = 0xdc770ee
    SUBCLASS_OF_ID = 0x38cb8d21

    def __init__(self, peer: 'TypeInputPeer'):
        """
        :returns channels.SendAsPeers: Instance of SendAsPeers.
        """
        self.peer = peer

    async def resolve(self, client, utils):
        # Normalize the user-supplied entity into an input peer.
        entity = await client.get_input_entity(self.peer)
        self.peer = utils.get_input_peer(entity)

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        peer = self.peer
        if isinstance(peer, TLObject):
            peer = peer.to_dict()
        return {'_': 'GetSendAsRequest', 'peer': peer}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + peer)."""
        return b'\xeep\xc7\r' + self.peer._bytes()

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(peer=reader.tgread_object())
class GetSponsoredMessagesRequest(TLRequest):
    """Generated TL request: fetch the sponsored messages shown in a channel."""
    CONSTRUCTOR_ID = 0xec210fbf
    SUBCLASS_OF_ID = 0x7f4169e0

    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns messages.SponsoredMessages: Instance of either SponsoredMessages, SponsoredMessagesEmpty.
        """
        self.channel = channel

    async def resolve(self, client, utils):
        # Normalize the user-supplied entity into an input channel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'GetSponsoredMessagesRequest', 'channel': channel}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + channel)."""
        return b'\xbf\x0f!\xec' + self.channel._bytes()

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(channel=reader.tgread_object())
class InviteToChannelRequest(TLRequest):
    """Generated TL request: invite a list of users to a channel."""
    CONSTRUCTOR_ID = 0x199f3a6c
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', users: List['TypeInputUser']):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.users = users
    async def resolve(self, client, utils):
        # Normalize the channel and every user entity into input types.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        _tmp = []
        for _x in self.users:
            _tmp.append(utils.get_input_user(await client.get_input_entity(_x)))
        self.users = _tmp
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'InviteToChannelRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'users': [] if self.users is None else [x.to_dict() if isinstance(x, TLObject) else x for x in self.users]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + items)."""
        return b''.join((
            b'l:\x9f\x19',  # constructor ID 0x199f3a6c, little-endian
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.users)),b''.join(x._bytes() for x in self.users),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _users = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_object()
            _users.append(_x)
        return cls(channel=_channel, users=_users)
class JoinChannelRequest(TLRequest):
    """Generated TL request: join a channel or supergroup."""
    CONSTRUCTOR_ID = 0x24b524c5
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel

    async def resolve(self, client, utils):
        # Normalize the user-supplied entity into an input channel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'JoinChannelRequest', 'channel': channel}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + channel)."""
        return b'\xc5$\xb5$' + self.channel._bytes()

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(channel=reader.tgread_object())
class LeaveChannelRequest(TLRequest):
    """Generated TL request: leave a channel or supergroup."""
    CONSTRUCTOR_ID = 0xf836aa95
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel'):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel

    async def resolve(self, client, utils):
        # Normalize the user-supplied entity into an input channel.
        entity = await client.get_input_entity(self.channel)
        self.channel = utils.get_input_channel(entity)

    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        channel = self.channel
        if isinstance(channel, TLObject):
            channel = channel.to_dict()
        return {'_': 'LeaveChannelRequest', 'channel': channel}

    def _bytes(self):
        """Serialize to the TL wire format (constructor ID + channel)."""
        return b'\x95\xaa6\xf8' + self.channel._bytes()

    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        return cls(channel=reader.tgread_object())
class ReadHistoryRequest(TLRequest):
    """Generated TL request: mark channel history as read up to max_id."""
    CONSTRUCTOR_ID = 0xcc104937
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel: 'TypeInputChannel', max_id: int):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.max_id = max_id
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReadHistoryRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'max_id': self.max_id
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'7I\x10\xcc',  # constructor ID 0xcc104937, little-endian
            self.channel._bytes(),
            struct.pack('<i', self.max_id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _max_id = reader.read_int()
        return cls(channel=_channel, max_id=_max_id)
class ReadMessageContentsRequest(TLRequest):
    """Generated TL request: mark the contents of channel messages as read."""
    CONSTRUCTOR_ID = 0xeab5dc38
    SUBCLASS_OF_ID = 0xf5b399ac
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', id: List[int]):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.id = id
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReadMessageContentsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'id': [] if self.id is None else self.id[:]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + int items)."""
        return b''.join((
            b'8\xdc\xb5\xea',  # constructor ID 0xeab5dc38, little-endian
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return cls(channel=_channel, id=_id)
class ReorderPinnedForumTopicsRequest(TLRequest):
    """Generated TL request: reorder the pinned forum topics of a channel."""
    CONSTRUCTOR_ID = 0x2950a18f
    SUBCLASS_OF_ID = 0x8af52aac
    def __init__(self, channel: 'TypeInputChannel', order: List[int], force: Optional[bool]=None):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.order = order
        self.force = force
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReorderPinnedForumTopicsRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'order': [] if self.order is None else self.order[:],
            'force': self.force
        }
    def _bytes(self):
        """Serialize to the TL wire format; `force` is a flag-only boolean (bit 1)."""
        return b''.join((
            b'\x8f\xa1P)',  # constructor ID 0x2950a18f, little-endian
            struct.pack('<I', (0 if self.force is None or self.force is False else 1)),
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.order)),b''.join(struct.pack('<i', x) for x in self.order),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body; `force` is recovered from the flags word."""
        flags = reader.read_int()
        _force = bool(flags & 1)
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _order = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _order.append(_x)
        return cls(channel=_channel, order=_order, force=_force)
class ReorderUsernamesRequest(TLRequest):
    """Generated TL request: reorder the usernames assigned to a channel."""
    CONSTRUCTOR_ID = 0xb45ced1d
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel: 'TypeInputChannel', order: List[str]):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.order = order
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReorderUsernamesRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'order': [] if self.order is None else self.order[:]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + string items)."""
        return b''.join((
            b'\x1d\xed\\\xb4',  # constructor ID 0xb45ced1d, little-endian
            self.channel._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.order)),b''.join(self.serialize_bytes(x) for x in self.order),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _order = []
        for _ in range(reader.read_int()):
            _x = reader.tgread_string()
            _order.append(_x)
        return cls(channel=_channel, order=_order)
class ReportAntiSpamFalsePositiveRequest(TLRequest):
    """Generated TL request: report a message wrongly flagged by channel anti-spam."""
    CONSTRUCTOR_ID = 0xa850a693
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, channel: 'TypeInputChannel', msg_id: int):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.msg_id = msg_id
    async def resolve(self, client, utils):
        # Turn the user-supplied entity into a proper input channel before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReportAntiSpamFalsePositiveRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'msg_id': self.msg_id
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\x93\xa6P\xa8',  # constructor ID 0xa850a693, little-endian
            self.channel._bytes(),
            struct.pack('<i', self.msg_id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _msg_id = reader.read_int()
        return cls(channel=_channel, msg_id=_msg_id)
class ReportSpamRequest(TLRequest):
    """Generated TL request: report messages of a participant as spam."""
    CONSTRUCTOR_ID = 0xf44a8315
    SUBCLASS_OF_ID = 0xf5b399ac
    # noinspection PyShadowingBuiltins
    def __init__(self, channel: 'TypeInputChannel', participant: 'TypeInputPeer', id: List[int]):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.participant = participant
        self.id = id
    async def resolve(self, client, utils):
        # Normalize both entities into input types before sending.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))
        self.participant = utils.get_input_peer(await client.get_input_entity(self.participant))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'ReportSpamRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'participant': self.participant.to_dict() if isinstance(self.participant, TLObject) else self.participant,
            'id': [] if self.id is None else self.id[:]
        }
    def _bytes(self):
        """Serialize to the TL wire format (Vector constructor + count + int items)."""
        return b''.join((
            b'\x15\x83J\xf4',  # constructor ID 0xf44a8315, little-endian
            self.channel._bytes(),
            self.participant._bytes(),
            b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.id)),b''.join(struct.pack('<i', x) for x in self.id),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _channel = reader.tgread_object()
        _participant = reader.tgread_object()
        reader.read_int()  # skip the Vector constructor ID
        _id = []
        for _ in range(reader.read_int()):
            _x = reader.read_int()
            _id.append(_x)
        return cls(channel=_channel, participant=_participant, id=_id)
class SetDiscussionGroupRequest(TLRequest):
    """Generated TL request: link a discussion group to a broadcast channel."""
    CONSTRUCTOR_ID = 0x40582bb2
    SUBCLASS_OF_ID = 0xf5b399ac
    def __init__(self, broadcast: 'TypeInputChannel', group: 'TypeInputChannel'):
        """
        :returns Bool: This type has no constructors.
        """
        self.broadcast = broadcast
        self.group = group
    async def resolve(self, client, utils):
        # Normalize both entities into input channels before sending.
        self.broadcast = utils.get_input_channel(await client.get_input_entity(self.broadcast))
        self.group = utils.get_input_channel(await client.get_input_entity(self.group))
    def to_dict(self):
        """Return a JSON-friendly dict view of this request."""
        return {
            '_': 'SetDiscussionGroupRequest',
            'broadcast': self.broadcast.to_dict() if isinstance(self.broadcast, TLObject) else self.broadcast,
            'group': self.group.to_dict() if isinstance(self.group, TLObject) else self.group
        }
    def _bytes(self):
        """Serialize to the TL wire format; field order is part of the protocol."""
        return b''.join((
            b'\xb2+X@',  # constructor ID 0x40582bb2, little-endian
            self.broadcast._bytes(),
            self.group._bytes(),
        ))
    @classmethod
    def from_reader(cls, reader):
        """Deserialize the body (constructor ID already consumed)."""
        _broadcast = reader.tgread_object()
        _group = reader.tgread_object()
        return cls(broadcast=_broadcast, group=_group)
class SetStickersRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.setStickers)."""
    CONSTRUCTOR_ID = 0xea8ca4f9
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel: 'TypeInputChannel', stickerset: 'TypeInputStickerSet'):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.stickerset = stickerset

    async def resolve(self, client, utils):
        # Only `channel` needs resolving; `stickerset` is already an input type.
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'SetStickersRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'stickerset': self.stickerset.to_dict() if isinstance(self.stickerset, TLObject) else self.stickerset
        }

    def _bytes(self):
        # Wire format: constructor ID (little-endian), then each argument.
        return b''.join((
            b'\xf9\xa4\x8c\xea',
            self.channel._bytes(),
            self.stickerset._bytes(),
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _stickerset = reader.tgread_object()
        return cls(channel=_channel, stickerset=_stickerset)
class ToggleAntiSpamRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleAntiSpam)."""
    CONSTRUCTOR_ID = 0x68f3e4eb
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleAntiSpamRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b'\xeb\xe4\xf3h',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleForumRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleForum)."""
    CONSTRUCTOR_ID = 0xa4298b29
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleForumRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b')\x8b)\xa4',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleJoinRequestRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleJoinRequest)."""
    CONSTRUCTOR_ID = 0x4c2985b6
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleJoinRequestRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b'\xb6\x85)L',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleJoinToSendRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleJoinToSend)."""
    CONSTRUCTOR_ID = 0xe4cb9580
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleJoinToSendRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b'\x80\x95\xcb\xe4',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleParticipantsHiddenRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleParticipantsHidden)."""
    CONSTRUCTOR_ID = 0x6a6e7854
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleParticipantsHiddenRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            # constructor ID 0x6a6e7854 little-endian happens to be ASCII "Txnj"
            b'Txnj',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class TogglePreHistoryHiddenRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.togglePreHistoryHidden)."""
    CONSTRUCTOR_ID = 0xeabbb94c
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'TogglePreHistoryHiddenRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b'L\xb9\xbb\xea',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleSignaturesRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleSignatures)."""
    CONSTRUCTOR_ID = 0x1f69b606
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', enabled: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.enabled = enabled

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleSignaturesRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'enabled': self.enabled
        }

    def _bytes(self):
        return b''.join((
            b'\x06\xb6i\x1f',
            self.channel._bytes(),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.enabled else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _enabled = reader.tgread_bool()
        return cls(channel=_channel, enabled=_enabled)
class ToggleSlowModeRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleSlowMode)."""
    CONSTRUCTOR_ID = 0xedd49ef0
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', seconds: int):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.seconds = seconds

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleSlowModeRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'seconds': self.seconds
        }

    def _bytes(self):
        return b''.join((
            b'\xf0\x9e\xd4\xed',
            self.channel._bytes(),
            # `seconds` is a signed 32-bit little-endian int on the wire
            struct.pack('<i', self.seconds),
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _seconds = reader.read_int()
        return cls(channel=_channel, seconds=_seconds)
class ToggleUsernameRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.toggleUsername)."""
    CONSTRUCTOR_ID = 0x50f24105
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel: 'TypeInputChannel', username: str, active: bool):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.username = username
        self.active = active

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ToggleUsernameRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'username': self.username,
            'active': self.active
        }

    def _bytes(self):
        return b''.join((
            b'\x05A\xf2P',
            self.channel._bytes(),
            # TL string serialisation (length-prefixed, padded)
            self.serialize_bytes(self.username),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.active else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _username = reader.tgread_string()
        _active = reader.tgread_bool()
        return cls(channel=_channel, username=_username, active=_active)
class UpdatePinnedForumTopicRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.updatePinnedForumTopic)."""
    CONSTRUCTOR_ID = 0x6c2d9026
    SUBCLASS_OF_ID = 0x8af52aac

    def __init__(self, channel: 'TypeInputChannel', topic_id: int, pinned: bool):
        """
        :returns Updates: Instance of either UpdatesTooLong, UpdateShortMessage, UpdateShortChatMessage, UpdateShort, UpdatesCombined, Updates, UpdateShortSentMessage.
        """
        self.channel = channel
        self.topic_id = topic_id
        self.pinned = pinned

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'UpdatePinnedForumTopicRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'topic_id': self.topic_id,
            'pinned': self.pinned
        }

    def _bytes(self):
        return b''.join((
            b'&\x90-l',
            self.channel._bytes(),
            struct.pack('<i', self.topic_id),
            # serialized boolTrue / boolFalse TL constructors
            b'\xb5ur\x99' if self.pinned else b'7\x97y\xbc',
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _topic_id = reader.read_int()
        _pinned = reader.tgread_bool()
        return cls(channel=_channel, topic_id=_topic_id, pinned=_pinned)
class UpdateUsernameRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.updateUsername)."""
    CONSTRUCTOR_ID = 0x3514b3de
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel: 'TypeInputChannel', username: str):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        self.username = username

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'UpdateUsernameRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'username': self.username
        }

    def _bytes(self):
        return b''.join((
            b'\xde\xb3\x145',
            self.channel._bytes(),
            # TL string serialisation (length-prefixed, padded)
            self.serialize_bytes(self.username),
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _username = reader.tgread_string()
        return cls(channel=_channel, username=_username)
class ViewSponsoredMessageRequest(TLRequest):
    """Auto-generated Telegram TL request (channels.viewSponsoredMessage).

    Fix: the generated default for ``random_id`` used
    ``int.from_bytes(os.urandom(4), 'big', signed=True)``, producing an
    ``int`` even though the field is typed ``bytes``; serialising such a
    default via ``serialize_bytes`` raises ``TypeError``. The default is now
    random ``bytes`` of the same width. Backward compatible: callers passing
    an explicit ``random_id`` are unaffected, and the default previously
    could not be serialised at all.
    """
    CONSTRUCTOR_ID = 0xbeaedb94
    SUBCLASS_OF_ID = 0xf5b399ac

    def __init__(self, channel: 'TypeInputChannel', random_id: bytes = None):
        """
        :returns Bool: This type has no constructors.
        """
        self.channel = channel
        # Default must be `bytes` so `_bytes()` / `serialize_bytes` accept it.
        self.random_id = random_id if random_id is not None else os.urandom(4)

    async def resolve(self, client, utils):
        self.channel = utils.get_input_channel(await client.get_input_entity(self.channel))

    def to_dict(self):
        return {
            '_': 'ViewSponsoredMessageRequest',
            'channel': self.channel.to_dict() if isinstance(self.channel, TLObject) else self.channel,
            'random_id': self.random_id
        }

    def _bytes(self):
        return b''.join((
            b'\x94\xdb\xae\xbe',
            self.channel._bytes(),
            # TL bytes serialisation (length-prefixed, padded)
            self.serialize_bytes(self.random_id),
        ))

    @classmethod
    def from_reader(cls, reader):
        _channel = reader.tgread_object()
        _random_id = reader.tgread_bytes()
        return cls(channel=_channel, random_id=_random_id)
/EtherollApp-2020.322-py3-none-any.whl/etherollapp/etheroll/controller.py | from dotenv import load_dotenv
from eth_utils import to_checksum_address
from kivy.app import App
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.utils import platform
from kivymd.bottomsheet import MDListBottomSheet
from kivymd.theming import ThemeManager
from raven import Client
from requests.exceptions import ConnectionError
from etherollapp.etheroll.constants import ENV_PATH
from etherollapp.etheroll.flashqrcode import FlashQrCodeScreen
from etherollapp.etheroll.settings import Settings
from etherollapp.etheroll.settings_screen import SettingsScreen
from etherollapp.etheroll.switchaccount import SwitchAccountScreen
from etherollapp.etheroll.ui_utils import Dialog, load_kv_from_py
from etherollapp.etheroll.utils import run_in_thread
from etherollapp.osc.osc_app_server import OscAppServer
from etherollapp.sentry_utils import configure_sentry
from etherollapp.service.utils import start_roll_polling_service
load_kv_from_py(__file__)
class Controller(FloatLayout):
    """Root widget: wires the roll screen, accounts, dialogs and services."""

    # Currently selected keystore account object, or None.
    current_account = ObjectProperty(allownone=True)
    # "0x..."-prefixed hex address of `current_account`, kept in sync below.
    current_account_string = StringProperty(allownone=True)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # disables the roll screen until `preload_account_utils` is done
        # disabling doesn't seem to work within the scheduled method
        # self.roll_screen.toggle_widgets(False)
        self.disabled = True
        Clock.schedule_once(self._after_init)
        # Cache of unlocked account passwords, keyed by hex address.
        self._account_passwords = {}

    def _after_init(self, dt):
        """Inits pyethapp and binds events."""
        Clock.schedule_once(self.preload_account_utils)
        self.bind_roll_button()
        self.bind_current_account_string()
        self.bind_chances_roll_under()
        self.bind_wager_property()
        self.bind_profit_property()
        self.bind_screen_manager_on_current_screen()
        self.bind_keyboard()
        self.register_screens()

    def on_keyboard(self, window, key, *args):
        """
        Handles the back button (Android) and ESC key. Goes back to the
        previous screen, discards dialogs or exits the application if none
        left.
        """
        if key == 27:  # 27 == ESC / Android back button
            if Dialog.dialogs:
                Dialog.dismiss_all_dialogs()
                return True
            from etherollapp.etheroll.ui_utils import SubScreen
            current_screen = self.screen_manager.current_screen
            # if is sub-screen loads previous and stops the propagation
            # otherwise propagates the key to exit
            if isinstance(current_screen, SubScreen):
                current_screen.on_back()
                return True
        return False

    def on_current_account(self, instance, current_account):
        """Kivy event: mirrors the account to its hex string (or None)."""
        self.current_account_string = (
            current_account and f'0x{current_account.address.hex()}')

    @property
    def pyetheroll(self):
        """
        Gets or creates the Etheroll object.
        Also recreates the object if the chain_id changed.
        """
        from pyetheroll.etheroll import Etheroll
        chain_id = Settings.get_stored_network()
        return Etheroll.get_or_create(chain_id)

    @property
    def account_utils(self):
        """Gets or creates the AccountUtils object so it loads lazily."""
        from eth_accounts.account_utils import AccountUtils
        keystore_dir = Settings.get_keystore_path()
        return AccountUtils.get_or_create(keystore_dir)

    def preload_account_utils(self, dt):
        """Preloads `AccountUtils`, since it takes few seconds on Android."""
        account_utils = self.account_utils
        self.disabled = False
        # not using that returned value, but it appeases the linter
        return account_utils

    def bind_wager_property(self):
        """Binds wager recap label."""
        roll_under_recap = self.roll_screen.ids.roll_under_recap_id
        bet_size = self.roll_screen.ids.bet_size_id
        bet_size_input = bet_size.ids.bet_size_input_id
        # TODO: input validation, if `bet_size.text == ''`
        bet_size_input.bind(text=roll_under_recap.setter('wager_property'))
        # synchro once now
        roll_under_recap.wager_property = bet_size_input.text

    def bind_chances_roll_under(self):
        """Binds chances of winning recap label."""
        roll_under_recap = self.roll_screen.ids.roll_under_recap_id
        # roll under recap label
        chance_of_winning = self.roll_screen.ids.chance_of_winning_id
        chances_input = chance_of_winning.ids.chances_input_id
        # TODO: input validation, if `chances_input.text == ''`
        chances_input.bind(text=roll_under_recap.setter('roll_under_property'))
        # synchronises it now
        roll_under_recap.roll_under_property = chances_input.text

    def bind_roll_button(self):
        """Binds roll screen "Roll" button to controller roll()."""
        roll_button = self.roll_screen.ids.roll_button_id
        roll_button.bind(on_release=lambda instance: self.roll())

    def bind_current_account_string(self):
        """Binds Controller.current_account -> RollScreen.current_account"""
        roll_screen = self.roll_screen
        self.bind(
            current_account_string=roll_screen.setter('current_account_string')
        )

    def bind_keyboard(self):
        """Binds keyboard keys to actions."""
        from kivy.core.window import Window
        Window.bind(on_keyboard=self.on_keyboard)

    def bind_profit_property(self):
        """Binds profit property with bet value and chances changes."""
        # chances -> profit
        chance_of_winning = self.roll_screen.ids.chance_of_winning_id
        chances_input = chance_of_winning.ids.chances_input_id
        chances_input.bind(
            text=lambda instance, value: self.update_profit_property())
        # bet value -> profit
        bet_size = self.roll_screen.ids.bet_size_id
        bet_size_input = bet_size.ids.bet_size_input_id
        bet_size_input.bind(
            text=lambda instance, value: self.update_profit_property())
        # synchro once now
        self.update_profit_property()

    def bind_screen_manager_on_current_screen(self):
        """SwitchAccountScreen.current_account -> self.current_account."""
        def on_pre_add_widget(screen_manager, screen):
            """Should only be called twice per screen instance."""
            if type(screen) is SwitchAccountScreen and \
                    not self.screen_manager.has_screen(screen.name):
                screen.bind(current_account=self.setter('current_account'))
                screen.ids.send_id.bind(
                    current_account=self.setter('current_account'))
                screen.ids.send_id.bind(on_send=self.on_send)
        self.screen_manager.bind(on_pre_add_widget=on_pre_add_widget)

    def register_screens(self):
        """Registers all screens to the screen manager (lazily loaded)."""
        # lazy loading
        from etherollapp.etheroll.about import AboutScreen
        from etherollapp.etheroll.roll_results import RollResultsScreen
        screen_dicts = {
            "about_screen": AboutScreen,
            'flashqrcode': FlashQrCodeScreen,
            "roll_results_screen": RollResultsScreen,
            "settings_screen": SettingsScreen,
            "switch_account_screen": SwitchAccountScreen,
        }
        for screen_name, screen_type in screen_dicts.items():
            self.screen_manager.register_screen(screen_type, screen_name)

    def update_profit_property(self):
        """Recomputes the potential profit from bet size and win chances."""
        house_edge = 1.0 / 100
        bet_size = self.roll_screen.ids.bet_size_id.value
        chances_win = self.roll_screen.ids.chance_of_winning_id.value
        chances_loss = 100 - chances_win
        roll_under_recap = self.roll_screen.ids.roll_under_recap_id
        roll_under_recap.profit_property = 0
        if chances_win != 0 and chances_loss != 0:
            # payout scales with the odds, minus the house edge
            payout = ((chances_loss / chances_win) * bet_size) + bet_size
            payout *= (1 - house_edge)
            roll_under_recap.profit_property = payout - bet_size

    @property
    def navigation(self):
        return self.ids.navigation_id

    @property
    def screen_manager(self):
        return self.ids.screen_manager_id

    @property
    def roll_screen(self):
        return self.screen_manager.get_screen('roll_screen')

    @property
    def switch_account_screen(self):
        return self.screen_manager.get_screen('switch_account_screen')

    @property
    def roll_results_screen(self):
        return self.screen_manager.get_screen('roll_results_screen')

    @property
    def settings_screen(self):
        return self.screen_manager.get_screen('settings_screen')

    @property
    def about_screen(self):
        return self.screen_manager.get_screen('about_screen')

    def prompt_password_dialog(self, account, on_password_callback):
        """Prompt the password dialog."""
        # lazy loading
        from etherollapp.etheroll.passwordform import PasswordForm
        dialog = PasswordForm.dialog(account)

        def on_unlock_clicked(instance, dialog, account, password):
            """Caches the password and call roll method again."""
            self._account_passwords[account.address.hex()] = password
            dialog.dismiss()
            # e.g. re-invokes `roll()` or `send()` now a password is cached
            on_password_callback()
        dialog.content.bind(on_unlock=on_unlock_clicked)
        dialog.open()
        return dialog

    def get_account_password(self, account, on_password_callback):
        """Retrieve cached account password or prompt dialog."""
        address = account.address.hex()
        try:
            return self._account_passwords[address]
        except KeyError:
            # Not cached: prompt asynchronously and (implicitly) return None;
            # the callback re-enters the calling flow once unlocked.
            self.prompt_password_dialog(account, on_password_callback)

    @staticmethod
    def on_account_none():
        """Error dialog on no account selected."""
        title = "No account selected"
        body = "Please select an account before rolling"
        dialog = Dialog.create_dialog(title, body)
        dialog.open()

    @staticmethod
    @mainthread
    def dialog_roll_success(tx_hash):
        """Shows a success dialog with the roll transaction hash."""
        title = "Rolled successfully"
        body = "Transaction hash:\n" + tx_hash.hex()
        dialog = Dialog.create_dialog(title, body)
        dialog.open()

    @staticmethod
    @mainthread
    def dialog_transaction_success(tx_hash):
        """Shows a success dialog with the transfer transaction hash."""
        title = "Transaction successful"
        body = "Transaction hash:\n" + tx_hash.hex()
        dialog = Dialog.create_dialog(title, body)
        dialog.open()

    @mainthread
    def dialog_roll_error(self, exception):
        """
        Shows different error message depending on the exception.
        On "MAC mismatch" (wrong password), void the cached password so the
        user can try again refs:
        https://github.com/AndreMiras/EtherollApp/issues/9
        """
        title = "Error rolling"
        body = str(exception)
        if body == 'MAC mismatch':
            title = "Wrong password"
            body = "Can't unlock wallet, wrong password."
            # invalidate the bad cached password
            account = self.current_account
            self._account_passwords.pop(account.address.hex())
        dialog = Dialog.create_dialog(title, body)
        dialog.open()

    @run_in_thread
    def player_roll_dice(
            self, bet_size_eth, chances, wallet_path, password,
            gas_price_gwei):
        """
        Sending the bet to the smart contract requires signing a transaction
        which requires CPU computation to unlock the account, hence this
        is ran in a thread.
        """
        roll_screen = self.roll_screen
        try:
            Dialog.snackbar_message("Sending bet...")
            roll_screen.toggle_widgets(False)
            # convert user-facing units (ETH/Gwei) to wei
            bet_size_wei = int(bet_size_eth * 1e18)
            gas_price_wei = int(gas_price_gwei * 1e9)
            tx_hash = self.pyetheroll.player_roll_dice(
                bet_size_wei, chances, wallet_path, password, gas_price_wei)
        except (ValueError, ConnectionError) as exception:
            roll_screen.toggle_widgets(True)
            self.dialog_roll_error(exception)
            return
        roll_screen.toggle_widgets(True)
        self.dialog_roll_success(tx_hash)

    @staticmethod
    def start_services():
        """
        Starts both roll polling service and OSC service.
        The roll polling service is getting the OSC server connection
        parameters so it can communicate to it.
        """
        app = App.get_running_app()
        osc_server, sockname = OscAppServer.get_or_create(app)
        server_address, server_port = sockname
        # NOTE(review): debug print left in; consider removing or logging.
        print(sockname)
        arguments = {
            'osc_server_address': server_address,
            'osc_server_port': server_port,
        }
        start_roll_polling_service(arguments)

    def roll(self):
        """
        Retrieves bet parameters from user input and sends it as a signed
        transaction to the smart contract.
        """
        roll_screen = self.roll_screen
        roll_input = roll_screen.get_roll_input()
        bet_size_eth = roll_input['bet_size']
        chances = roll_input['chances']
        gas_price_gwei = Settings.get_stored_gas_price()
        account = self.current_account
        if account is None:
            self.on_account_none()
            return
        wallet_path = account.path
        # `None` means the dialog was prompted; `self.roll` is re-run on unlock
        password = self.get_account_password(account, self.roll)
        if password is not None:
            self.player_roll_dice(
                bet_size_eth, chances, wallet_path, password, gas_price_gwei)
            # restarts roll polling service to reset the roll activity period
            self.start_services()

    def transaction(
            self, to, amount_eth, wallet_path, password, gas_price_gwei):
        """Converts input parameters for the underlying library."""
        value = int(amount_eth * 1e18)
        gas_price_wei = int(gas_price_gwei * 1e9)
        to = to_checksum_address(to)
        Dialog.snackbar_message("Sending transaction...")
        tx_hash = self.pyetheroll.transaction(
            to, value, wallet_path, password, gas_price_wei)
        self.dialog_transaction_success(tx_hash)

    def send(self, address, amount_eth):
        """Retrieves fields to complete the `transaction()` call."""
        gas_price_gwei = Settings.get_stored_gas_price()
        account = self.current_account
        if account is None:
            self.on_account_none()
            return
        wallet_path = account.path
        password = self.get_account_password(
            account, lambda: self.send(address, amount_eth))
        if password is not None:
            self.transaction(
                address, amount_eth, wallet_path, password, gas_price_gwei)

    def on_send(self, instance, address, amount_eth):
        """Event handler for the send screen's `on_send` event."""
        self.send(address, amount_eth)

    def load_switch_account(self):
        """Loads the switch account screen."""
        screen_manager = self.screen_manager
        screen_manager.transition.direction = 'right'
        screen_manager.current = 'switch_account_screen'

    def load_flash_qr_code(self):
        """Loads the flash QR Code screen."""
        # loads ZBarCam only when needed
        from kivy_garden.zbarcam import ZBarCam  # noqa
        # loads the flash QR Code screen
        self.screen_manager.transition.direction = 'right'
        self.screen_manager.current = 'flashqrcode'

    def show_qr_code(self):
        """Shows address QR Code in a dialog."""
        # lazy loading
        from kivy_garden.qrcode import QRCodeWidget
        from kivy.metrics import dp
        account = self.current_account
        if not account:
            return
        address = "0x" + account.address.hex()
        title = address
        qr_code = QRCodeWidget()
        qr_code.data = address
        dialog = Dialog.create_dialog_content_helper(
            title=title,
            content=qr_code)
        # workaround for MDDialog container size (too small by default)
        dialog.ids.container.size_hint_y = 1
        dialog.height = dp(500)
        dialog.add_action_button(
            "OK",
            action=lambda *x: dialog.dismiss())
        dialog.open()
        return dialog

    def copy_address_clipboard(self):
        """Copies the current account address to the clipboard."""
        # lazy loading
        from kivy.core.clipboard import Clipboard
        account = self.current_account
        if not account:
            return
        address = "0x" + account.address.hex()
        Clipboard.copy(address)

    def open_address_options(self):
        """
        Loads the address options bottom sheet.
        """
        bottom_sheet = MDListBottomSheet()
        bottom_sheet.add_item(
            'Switch account',
            lambda x: self.load_switch_account(), icon='swap-horizontal')
        bottom_sheet.add_item(
            'Show QR Code',
            lambda x: self.show_qr_code(), icon='information')
        bottom_sheet.add_item(
            'Copy address',
            lambda x: self.copy_address_clipboard(), icon='content-copy')
        bottom_sheet.open()

    @staticmethod
    def on_permission_error(exception):
        """Error dialog shown on keystore/storage permission failures."""
        title = "Permission denied"
        body = str(exception.args)
        dialog = Dialog.create_dialog(title, body)
        dialog.open()
class EtherollApp(App):
    """Kivy application: theming, background services and root widget."""

    theme_cls = ThemeManager()

    def build(self):
        """Configures the theme, starts services and returns the root widget."""
        self.icon = "docs/images/icon.png"
        self.theme_cls.theme_style = 'Dark'
        self.theme_cls.primary_palette = 'Indigo'
        Controller.start_services()
        return Controller()
def main():
    """Entry point: load the env file, configure Sentry and run the app.

    Uncaught exceptions are reported to Sentry when a real client was
    configured (i.e. not in debug mode).
    """
    load_dotenv(dotenv_path=ENV_PATH)
    # only send Android errors to Sentry
    in_debug = platform != "android"
    client = configure_sentry(in_debug)
    try:
        EtherollApp().run()
    except Exception:
        # Fix: use `isinstance` instead of `type(...) == Client` — the
        # idiomatic check, and it also accepts Client subclasses.
        if isinstance(client, Client):
            Logger.info(
                'Errors will be sent to Sentry, run with "--debug" if you '
                'are a developper and want to the error in the shell.')
            client.captureException()


if __name__ == '__main__':
    main()
/LDtoolsets-0.0.14.tar.gz/LDtoolsets-0.0.14/LDtools/sumstat.py |
__all__ = ['p2z', 'Sumstat', 'read_sumstat']
# Cell
import yaml
import numpy as np
import pandas as pd
from scipy.stats import norm
from .utils import *
# Cell
def p2z(pval, beta, twoside=True):
    """Convert p-values to z-scores signed by the effect direction.

    Fix/generalisation: the original negated via boolean index assignment
    (``z[ind] = -z[ind]``), which crashes on scalar/0-d inputs; ``np.where``
    handles scalars and arrays alike and returns the same values for array
    inputs.

    :param pval: p-value(s), scalar or array-like.
    :param beta: effect size estimate(s); only the sign is used.
    :param twoside: if True, treat `pval` as two-sided (halved before ppf).
    :return: z-score(s) with the same sign as `beta`.
    """
    pval = np.asarray(pval, dtype=float)
    if twoside:
        pval = pval / 2
    # ppf of a small p is negative; take the magnitude, then re-sign by beta.
    z = np.abs(norm.ppf(pval))
    return np.where(np.asarray(beta) < 0, -z, z)
class Sumstat:
    """GWAS summary statistics held in ``self.ss`` with filtering helpers."""

    def __init__(self, sumstat_path, config_file=None, rename=True):
        self.ss = self.read_sumstat(sumstat_path, config_file, rename)

    def __repr__(self):
        return "sumstat:% s" % (self.ss)

    def read_sumstat(self, file, config_file, rename):
        """Load summary statistics, optionally remapping columns via YAML."""
        if config_file is not None:
            # Fix: close the YAML file instead of leaking the open handle.
            with open(config_file, 'r') as stream:
                config_file = yaml.safe_load(stream)
        return read_sumstat(file, config_file, rename)

    def extractbyregion(self, region):
        """Keep only SNPs inside ``region = (chrom, start, end)``, inclusive."""
        sumstats = self.ss
        idx = (sumstats.CHR == region[0]) & (sumstats.POS >= region[1]) & (sumstats.POS <= region[2])
        print('this region', region, 'has', sum(idx), 'SNPs in Sumstat')
        self.ss = sumstats[idx]

    def extractbyvariants(self, variants, notin=False):
        """Keep SNPs listed in `variants` (or drop them when ``notin=True``)."""
        idx = self.ss.SNP.isin(variants)
        if notin:
            idx = ~idx  # fix: idiomatic element-wise negation of `idx == False`
        self.ss = self.ss[idx]

    def calculateZ(self):
        """Add a Z column derived from the P and BETA columns."""
        self.ss['Z'] = list(p2z(self.ss.P, self.ss.BETA))

    def match_ss(self, bim):
        """Align summary statistics against a PLINK .bim table."""
        self.ss = check_ss1(self.ss, bim)
# Cell
def read_sumstat(file, config, rename=True):
    """Read a tab-separated (optionally gzipped) summary-statistics file.

    When `config` is given it maps standard column names to the file's
    columns and must contain an 'ID' entry (comma-separated column names)
    used to build the index; note `config` is mutated (the 'ID' key is
    popped). When `rename` is true, SNP ids are rewritten to the canonical
    'chr<CHR>:<POS>:<A0>:<A1>' form.
    """
    try:
        sumstats = pd.read_csv(file, compression='gzip', header=0, sep='\t', quotechar='"')
    except Exception:
        # Not gzip-compressed: fall back to reading as plain text.
        sumstats = pd.read_csv(file, header=0, sep='\t', quotechar='"')
    if config is not None:
        try:
            ID = config.pop('ID').split(',')
            sumstats = sumstats.loc[:, list(config.values())]
            sumstats.columns = list(config.keys())
            sumstats.index = namebyordA0_A1(sumstats[ID], cols=ID)
        except Exception as error:
            # Fix: chain the original exception instead of swallowing it.
            raise ValueError(f'According to config_file, input summary statistics should have the following columns: %s' % list(config.values())) from error
        # (fix: removed a duplicate `sumstats.columns = list(config.keys())`;
        # the columns are already renamed inside the `try` block above)
    if rename:
        sumstats.SNP = 'chr' + sumstats.CHR.astype(str) + ':' + sumstats.POS.astype(str) + ':' + sumstats.A0.astype(str) + ':' + sumstats.A1.astype(str)
    sumstats.CHR = sumstats.CHR.astype(int)
    sumstats.POS = sumstats.POS.astype(int)
    return sumstats
/Nerd-Color-0.1.0.tar.gz/Nerd-Color-0.1.0/src/nerdcolor/__main__.py | import argparse
import sys
from nerdcolor.nerdcolor import SCHEMES, VERSION, Colorscheme
def print_colorschemes():
    """Print every available scheme name, rendered with its own palette."""
    for name in SCHEMES:
        demo = Colorscheme([name], "true", delimiter="char", bold=True)
        print(demo.colorize(name))
def print_examples():
    """Print long- and short-form usage examples for the CLI."""
    # Adjacent string literals are concatenated into a single message.
    string = (
        "Full versions:\n\n\t"
        "1) nerdcolor --ansi --delimiter line --bold --palette red green blue\n\t"
        "2) nerdcolor --true --delimiter word --palette dracula\n\t"
        '3) nerdcolor --true --delimiter char --palette "#EFFFFB" "#50D890" "#4F98CA" "#7874F2"\n\n'
        "Shortened versions:\n\n\t"
        "1) nerdcolor -a -b -p red green blue\n\t"
        "2) nerdcolor -t -d word -p dracula\n\t"
        '3) nerdcolor -t -d char -p "#EFFFFB" "#50D890" "#4F98CA" "#7874F2"'
    )
    print(string)
def main():
    """CLI entry point: parse arguments and colorize stdin to stdout.

    Fix: use ``sys.exit`` instead of the bare ``exit`` builtin — ``exit`` is
    an interactive helper injected by the ``site`` module and is absent under
    ``python -S`` or in frozen builds; ``sys.exit`` is the reliable form.
    """
    parser = argparse.ArgumentParser(
        prog="nerdcolor",
        description="A simple CLI colorizer.",
        epilog="Project home page: https://github.com/0xHaru/Nerd-Color",
    )
    # `action="version"` falls back to `parser.version` when no version
    # keyword is passed to add_argument.
    parser.version = VERSION
    parser.add_argument(
        "-a",
        "--ansi",
        help="use ANSI colors",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-t",
        "--true",
        help="use true colors",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-d",
        "--delimiter",
        type=str,
        help="set the delimiter",
        default="line",
    )
    parser.add_argument(
        "-b", "--bold", help="use bold text", action="store_true", default=False
    )
    parser.add_argument(
        "-p",
        "--palette",
        type=str,
        help="set the color palette",
        nargs="+",
    )
    parser.add_argument(
        "-c",
        "--colorschemes",
        help="show available color schemes",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-e",
        "--examples",
        help="show usage examples",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-v", "--version", help="show program's version", action="version"
    )
    args = parser.parse_args()

    if args.colorschemes:
        print_colorschemes()
        sys.exit(0)

    if args.examples:
        print_examples()
        sys.exit(0)

    # Validate mutually-dependent flags before touching stdin.
    if not args.palette:
        print("Error: missing palette")
        sys.exit(1)

    if not args.ansi and not args.true:
        print("Error: --ansi or --true must be set")
        sys.exit(1)

    if args.ansi and args.true:
        print("Error: --ansi and --true cannot both be set")
        sys.exit(1)

    color_type = "ansi" if args.ansi else "true"
    scheme = Colorscheme(
        args.palette, color_type, delimiter=args.delimiter, bold=args.bold
    )
    # `end=""` preserves the input's own trailing newline behaviour.
    print(scheme.colorize(sys.stdin.read()), end="")


if __name__ == "__main__":
    main()
/Da_Ponz-4.0.4-py3-none-any.whl/da_ponz/strategies/ltsm.py | import da_ponz.strategies.common_functions as common_functions
import keras
import math
import sklearn.metrics as skl_met
def calculate_rmse(batch_size, data, epochs, neuron, scaler, test_scaled, train_scaled):
    """Train an LSTM on the scaled training set and return the test-set RMSE."""
    lstm = create_model(batch_size, epochs, neuron, train_scaled)

    # Prime the stateful network's internal state on the training inputs.
    train_features = train_scaled[:, :-1]
    lstm.predict(
        train_features.reshape(train_features.shape[0], 1, train_features.shape[1]),
        batch_size=batch_size,
    )

    # Forecast over the test inputs, undo scaling/differencing, score.
    test_features = test_scaled[:, :-1]
    output = lstm.predict(
        test_features.reshape(test_features.shape[0], 1, test_features.shape[1]),
        batch_size=batch_size,
    )
    predictions = common_functions.calculate_yhat(data, output, scaler, test_scaled)
    return math.sqrt(skl_met.mean_squared_error(data[-len(predictions):], predictions))
def create_data_sets(batch_size, columns_list, diff_data):
    """Build scaled train/test sets from differenced data (last third = test)."""
    supervised = common_functions.make_data_supervised(columns_list, diff_data, output_var='close')

    # Hold out roughly the final third of the rows for testing.
    n_test = int(round(supervised.shape[0] * .333))
    values = supervised.values
    test = common_functions.split_data_set(batch_size, values, slice(-n_test, None))
    train = common_functions.split_data_set(batch_size, values, slice(0, -n_test))

    # Fit the scaler on the training split only, then apply it to both.
    scaler = common_functions.build_scaler((-1, 1), 'MinMaxScaler', train)
    return scaler, common_functions.scale_data(test, scaler), common_functions.scale_data(train, scaler)
def create_model(batch_size, nb_epoch, neurons, train):
    """Build and train a stateful single-layer LSTM regressor.

    :param batch_size: fixed batch size (required by the stateful LSTM)
    :param nb_epoch: number of training passes over the data
    :param neurons: number of LSTM units
    :param train: 2-D array whose last column is the target variable
    :return: the trained keras model (returned by the line following this
        block in the file)
    """
    X = train[:, :-1] # Drops the last column, which has the output_var
    # LSTM expects 3-D input: (samples, timesteps, features); timesteps=1 here.
    X = X.reshape((X.shape[0], 1, X.shape[1]))
    y = train[:, -1] # Keeps the last column
    model = keras.Sequential()
    # stateful=True requires a fully specified batch_input_shape.
    model.add(keras.layers.LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(keras.layers.Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # Manual epoch loop: with a stateful LSTM the hidden state must be
    # reset between passes, so fit one epoch at a time without shuffling.
    for i in range(nb_epoch):
        model.fit(X, y, batch_size=batch_size, epochs=1, shuffle=False, verbose=0)
        model.reset_states()
return model | PypiClean |
/GQCMS-0.0.4-py3-none-any.whl/gqcms/PlottingTools/LegendPlotter.py | import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
def sitesDrawer(postitions, color_dict=None, edge_color_dict=None, axis="NotGiven"):
    """
    draws sites on the canvas on the required position

    positions, list of tuples: the positions of the sites
    color_dict, dict or None: contains the colors of every individual site
    edge_color_dict, dict or None: contains the edge color of every individual site
    axis, pyplot.axes object: the axis to draw the figure on, default NotGiven, will just draw it on a new figure
    """
    # Defaults changed from mutable `{}` literals to None, and None is
    # tolerated explicitly: plotONVs forwards its own edge_color_dict,
    # which defaults to None and previously crashed on `.keys()`.
    if color_dict is None:
        color_dict = {}
    if edge_color_dict is None:
        edge_color_dict = {}
    if axis == "NotGiven":
        fig, axis = plt.subplots()
    for index, position in enumerate(postitions):
        # Sites in a domain get a fill color; highlighted sites get a
        # thicker, colored edge.
        color = color_dict.get(index, "white")
        if index in edge_color_dict:
            edgecolor = edge_color_dict[index]
            linewidth = 3
        else:
            edgecolor = "black"
            linewidth = 1.5
        rectangle = plt.Rectangle(
            position,
            0.3,
            0.3,
            facecolor=color,
            edgecolor=edgecolor,
            lw=linewidth,
            fill=True,
            zorder=5,
        )
        axis.add_patch(rectangle)
def colorDomains(domains):
    """
    Will read in the domains and give the right colorscheme

    domains, list of tuples: the domains of interest
    """
    palette = sns.color_palette("pastel")
    # Every site in domain i gets the i-th pastel color.
    return {
        site: palette[index]
        for index, domain in enumerate(domains)
        for site in domain
    }
def edgeColorDomains(domains):
    """
    Will read in the domains and give the right edgecolorscheme

    domains, list of tuples: the domains of interest
    """
    palette = sns.color_palette()
    # Every site in domain i gets the i-th default-palette color.
    return {
        site: palette[index]
        for index, domain in enumerate(domains)
        for site in domain
    }
def lineDrawer(position_pairs, linecolor_dict={}, axis="NotGiven"):
    """
    Will draw lines on a figure.

    position_pairs, list of lists: contains all the pairs between which you want to draw a line.
    linecolor_dict, dict: contains the colors of each line
    axis, pyplot.axes object: the axis on which to make the drawing, if NotGiven, will make a new drawing

    note:
        If the drawing is circular, the position_pairs needs to end with the same point as it started
        example: [[point1, point2], [point2, point3], ... , [pointX, point1]]
    """
    if axis == "NotGiven":
        fig, axis = plt.subplots()
    for index, (start, end) in enumerate(position_pairs):
        # The +0.15 offset aims each line at the center of the 0.3-wide
        # site squares drawn by sitesDrawer.
        axis.plot(
            (start[0] + 0.15, end[0] + 0.15),
            (start[1] + 0.15, end[1] + 0.15),
            color=linecolor_dict.get(index, "black"),
            zorder=0,
        )
def drawElectrons(alpha_pos, beta_pos, axis="NotGiven"):
    """
    Will draw the electrons in the right position

    alpha_pos, list of tuples: the positions of the alpha electrons
    beta_pos, list of tuples: the positions of the beta electrons
    axis, pyplot.axes object: the axis on which the electrons need to be drawn, if NotGiven make a new image
    """
    if axis == "NotGiven":
        fig, axis = plt.subplots()
    # Shared arrow styling for both spin channels.
    arrow_kwargs = dict(
        zorder=10,
        color="black",
        head_width=0.07,
        length_includes_head=True,
        head_length=0.05,
    )
    # Alpha electrons: upward arrows, left of the site center.
    for x, y in alpha_pos:
        axis.arrow(x + 0.1, y + 0.05, 0, 0.20, **arrow_kwargs)
    # Beta electrons: downward arrows, right of the site center.
    for x, y in beta_pos:
        axis.arrow(x + 0.2, y + 0.2 + 0.05, 0, -0.20, **arrow_kwargs)
def plotONVs(
    pos_dict: dict,
    ONV_list: list,
    legend: list,
    edge_color_dict: dict = None,
    title: str = None,
) -> None:
    """
    plots ONVs for figure legends

    input
    :param pos_dict: site positions on the canvas
    :param ONV_list: list of ONVs, [[[alpha positions], [beta positions]], ...]
    :param legend: a list containing colors and markers used in the plot [(color, marker), ...]
    :param edge_color_dict: dict containing edge color for each site, if None, edge_color will be black
    :param title: the plot title, if None the plot will not be saved
    """
    # sitesDrawer indexes into edge_color_dict, so never forward None
    # (the documented "if None, edge_color will be black" behavior).
    if edge_color_dict is None:
        edge_color_dict = {}
    n_sites = len(pos_dict)
    positions = [pos_dict[i] for i in range(n_sites)]
    fig, ax = plt.subplots(len(ONV_list) // 3 + 1, 3, figsize=(10, 7))
    ax = ax.flatten()
    for index, ONV in enumerate(ONV_list):
        axis = ax[index]
        sitesDrawer(positions, edge_color_dict=edge_color_dict, axis=axis)
        # Connect every pair of neighbouring sites. The original
        # hard-coded range(2), which only drew the bonds of a 3-site chain.
        lineDrawer(
            [(pos_dict[i], pos_dict[i + 1]) for i in range(n_sites - 1)],
            axis=axis,
        )
        alphas = ONV[0]
        betas = ONV[1]
        drawElectrons(
            alpha_pos=[pos_dict[pos] for pos in alphas],
            beta_pos=[pos_dict[pos] for pos in betas],
            axis=axis,
        )
        # Marker matching this ONV's series in the main figure.
        axis.plot(0.5, 0.4, marker=legend[index][1], color=legend[index][0])
    for axis in ax:
        axis.axis("off")
    if title is None:
        plt.show()
    else:
        plt.savefig(title)
def plotDomains(
    pos_dict: dict, domain_list: list, legend: list, title: str = None
) -> None:
    """
    plots domains for figure legends

    input
    :param pos_dict: site positions on the canvas
    :param domain_list: list of domains
    :param legend: a list containing colors and markers used in the plot [(color, marker), ...]
    :param title: the plot title, if None the plot will not be saved
    """
    positions = [pos_dict[i] for i in range(len(pos_dict))]
    # lineDrawer expects (start, end) pairs; the original passed the bare
    # position tuples, which raises a TypeError inside lineDrawer
    # (pair[0][0] would index into a float). Build neighbour pairs instead.
    pairs = [(pos_dict[i], pos_dict[i + 1]) for i in range(len(pos_dict) - 1)]
    fig, ax = plt.subplots(len(domain_list) // 3 + 1, 3, figsize=(10, 6))
    ax = ax.flatten()
    for index, partition in enumerate(domain_list):
        sitesDrawer(
            positions,
            colorDomains(partition),
            {},
            axis=ax[index],
        )
        lineDrawer(pairs, axis=ax[index])
        # Marker matching this partition's series in the main figure.
        ax[index].plot(0.5, 0.4, marker=legend[index][1], color=legend[index][0])
    for axis in ax:
        axis.axis("off")
    if title is None:
        plt.show()
    else:
        plt.savefig(title)
def CombineFigures(
    path_to_graph: str, path_to_legend: str, title: str, rescaler: float = 0
):
    """
    Combines a figure and a legend.

    :param path_to_graph: the path to the graph
    :param path_to_legend: the path to the legend corresponding to the graph
    :param title: name of the new plot
    :param rescaler: allows for manual rescaling of the legend if automatic values do not work out
    """
    graph = Image.open(path_to_graph)
    legend = Image.open(path_to_legend)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    # getattr keeps compatibility with Pillow < 9.1 (no Resampling enum).
    resample = getattr(Image, "Resampling", Image).LANCZOS
    legend = legend.resize((graph.size[0], graph.size[1] + rescaler), resample)
    new_im = Image.new("RGB", (graph.size[0], legend.size[1] + graph.size[1]))
    x_offset = 0
    # Stack the graph on top of the legend in the combined canvas.
    for im in (graph, legend):
        new_im.paste(im, (0, x_offset))
        x_offset += im.size[1]
new_im.save(title, dpi=(300, 300)) | PypiClean |
/Cookies_colorama-0.4.6.tar.gz/Cookies_colorama-0.4.6/Cookies_colorama/winterm.py | from . import win32
# from wincon.h
class WinColor(object):
    """Windows console color constants (the low 3 attribute bits plus
    intensity handling elsewhere); values match wincon.h."""
    BLACK   = 0
    BLUE    = 1
    GREEN   = 2
    CYAN    = 3
    RED     = 4
    MAGENTA = 5
    YELLOW  = 6
    GREY    = 7
# from wincon.h
class WinStyle(object):
    """Windows console intensity bits; values match wincon.h."""
    NORMAL              = 0x00 # dim text, dim background
    BRIGHT              = 0x08 # bright text, dim background
    BRIGHT_BACKGROUND   = 0x80 # dim text, bright background
class WinTerm(object):
    """Stateful wrapper over the Win32 console API.

    Tracks the current foreground color, background color and style bits,
    and pushes them to the console via win32.SetConsoleTextAttribute.
    The console attribute byte is laid out as: foreground color in the
    low 3 bits, background color shifted left by 4, plus the WinStyle
    intensity bits.
    """

    def __init__(self):
        # Capture the console's attributes at startup so reset_all() can
        # restore exactly what the user had before.
        self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
        self.set_attrs(self._default)
        self._default_fore = self._fore
        self._default_back = self._back
        self._default_style = self._style
        # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
        # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
        # we track them separately, since LIGHT_EX is overwritten by Fore/Back
        # and BRIGHT is overwritten by Style codes.
        self._light = 0

    def get_attrs(self):
        """Recombine fore/back/style/light into a Win32 attribute byte."""
        return self._fore + self._back * 16 + (self._style | self._light)

    def set_attrs(self, value):
        """Split a Win32 attribute byte into fore/back/style fields."""
        self._fore = value & 7
        self._back = (value >> 4) & 7
        self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)

    def reset_all(self, on_stderr=None):
        # NOTE: on_stderr is accepted for call-signature symmetry with the
        # other methods but is not used here; the reset is applied via
        # set_console's default stdout handle.
        self.set_attrs(self._default)
        self.set_console(attrs=self._default)
        self._light = 0

    def fore(self, fore=None, light=False, on_stderr=False):
        """Set the foreground color (None restores the startup default)."""
        if fore is None:
            fore = self._default_fore
        self._fore = fore
        # Emulate LIGHT_EX with BRIGHT Style
        if light:
            self._light |= WinStyle.BRIGHT
        else:
            self._light &= ~WinStyle.BRIGHT
        self.set_console(on_stderr=on_stderr)

    def back(self, back=None, light=False, on_stderr=False):
        """Set the background color (None restores the startup default)."""
        if back is None:
            back = self._default_back
        self._back = back
        # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
        if light:
            self._light |= WinStyle.BRIGHT_BACKGROUND
        else:
            self._light &= ~WinStyle.BRIGHT_BACKGROUND
        self.set_console(on_stderr=on_stderr)

    def style(self, style=None, on_stderr=False):
        """Set the style bits (None restores the startup default)."""
        if style is None:
            style = self._default_style
        self._style = style
        self.set_console(on_stderr=on_stderr)

    def set_console(self, attrs=None, on_stderr=False):
        """Push the current (or given) attributes to the console."""
        if attrs is None:
            attrs = self.get_attrs()
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleTextAttribute(handle, attrs)

    def get_position(self, handle):
        """Return the cursor position, converted to 1-based coordinates."""
        position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
        # Because Windows coordinates are 0-based,
        # and win32.SetConsoleCursorPosition expects 1-based.
        position.X += 1
        position.Y += 1
        return position

    def set_cursor_position(self, position=None, on_stderr=False):
        """Move the cursor to an absolute (x, y) position (1-based)."""
        if position is None:
            # I'm not currently tracking the position, so there is no default.
            # position = self.get_position()
            return
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleCursorPosition(handle, position)

    def cursor_adjust(self, x, y, on_stderr=False):
        """Move the cursor relative to its current position."""
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        position = self.get_position(handle)
        adjusted_position = (position.Y + y, position.X + x)
        # adjust=False: the coordinates above are already 1-based.
        win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)

    def erase_screen(self, mode=0, on_stderr=False):
        """Emulate ANSI ED (erase in display) for the given mode."""
        # 0 should clear from the cursor to the end of the screen.
        # 1 should clear from the cursor to the beginning of the screen.
        # 2 should clear the entire screen, and move cursor to (1,1)
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        # get the number of character cells in the current buffer
        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
        # get number of character cells before current cursor position
        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = cells_in_screen - cells_before_cursor
        elif mode == 1:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_before_cursor
        elif mode == 2:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_in_screen
        else:
            # invalid mode
            return
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
        if mode == 2:
            # put the cursor where needed
            win32.SetConsoleCursorPosition(handle, (1, 1))

    def erase_line(self, mode=0, on_stderr=False):
        """Emulate ANSI EL (erase in line) for the given mode."""
        # 0 should clear from the cursor to the end of the line.
        # 1 should clear from the cursor to the beginning of the line.
        # 2 should clear the entire line.
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        elif mode == 1:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwCursorPosition.X
        elif mode == 2:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwSize.X
        else:
            # invalid mode
            return
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title) | PypiClean |
/DracoPy-0.0.10.tar.gz/DracoPy-0.0.10/draco/docs/spec/index.md | ---
layout: spec-page
title: Draco Bitstream Specification
version: Version 2,2
version_date: Released 2017-10-25
---
<ol class="breadcrumb">
<li class=""><a href="..">Home</a></li>
<li class="">Draft Specification</li>
</ol>
{% include_relative 00.00.00.title.md %}
{% include_relative 00.00.01.version.md %}
{% include_relative 00.00.02.authors.md %}
{% include_relative 00.00.03.last.modified.md %}
{% include_relative 00.00.04.abstract.md %}
{% include_relative 00.00.05.toc.md %}
{% include_relative 01.00.00.scope.md %}
{% include_relative 03.00.00.symbols.md %}
{% include_relative 04.00.00.conventions.md %}
{% include_relative draco.decoder.md %}
{% include_relative metadata.decoder.md %}
{% include_relative connectivity.decoder.md %}
{% include_relative sequential.decoder.md %}
{% include_relative edgebreaker.decoder.md %}
{% include_relative edgebreaker.traversal.md %}
{% include_relative edgebreaker.traversal.valence.md %}
{% include_relative edgebreaker.traversal.prediction.degree.md %}
{% include_relative attributes.decoder.md %}
{% include_relative sequential.integer.attribute.decoder.md %}
{% include_relative boundary.decoder.md %}
{% include_relative prediction.decoder.md %}
{% include_relative sequential.quantization.attribute.decoder.md %}
{% include_relative sequential.normal.attribute.decoder.md %}
{% include_relative prediction.texcoords.decoder.md %}
{% include_relative prediction.normal.decoder.md %}
{% include_relative prediction.normal.transform.md %}
{% include_relative prediction.wrap.transform.md %}
{% include_relative prediction.parallelogram.decoder.md %}
{% include_relative prediction.multi.parallelogram.decoder.md %}
{% include_relative rans.decoding.md %}
{% include_relative corner.md %}
{% include_relative vector.md %}
{% include_relative core.functions.md %}
{% include_relative variable.descriptions.md %}
{% comment %}
{% include_relative 99.00.01.testing.md %}
{% endcomment %}
| PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py |
__revision__ = "src/engine/SCons/Subst.py 2014/07/05 09:42:21 garyo"
import collections
import re
import SCons.Errors
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below: SUBST_CMD, SUBST_RAW use the
# plain-substitution converter; SUBST_SIG uses the signature converter.
_strconv = [SCons.Util.to_String_for_subst,
            SCons.Util.to_String_for_subst,
            SCons.Util.to_String_for_signature]

# Exception classes that are silently swallowed (expanded to '') during
# variable expansion; replaceable via SetAllowableExceptions().
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
    """Replace the module-wide list of exceptions tolerated during expansion.

    Falsy entries (e.g. None) are dropped from the new list.
    """
    global AllowableExceptions
    AllowableExceptions = list(filter(None, excepts))
def raise_exception(exception, target, s):
    """Re-raise *exception* as a SCons error while evaluating string *s*.

    Attributed to target[0] as a BuildError when a target is known,
    otherwise raised as a UserError.
    """
    msg = "%s `%s' trying to evaluate `%s'" % (
        exception.__class__.__name__, exception, s)
    if target:
        raise SCons.Errors.BuildError(target[0], msg)
    raise SCons.Errors.UserError(msg)
class Literal(object):
    """A wrapper for a string. If you use this object wrapped
    around a string, then it will be interpreted as literal.
    When passed to the command interpreter, all special
    characters will be escaped."""
    def __init__(self, lstr):
        self.lstr = lstr
    def __str__(self):
        return self.lstr
    def escape(self, escape_func):
        """Return the wrapped string processed by escape_func."""
        return escape_func(self.lstr)
    def for_signature(self):
        """Return the canonical string used for signature calculation."""
        return self.lstr
    def is_literal(self):
        return 1
    def __eq__(self, other):
        if not isinstance(other, Literal):
            return False
        return self.lstr == other.lstr
    def __ne__(self, other):
        # Bug fix: the rich-comparison special method is __ne__; the
        # original defined __neq__, which Python never calls for `!=`.
        return not self.__eq__(other)
    # Backward-compatible alias for any code calling the old misspelling.
    __neq__ = __ne__
class SpecialAttrWrapper(object):
    """Wrapper for a "Node special attribute" string.

    These are Node attributes referenced from Environment variable
    substitution, such as $TARGET.abspath or $SOURCES[1].filebase.
    Implements the same interface as Literal so special characters can be
    escaped, plus for_signature() so a canonical string can be returned
    during signature calculation to avoid unnecessary rebuilds.
    """
    def __init__(self, lstr, for_signature=None):
        """Store the string; for_signature, if given, becomes the
        canonical string returned by for_signature()."""
        self.lstr = lstr
        self.forsig = for_signature if for_signature else lstr
    def __str__(self):
        return self.lstr
    def escape(self, escape_func):
        """Return the wrapped string processed by escape_func."""
        return escape_func(self.lstr)
    def for_signature(self):
        """Return the canonical signature string."""
        return self.forsig
    def is_literal(self):
        return 1
def quote_spaces(arg):
    """Return *arg* (stringified) wrapped in double quotes when it
    contains a space or tab, unchanged otherwise."""
    if ' ' in arg or '\t' in arg:
        return '"%s"' % arg
    return str(arg)
class CmdStringHolder(collections.UserString):
    """String produced by scons_subst()/scons_subst_list().

    Given an escape function for a particular platform, escape() returns
    the contained string with the proper escape sequences inserted.
    """
    def __init__(self, cmd, literal=None):
        collections.UserString.__init__(self, cmd)
        self.literal = literal
    def is_literal(self):
        return self.literal
    def escape(self, escape_func, quote_func=quote_spaces):
        """Escape the string with the supplied function.

        escape_func receives an arbitrary string and must return it with
        all special characters escaped, ready for the command
        interpreter. Literal strings are escaped; strings containing
        whitespace are quoted; anything else is returned untouched.
        """
        if self.is_literal():
            return escape_func(self.data)
        if ' ' in self.data or '\t' in self.data:
            return quote_func(self.data)
        return self.data
def escape_list(mylist, escape_func):
    """Return *mylist* with escape_func applied via each element's
    escape() method; elements without one pass through unchanged."""
    def _escape_one(obj):
        try:
            escape = obj.escape
        except AttributeError:
            return obj
        return escape(escape_func)
    return [_escape_one(obj) for obj in mylist]
class NLWrapper(object):
    """A wrapper class that delays turning a list of sources or targets
    into a NodeList until it's needed. The specified function supplied
    when the object is initialized is responsible for turning raw nodes
    into proxies that implement the special attributes like .abspath,
    .source, etc. This way, we avoid creating those proxies just
    "in case" someone is going to use $TARGET or the like, and only
    go through the trouble if we really have to.

    Memoization is done by rebinding: the class attribute
    _create_nodelist initially points at _gen_nodelist; the first call
    computes the NodeList and then shadows _create_nodelist on the
    *instance* with _return_nodelist, so later calls return the cached
    result without any conditional check.

    In practice, this might be a wash performance-wise, but it's a little
    cleaner conceptually...
    """
    def __init__(self, list, func):
        # list: raw node(s); may be None, a single node, or a sequence.
        # func: converts one raw node into its substitution proxy.
        self.list = list
        self.func = func
    def _return_nodelist(self):
        # Fast path after the first call: just hand back the cache.
        return self.nodelist
    def _gen_nodelist(self):
        mylist = self.list
        # Normalize None -> [] and a single node -> [node].
        if mylist is None:
            mylist = []
        elif not is_Sequence(mylist):
            mylist = [mylist]
        # The map(self.func) call is what actually turns
        # a list into appropriate proxies.
        self.nodelist = SCons.Util.NodeList(list(map(self.func, mylist)))
        # Rebind on the instance so subsequent calls skip regeneration.
        self._create_nodelist = self._return_nodelist
        return self.nodelist
    _create_nodelist = _gen_nodelist
class Targets_or_Sources(collections.UserList):
    """Implements $TARGETS / $SOURCES expansions by delegating to a
    wrapped NLWrapper, which creates the proxy NodeList on demand.

    Subclassing collections.UserList is purely so that is_Sequence()
    identifies instances as lists during variable expansion; no UserList
    behavior is actually relied upon.
    """
    def __init__(self, nl):
        self.nl = nl
    def __getattr__(self, attr):
        return getattr(self.nl._create_nodelist(), attr)
    def __getitem__(self, i):
        return self.nl._create_nodelist()[i]
    def __getslice__(self, i, j):
        # Python 2 slice protocol; clamp negatives like the original.
        nodelist = self.nl._create_nodelist()
        return nodelist[max(i, 0):max(j, 0)]
    def __str__(self):
        return str(self.nl._create_nodelist())
    def __repr__(self):
        return repr(self.nl._create_nodelist())
class Target_or_Source(object):
    """Implements $TARGET / $SOURCE expansions by delegating to a wrapped
    NLWrapper, creating a proxy for the first node on demand."""
    def __init__(self, nl):
        self.nl = nl
    def __getattr__(self, attr):
        nodelist = self.nl._create_nodelist()
        try:
            first = nodelist[0]
        except IndexError:
            # Nothing in the list: there is no proxy to delegate to, so
            # every attribute lookup fails.
            raise AttributeError("NodeList has no attribute: %s" % attr)
        return getattr(first, attr)
    def __str__(self):
        nodelist = self.nl._create_nodelist()
        return str(nodelist[0]) if nodelist else ''
    def __repr__(self):
        nodelist = self.nl._create_nodelist()
        return repr(nodelist[0]) if nodelist else ''
class NullNodeList(SCons.Util.NullSeq):
    # Stand-in for "no targets/sources": callable and printable, and in
    # both cases renders as the empty string.
    def __call__(self, *args, **kwargs): return ''
    def __str__(self): return ''

# Shared singleton installed by subst_dict() when target/source are absent.
NullNodesList = NullNodeList()
def subst_dict(target, source):
    """Create a dictionary for substitution of special
    construction variables.

    This translates the following special arguments:

    target - the target (object or array of objects),
             used to generate the TARGET and TARGETS
             construction variables

    source - the source (object or array of objects),
             used to generate the SOURCES and SOURCE
             construction variables

    Proxy creation is deferred: the NLWrapper/Targets_or_Sources/
    Target_or_Source objects stored here only build the proxy NodeList
    when an expansion actually touches them.
    """
    dict = {}
    if target:
        def get_tgt_subst_proxy(thing):
            try:
                subst_proxy = thing.get_subst_proxy()
            except AttributeError:
                subst_proxy = thing # probably a string, just return it
            return subst_proxy
        tnl = NLWrapper(target, get_tgt_subst_proxy)
        dict['TARGETS'] = Targets_or_Sources(tnl)
        dict['TARGET'] = Target_or_Source(tnl)
        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        dict['CHANGED_TARGETS'] = '$TARGETS'
        dict['UNCHANGED_TARGETS'] = '$TARGETS'
    else:
        dict['TARGETS'] = NullNodesList
        dict['TARGET'] = NullNodesList
    if source:
        def get_src_subst_proxy(node):
            # Prefer the node's repository file (rfile) when available.
            try:
                rfile = node.rfile
            except AttributeError:
                pass
            else:
                node = rfile()
            try:
                return node.get_subst_proxy()
            except AttributeError:
                return node # probably a String, just return it
        snl = NLWrapper(source, get_src_subst_proxy)
        dict['SOURCES'] = Targets_or_Sources(snl)
        dict['SOURCE'] = Target_or_Source(snl)
        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        dict['CHANGED_SOURCES'] = '$SOURCES'
        dict['UNCHANGED_SOURCES'] = '$SOURCES'
    else:
        dict['SOURCES'] = NullNodesList
        dict['SOURCE'] = NullNodesList
    return dict
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst().  SUBST_RAW gives the raw command line.  SUBST_CMD
# gives a command line suitable for passing to a shell.  SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2

# _rm strips bare '$(' / '$)' markers; _remove strips the markers AND
# everything between a matched '$(' ... '$)' pair.
_rm = re.compile(r'\$[()]')
_remove = re.compile(r'\$\([^\$]*(\$[^\)][^\$]*)*\$\)')

# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _remove ]
def _rm_list(list):
#return [ l for l in list if not l in ('$(', '$)') ]
return [l for l in list if not l in ('$(', '$)')]
def _remove_list(list):
result = []
do_append = result.append
for l in list:
if l == '$(':
do_append = lambda x: None
elif l == '$)':
do_append = result.append
else:
do_append(l)
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]

# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
#       "$$"
#       "$("
#       "$)"
#       "$variable"             [must begin with alphabetic or underscore]
#       "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
#       "   "                   [white space]
#       "non-white-space"       [without any dollar signs]
#       "$"                     [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s\$]+|\$)' % _dollar_exps_str)

# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
# The lookahead skips whitespace inside unmatched '{' ... '}' spans.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Expand a string or list containing construction variable
    substitutions.

    This is the work-horse function for substitutions in file names
    and the like.  The companion scons_subst_list() function (below)
    handles separating command lines into lists of arguments, so see
    that function if that's what you're looking for.

    NOTE(review): this module is written for Python 2 (`except
    Exception, e` syntax below); keep that in mind before modernizing.
    """
    # Fast path: a plain string with no '$' needs no expansion at all.
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        return strSubst
    class StringSubber(object):
        """A class to construct the results of a scons_subst() call.

        This binds a specific construction environment, mode, target and
        source with two methods (substitute() and expand()) that handle
        the expansion.
        """
        def __init__(self, env, mode, conv, gvars):
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars
        def expand(self, s, lvars):
            """Expand a single "token" as necessary, returning an
            appropriate string containing the expansion.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (IndexError, ValueError):
                    # Zero- or one-character string: nothing to expand.
                    return s
                if s0 != '$':
                    return s
                if s1 == '$':
                    # "$$" escapes a literal dollar sign.
                    return '$'
                elif s1 in '()':
                    # "$(" / "$)" markers pass through for later removal.
                    return s
                else:
                    key = s[1:]
                    # "${...}" bodies and dotted names go through eval();
                    # simple names are looked up in lvars then gvars.
                    if key[0] == '{' or key.find('.') >= 0:
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception, e:
                            if e.__class__ in AllowableExceptions:
                                return ''
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            raise_exception(NameError(key), lvars['TARGETS'], s)
                        else:
                            return ''
                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    #
                    # This could potentially be optimized by only
                    # copying lvars when s contains more expansions,
                    # but lvars is usually supposed to be pretty
                    # small, and deeply nested variable expansions
                    # are probably more the exception than the norm,
                    # so it should be tolerable for now.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    return self.substitute(s, lv)
            elif is_Sequence(s):
                # Expand each element independently, converting each
                # result with the configured converter.
                def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
                    return conv(substitute(l, lvars))
                return list(map(func, s))
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                         source=lvars['SOURCES'],
                         env=self.env,
                         for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        return s
                    s = self.conv(s)
                return self.substitute(s, lvars)
            elif s is None:
                return ''
            else:
                return s
        def substitute(self, args, lvars):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                try:
                    def sub_match(match):
                        return self.conv(self.expand(match.group(1), lvars))
                    result = _dollar_exps.sub(sub_match, args)
                except TypeError:
                    # If the internal conversion routine doesn't return
                    # strings (it could be overridden to return Nodes, for
                    # example), then the 1.5.2 re module will throw this
                    # exception.  Back off to a slower, general-purpose
                    # algorithm that works for all data types.
                    args = _separate_args.findall(args)
                    result = []
                    for a in args:
                        result.append(self.conv(self.expand(a, lvars)))
                    if len(result) == 1:
                        result = result[0]
                    else:
                        result = ''.join(map(str, result))
                return result
            else:
                return self.expand(args, lvars)
    if conv is None:
        conv = _strconv[mode]
    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)
    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__
    ss = StringSubber(env, mode, conv, gvars)
    result = ss.substitute(strSubst, lvars)
    try:
        del gvars['__builtins__']
    except KeyError:
        pass
    if is_String(result):
        # Remove $(-$) pairs and any stuff in between,
        # if that's appropriate.
        remove = _regex_remove[mode]
        if remove:
            result = remove.sub('', result)
        if mode != SUBST_RAW:
            # Compress strings of white space characters into
            # a single space.
            result = _space_sep.sub(' ', result).strip()
    elif is_Sequence(result):
        remove = _list_remove[mode]
        if remove:
            result = remove(result)
    return result
#Subst_List_Strings = {}
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Substitute construction variables in a string (or list or other
    object) and separate the arguments into a command list.

    The companion scons_subst() function (above) handles basic
    substitutions within strings, so see that function instead
    if that's what you're looking for.

    Arguments:
        strSubst -- string, sequence or callable to expand
        env      -- construction environment supplying variable values
        mode     -- one of SUBST_RAW, SUBST_CMD, SUBST_SIG
        target, source -- optional nodes used to populate $TARGET/$SOURCE
        gvars, lvars   -- global and local substitution dictionaries
        conv     -- optional conversion applied to each expanded value

    Returns a list of command lines, each of which is a list of words.

    Fixes relative to the previous revision:
      * ``except Exception, e`` (Python-2-only syntax) is now
        ``except Exception as e`` -- the old form is a SyntaxError on
        Python 3, which this module otherwise requires
        (``collections.UserList``).
      * the temporary ``__builtins__`` entry added to ``gvars`` is now
        removed in a ``finally`` clause so it cannot leak into the
        (shared, mutable-default) dictionary if substitution raises.
    """
#    try:
#        Subst_List_Strings[strSubst] = Subst_List_Strings[strSubst] + 1
#    except KeyError:
#        Subst_List_Strings[strSubst] = 1
#    import SCons.Debug
#    SCons.Debug.caller_trace(1)
    class ListSubber(collections.UserList):
        """A class to construct the results of a scons_subst_list() call.

        Like StringSubber, this class binds a specific construction
        environment, mode, target and source with two methods
        (substitute() and expand()) that handle the expansion.

        In addition, however, this class is used to track the state of
        the result(s) we're gathering so we can do the appropriate thing
        whenever we have to append another word to the result--start a new
        line, start a new word, append to the current word, etc.  We do
        this by setting the "append" attribute to the right method so
        that our wrapper methods only need ever call ListSubber.append(),
        and the rest of the object takes care of doing the right thing
        internally.
        """
        def __init__(self, env, mode, conv, gvars):
            collections.UserList.__init__(self, [])
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars

            # In raw mode the $( / $) markers are kept in the output;
            # otherwise they are silently dropped.
            if self.mode == SUBST_RAW:
                self.add_strip = lambda x: self.append(x)
            else:
                self.add_strip = lambda x: None
            self.in_strip = None
            self.next_line()

        def expand(self, s, lvars, within_list):
            """Expand a single "token" as necessary, appending the
            expansion to the current result.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (IndexError, ValueError):
                    self.append(s)
                    return
                if s0 != '$':
                    self.append(s)
                    return
                if s1 == '$':
                    self.append('$')
                elif s1 == '(':
                    self.open_strip('$(')
                elif s1 == ')':
                    self.close_strip('$)')
                else:
                    key = s[1:]
                    if key[0] == '{' or key.find('.') >= 0:
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception as e:
                            if e.__class__ in AllowableExceptions:
                                return
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            raise_exception(NameError(), lvars['TARGETS'], s)
                        else:
                            return

                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    self.substitute(s, lv, 0)
                    self.this_word()
            elif is_Sequence(s):
                for a in s:
                    self.substitute(a, lvars, 1)
                    self.next_word()
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                          source=lvars['SOURCES'],
                          env=self.env,
                          for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        self.append(s)
                        return
                    s = self.conv(s)
                self.substitute(s, lvars, within_list)
            elif s is None:
                self.this_word()
            else:
                self.append(s)

        def substitute(self, args, lvars, within_list):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                args = _separate_args.findall(args)
                for a in args:
                    if a[0] in ' \t\n\r\f\v':
                        if '\n' in a:
                            self.next_line()
                        elif within_list:
                            self.append(a)
                        else:
                            self.next_word()
                    else:
                        self.expand(a, lvars, within_list)
            else:
                self.expand(args, lvars, within_list)

        def next_line(self):
            """Arrange for the next word to start a new line.  This
            is like starting a new word, except that we have to append
            another line to the result."""
            collections.UserList.append(self, [])
            self.next_word()

        def this_word(self):
            """Arrange for the next word to append to the end of the
            current last word in the result."""
            self.append = self.add_to_current_word

        def next_word(self):
            """Arrange for the next word to start a new word."""
            self.append = self.add_new_word

        def add_to_current_word(self, x):
            """Append the string x to the end of the current last word
            in the result.  If that is not possible, then just add
            it as a new word.  Make sure the entire concatenated string
            inherits the object attributes of x (in particular, the
            escape function) by wrapping it as CmdStringHolder."""
            if not self.in_strip or self.mode != SUBST_SIG:
                try:
                    current_word = self[-1][-1]
                except IndexError:
                    self.add_new_word(x)
                else:
                    # All right, this is a hack and it should probably
                    # be refactored out of existence in the future.
                    # The issue is that we want to smoosh words together
                    # and make one file name that gets escaped if
                    # we're expanding something like foo$EXTENSION,
                    # but we don't want to smoosh them together if
                    # it's something like >$TARGET, because then we'll
                    # treat the '>' like it's part of the file name.
                    # So for now, just hard-code looking for the special
                    # command-line redirection characters...
                    try:
                        last_char = str(current_word)[-1]
                    except IndexError:
                        last_char = '\0'
                    if last_char in '<>|':
                        self.add_new_word(x)
                    else:
                        y = current_word + x

                        # We used to treat a word appended to a literal
                        # as a literal itself, but this caused problems
                        # with interpreting quotes around space-separated
                        # targets on command lines.  Removing this makes
                        # none of the "substantive" end-to-end tests fail,
                        # so we'll take this out but leave it commented
                        # for now in case there's a problem not covered
                        # by the test cases and we need to resurrect this.
                        #literal1 = self.literal(self[-1][-1])
                        #literal2 = self.literal(x)
                        y = self.conv(y)
                        if is_String(y):
                            #y = CmdStringHolder(y, literal1 or literal2)
                            y = CmdStringHolder(y, None)
                        self[-1][-1] = y

        def add_new_word(self, x):
            # Words inside a $( $) strip region are dropped entirely in
            # signature mode.
            if not self.in_strip or self.mode != SUBST_SIG:
                literal = self.literal(x)
                x = self.conv(x)
                if is_String(x):
                    x = CmdStringHolder(x, literal)
                self[-1].append(x)
            self.append = self.add_to_current_word

        def literal(self, x):
            # Returns x.is_literal() when available, else None.
            try:
                l = x.is_literal
            except AttributeError:
                return None
            else:
                return l()

        def open_strip(self, x):
            """Handle the "open strip" $( token."""
            self.add_strip(x)
            self.in_strip = 1

        def close_strip(self, x):
            """Handle the "close strip" $) token."""
            self.add_strip(x)
            self.in_strip = None

    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ls = ListSubber(env, mode, conv, gvars)
    try:
        ls.substitute(strSubst, lvars, 0)
    finally:
        # Always clean up, even if substitution raised, so the shared
        # default gvars dict is never left holding __builtins__.
        try:
            del gvars['__builtins__']
        except KeyError:
            pass

    return ls.data
def scons_subst_once(strSubst, env, key):
    """Perform single (non-recursive) substitution of a single
    construction variable keyword.

    This is used when setting a variable when copying or overriding values
    in an Environment.  We want to capture (expand) the old value before
    we override it, so people can do things like:

        env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')

    We do this with some straightforward, brute-force code here...
    """
    # Fast path: a plain string with no '$' cannot contain the keyword.
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        return strSubst

    patterns = ['$' + key, '${' + key + '}']
    replacement = env.get(key, '')

    def expand_match(match, val=replacement, matchlist=patterns):
        # Replace only an exact occurrence of the requested keyword;
        # any other $-expression is left as-is (stringified).
        text = match.group(1)
        if text in matchlist:
            text = val
        if is_Sequence(text):
            return ' '.join(map(str, text))
        return str(text)

    if is_Sequence(strSubst):
        expanded = []
        for element in strSubst:
            if not is_String(element):
                expanded.append(element)
            elif element in patterns:
                # A whole-element match splices list values in place.
                if is_Sequence(replacement):
                    expanded.extend(replacement)
                else:
                    expanded.append(replacement)
            else:
                expanded.append(_dollar_exps.sub(expand_match, element))
        return expanded
    if is_String(strSubst):
        return _dollar_exps.sub(expand_match, strSubst)
    return strSubst
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/builtin/colors/named_colors.py | from mathics.builtin.base import Builtin
from mathics.core.symbols import strip_context
class _ColorObject(Builtin):
    """Common base for the named-color builtins below.

    Builds a standard docstring (URL, <dl> description and two doctest
    examples) for each concrete color class and prepends it to any
    hand-written class docstring.  NOTE: the generated __doc__ is
    runtime-significant -- Mathics renders it as documentation and runs
    the ``>>`` lines as doctests.
    """

    # Optional human-readable name (e.g. "light blue"); when None the
    # lower-cased symbol name is used instead.
    text_name = None

    def __init__(self, *args, **kwargs):
        super(_ColorObject, self).__init__(*args, **kwargs)
        # Derive the display name from the (context-stripped) symbol name
        # unless a subclass supplied an explicit text_name.
        if self.text_name is None:
            text_name = strip_context(self.get_name()).lower()
        else:
            text_name = self.text_name
        # Template for the generated documentation; %(name)s is the bare
        # symbol name and %(text_name)s the human-readable color name.
        doc = """
    <url>:WMA link:https://reference.wolfram.com/language/ref/%(text_name)s.html</url>

    <dl>
      <dt>'%(name)s'
      <dd>represents the color %(text_name)s in graphics.
    </dl>

    >> Graphics[{EdgeForm[Black], %(name)s, Disk[]}, ImageSize->Small]
     = -Graphics-

    >> %(name)s // ToBoxes
     = StyleBox[GraphicsBox[...], ...]
    """ % {
            "name": strip_context(self.get_name()),
            "text_name": text_name,
        }
        self.summary_text = f"{text_name} color"
        # Prepend the generated text to any subclass-provided docstring so
        # per-class doctests (e.g. ``>> Black``) are preserved.
        if self.__doc__ is None:
            self.__doc__ = doc
        else:
            self.__doc__ = doc + self.__doc__
class Black(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Black.html</url>

    >> Black
     = RGBColor[0, 0, 0]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Black": "RGBColor[0, 0, 0]"}
class Blue(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Blue.html</url>

    >> Blue
     = RGBColor[0, 0, 1]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Blue": "RGBColor[0, 0, 1]"}
class Brown(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Brown.html</url>

    >> Brown
     = RGBColor[0.6, 0.4, 0.2]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Brown": "RGBColor[0.6, 0.4, 0.2]"}
class Cyan(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Cyan.html</url>

    >> Cyan
     = RGBColor[0, 1, 1]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Cyan": "RGBColor[0, 1, 1]"}
class Gray(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Gray.html</url>

    >> Gray
     = GrayLevel[0.5]
    """

    # Gray is expressed as a GrayLevel, not an RGB triple.
    rules = {"Gray": "GrayLevel[0.5]"}
class Green(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Green.html</url>

    >> Green
     = RGBColor[0, 1, 0]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Green": "RGBColor[0, 1, 0]"}
class Magenta(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Magenta.html</url>

    >> Magenta
     = RGBColor[1, 0, 1]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Magenta": "RGBColor[1, 0, 1]"}
class LightBlue(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/LightBlue.html</url>

    >> Graphics[{LightBlue, EdgeForm[Black], Disk[]}]
     = -Graphics-

    >> Plot[Sin[x], {x, 0, 2 Pi}, Background -> LightBlue]
     = -Graphics-
    """

    # Two-word display name for the generated documentation.
    text_name = "light blue"

    # Unlike the other Light* colors, this one uses a fixed RGB value
    # rather than Lighter[...].
    rules = {"LightBlue": "RGBColor[0.87, 0.94, 1]"}
class LightBrown(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Brown].
    text_name = "light brown"
    rules = {"LightBrown": "Lighter[Brown, 0.85]"}
class LightCyan(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Cyan].
    text_name = "light cyan"
    rules = {"LightCyan": "Lighter[Cyan, 0.9]"}
class LightGray(_ColorObject):
    # Display name for generated docs; uses Lighter's default amount.
    text_name = "light gray"
    rules = {"LightGray": "Lighter[Gray]"}
class LightGreen(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Green].
    text_name = "light green"
    rules = {"LightGreen": "Lighter[Green, 0.88]"}
class LightMagenta(_ColorObject):
    # Display name for generated docs; uses Lighter's default amount.
    text_name = "light magenta"
    rules = {"LightMagenta": "Lighter[Magenta]"}
class LightOrange(_ColorObject):
    # Display name for generated docs; fixed RGB value (not Lighter[...]).
    text_name = "light orange"
    rules = {"LightOrange": "RGBColor[1, 0.9, 0.8]"}
class LightPink(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Pink].
    text_name = "light pink"
    rules = {"LightPink": "Lighter[Pink, 0.85]"}
class LightPurple(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Purple].
    text_name = "light purple"
    rules = {"LightPurple": "Lighter[Purple, 0.88]"}
class LightRed(_ColorObject):
    # Display name for generated docs; color derived via Lighter[Red].
    text_name = "light red"
    rules = {"LightRed": "Lighter[Red, 0.85]"}
class LightYellow(_ColorObject):
    # Display name for generated docs; uses Lighter's default amount.
    text_name = "light yellow"
    rules = {"LightYellow": "Lighter[Yellow]"}
class Pink(_ColorObject):
    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Pink": "RGBColor[1.0, 0.5, 0.5]"}
class Purple(_ColorObject):
    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Purple": "RGBColor[0.5, 0, 0.5]"}
class Orange(_ColorObject):
    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Orange": "RGBColor[1, 0.5, 0]"}
class Red(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Red.html</url>

    >> Red
     = RGBColor[1, 0, 0]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Red": "RGBColor[1, 0, 0]"}
class Yellow(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Yellow.html</url>

    >> Yellow
     = RGBColor[1, 1, 0]
    """

    # Evaluation rule: the symbol rewrites to its RGB triple.
    rules = {"Yellow": "RGBColor[1, 1, 0]"}
class White(_ColorObject):
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/White.html</url>

    >> White
     = GrayLevel[1]
    """

    # White is expressed as a GrayLevel, not an RGB triple.
    rules = {"White": "GrayLevel[1]"}
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/widgets/utils/messagewidget.py | import sys
import enum
import base64
from itertools import chain
from operator import attrgetter
from xml.sax.saxutils import escape
from collections import OrderedDict
# pylint: disable=unused-import
from typing import (
NamedTuple, Tuple, List, Dict, Iterable, Union, Optional, Hashable
)
from AnyQt.QtCore import Qt, QSize, QBuffer
from AnyQt.QtGui import (
QIcon, QPixmap, QPainter, QPalette, QLinearGradient, QBrush, QPen
)
from AnyQt.QtWidgets import (
QWidget, QLabel, QSizePolicy, QStyle, QHBoxLayout, QMessageBox,
QMenu, QWidgetAction, QStyleOption, QStylePainter, QApplication
)
from AnyQt.QtCore import pyqtSignal as Signal
__all__ = ["Message", "MessagesWidget"]
def image_data(pm):
    # type: (QPixmap) -> str
    """
    Encode the contents of *pm* as a PNG data URL (RFC-2397).

    Parameters
    ----------
    pm : QPixmap
        The pixmap to serialize.

    Returns
    -------
    datauri : str
        A ``data:image/png;base64,...`` URL holding the PNG bytes.
    """
    pixmap = QPixmap(pm)
    buffer = QBuffer()
    assert buffer.open(QBuffer.ReadWrite)
    # Render the pixmap into the in-memory buffer as PNG ...
    pixmap.save(buffer, b'png')
    buffer.close()
    raw = bytes(buffer.data())
    # ... then base64-encode it into the data-URL payload.
    payload = base64.b64encode(raw).decode("ascii")
    return "data:image/png;base64," + payload
class Severity(enum.IntEnum):
    """
    Message severity level.
    """
    # Values mirror the QMessageBox icon enumerators, so a Severity can
    # be passed anywhere a QMessageBox severity/icon constant is expected
    # (and ordering Information < Warning < Error holds numerically).
    Information = QMessageBox.Information
    Warning = QMessageBox.Warning
    Error = QMessageBox.Critical
class Message(
        NamedTuple(
            "Message", [
                ("severity", Severity),
                ("icon", QIcon),
                ("text", str),
                ("informativeText", str),
                ("detailedText", str),
                ("textFormat", Qt.TextFormat)
            ])):
    """
    A stateful message/notification.

    Parameters
    ----------
    severity : Message.Severity
        Severity level (default: Information).
    icon : QIcon
        Associated icon. If empty the `QStyle.standardIcon` will be used based
        on severity.
    text : str
        Short message text.
    informativeText : str
        Extra informative text to append to `text` (space permitting).
    detailedText : str
        Extra detailed text (e.g. exception traceback)
    textFormat : Qt.TextFormat
        If `Qt.RichText` then the contents of `text`, `informativeText` and
        `detailedText` will be rendered as html instead of plain text.
    """
    # Convenience aliases so callers can write Message.Warning etc.
    Severity = Severity
    Warning = Severity.Warning
    Information = Severity.Information
    Error = Severity.Error

    def __new__(cls, severity=Severity.Information, icon=QIcon(), text="",
                informativeText="", detailedText="", textFormat=Qt.PlainText):
        # Coerce `severity` through the Severity enum so plain ints
        # (e.g. QMessageBox constants) are accepted.
        return super().__new__(cls, Severity(severity), icon, text,
                               informativeText, detailedText, textFormat)

    def asHtml(self):
        # type: () -> str
        """
        Render the message as an HTML fragment.
        """
        # In plain-text mode escape the text and preserve whitespace;
        # rich text is passed through untouched.
        if self.textFormat == Qt.RichText:
            render = lambda t: t
        else:
            render = lambda t: ('<span style="white-space: pre">{}</span>'
                                .format(escape(t)))

        def iconsrc(message):
            # type: (Message) -> str
            """
            Return an image src url for message icon.
            """
            icon = message_icon(message)
            pm = icon.pixmap(12, 12)
            return image_data(pm)

        parts = [
            ('<div style="white-space:pre" class="message {}">'
             .format(self.severity.name.lower())),
            ('<div class="field-text">'
             '<img src="{iconurl}" width="12" height="12" />{text}</div>'
             .format(iconurl=iconsrc(self), text=render(self.text)))
        ]
        # Optional sections are appended only when non-empty.
        if self.informativeText:
            parts += ['<div class="field-informative-text">{}</div>'
                      .format(render(self.informativeText))]
        if self.detailedText:
            parts += ['<blockquote class="field-detailed-text">{}</blockquote>'
                      .format(render(self.detailedText))]
        parts += ['</div>']
        return "\n".join(parts)

    def isEmpty(self):
        # type: () -> bool
        """
        Is this message instance empty (has no text or icon)
        """
        return (not self.text and self.icon.isNull() and
                not self.informativeText and not self.detailedText)
def standard_pixmap(severity):
    # type: (Severity) -> QStyle.StandardPixmap
    """Return the QStyle standard pixmap matching a message severity.

    Raises KeyError for values outside the Severity enum, exactly like
    a plain dict lookup.
    """
    return {
        Severity.Information: QStyle.SP_MessageBoxInformation,
        Severity.Warning: QStyle.SP_MessageBoxWarning,
        Severity.Error: QStyle.SP_MessageBoxCritical,
    }[severity]
def message_icon(message, style=None):
    # type: (Message, Optional[QStyle]) -> QIcon
    """
    Return the effective icon for *message*.

    A non-null `message.icon` wins; otherwise the style's standard icon
    for the message's severity is used.

    Parameters
    ----------
    message : Message
    style : Optional[QStyle]
        Style to query for standard icons; defaults to the application's
        current style when a QApplication exists.

    Returns
    -------
    icon : QIcon
    """
    if style is None and QApplication.instance() is not None:
        style = QApplication.style()
    if not message.icon.isNull():
        return message.icon
    return style.standardIcon(standard_pixmap(message.severity))
def categorize(messages):
    # type: (List[Message]) -> Tuple[Optional[Message], List[Message], List[Message], List[Message]]
    """
    Partition *messages* by severity and pick a 'leader' if possible.

    The leader is the single message of the highest severity present --
    it exists only when that severity has exactly one representative,
    and it is removed from its severity list.

    Parameters
    ----------
    messages : List[Message]

    Returns
    -------
    r : Tuple[Optional[Message], List[Message], List[Message], List[Message]]
        (lead, errors, warnings, info)
    """
    errors, warnings, info = (
        [m for m in messages if m.severity == sev]
        for sev in (Severity.Error, Severity.Warning, Severity.Information)
    )
    lead = None
    if len(errors) == 1:
        lead = errors.pop()
    elif not errors and len(warnings) == 1:
        lead = warnings.pop()
    elif not errors and not warnings and len(info) == 1:
        lead = info.pop()
    return lead, errors, warnings, info
# pylint: disable=too-many-branches
def summarize(messages):
    # type: (List[Message]) -> Message
    """
    Summarize a list of messages into a single message instance

    Parameters
    ----------
    messages: List[Message]

    Returns
    -------
    message: Message
        The single input message when there is only one; otherwise a
        rich-text message whose text counts the messages by severity and
        whose detailedText concatenates all messages as HTML.
    """
    if not messages:
        return Message()

    if len(messages) == 1:
        return messages[0]

    lead, errors, warnings, info = categorize(messages)

    # The summary inherits severity/icon/text from the leader if there is
    # one; otherwise severity is the highest level present.
    severity = Severity.Information
    icon = QIcon()
    leading_text = ""
    text_parts = []
    if lead is not None:
        severity = lead.severity
        icon = lead.icon
        leading_text = lead.text
    elif errors:
        severity = Severity.Error
    elif warnings:
        severity = Severity.Warning

    def format_plural(fstr, items, *args, **kwargs):
        # Supplies {0}=count and {s}="s"/"" for simple pluralization.
        return fstr.format(len(items), *args,
                           s="s" if len(items) != 1 else "",
                           **kwargs)
    if errors:
        text_parts.append(format_plural("{} error{s}", errors))
    if warnings:
        text_parts.append(format_plural("{} warning{s}", warnings))
    if info:
        # Info messages are labeled "other" only when errors, warnings
        # AND a leader are all present.
        if not (errors and warnings and lead):
            text_parts.append(format_plural("{} message{s}", info))
        else:
            text_parts.append(format_plural("{} other", info))

    if leading_text:
        text = leading_text
        if text_parts:
            text = text + " (" + ", ".join(text_parts) + ")"
    else:
        text = ", ".join(text_parts)
    # Full HTML rendering of every non-empty message (leader first).
    detailed = "<hr/>".join(m.asHtml()
                            for m in chain([lead], errors, warnings, info)
                            if m is not None and not m.isEmpty())
    return Message(severity, icon, text, detailedText=detailed,
                   textFormat=Qt.RichText)
class MessagesWidget(QWidget):
    """
    An iconified multiple message display area.

    `MessagesWidget` displays a short message along with an icon. If there
    are multiple messages they are summarized. The user can click on the
    widget to display the full message text in a popup view.
    """
    #: Signal emitted when an embedded html link is clicked
    #: (if `openExternalLinks` is `False`).
    linkActivated = Signal(str)

    #: Signal emitted when an embedded html link is hovered.
    linkHovered = Signal(str)

    Severity = Severity
    #: General informative message.
    Information = Severity.Information
    #: A warning message severity.
    Warning = Severity.Warning
    #: An error message severity.
    Error = Severity.Error

    Message = Message

    def __init__(self, parent=None, openExternalLinks=False, **kwargs):
        kwargs.setdefault(
            "sizePolicy",
            QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
        )
        super().__init__(parent, **kwargs)
        self.__openExternalLinks = openExternalLinks  # type: bool
        # Messages keyed by caller-supplied id; insertion order preserved.
        self.__messages = OrderedDict()  # type: Dict[Hashable, Message]
        #: The full (joined all messages text - rendered as html), displayed
        #: in a tooltip.
        self.__fulltext = ""
        #: The full text displayed in a popup. Is empty if the message is
        #: short
        self.__popuptext = ""
        #: Leading icon
        self.__iconwidget = IconWidget(
            sizePolicy=QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        )
        #: Inline message text
        self.__textlabel = QLabel(
            wordWrap=False,
            textInteractionFlags=Qt.LinksAccessibleByMouse,
            openExternalLinks=self.__openExternalLinks,
            sizePolicy=QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
        )
        #: Indicator that extended contents are accessible with a click on the
        #: widget.
        self.__popupicon = QLabel(
            sizePolicy=QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum),
            text="\N{VERTICAL ELLIPSIS}",
            visible=False,
        )
        self.__textlabel.linkActivated.connect(self.linkActivated)
        self.__textlabel.linkHovered.connect(self.linkHovered)
        self.setLayout(QHBoxLayout())
        self.layout().setContentsMargins(2, 1, 2, 1)
        self.layout().setSpacing(0)
        self.layout().addWidget(self.__iconwidget)
        self.layout().addSpacing(4)
        self.layout().addWidget(self.__textlabel)
        self.layout().addWidget(self.__popupicon)
        self.__textlabel.setAttribute(Qt.WA_MacSmallSize)

    def sizeHint(self):
        # Never shrink below the style's small-icon height (+ margins).
        sh = super().sizeHint()
        h = self.style().pixelMetric(QStyle.PM_SmallIconSize)
        return sh.expandedTo(QSize(0, h + 2))

    def openExternalLinks(self):
        # type: () -> bool
        """
        If True then linkActivated signal will be emitted when the user
        clicks on an html link in a message, otherwise links are opened
        using `QDesktopServices.openUrl`
        """
        return self.__openExternalLinks

    def setOpenExternalLinks(self, state):
        # type: (bool) -> None
        """
        Set whether html links open externally (see `openExternalLinks`).
        """
        # TODO: update popup if open
        self.__openExternalLinks = state
        self.__textlabel.setOpenExternalLinks(state)

    def setMessage(self, message_id, message):
        # type: (Hashable, Message) -> None
        """
        Add a `message` for `message_id` to the current display.

        Note
        ----
        Set an empty `Message` instance to clear the message display but
        retain the relative ordering in the display should a message for
        `message_id` reactivate.
        """
        self.__messages[message_id] = message
        self.__update()

    def removeMessage(self, message_id):
        # type: (Hashable) -> None
        """
        Remove message for `message_id` from the display.

        Note
        ----
        Setting an empty `Message` instance will also clear the display,
        however the relative ordering of the messages will be retained,
        should the `message_id` 'reactivate'.
        """
        del self.__messages[message_id]
        self.__update()

    def setMessages(self, messages):
        # type: (Union[Iterable[Tuple[Hashable, Message]], Dict[Hashable, Message]]) -> None
        """
        Set multiple messages in a single call.
        """
        messages = OrderedDict(messages)
        self.__messages.update(messages)
        self.__update()

    def clear(self):
        # type: () -> None
        """
        Clear all messages.
        """
        self.__messages.clear()
        self.__update()

    def messages(self):
        # type: () -> List[Message]
        return list(self.__messages.values())

    def summarize(self):
        # type: () -> Message
        """
        Summarize all the messages into a single message.
        """
        messages = [m for m in self.__messages.values() if not m.isEmpty()]
        if messages:
            return summarize(messages)
        else:
            return Message()

    def __update(self):
        """
        Update the current display state.
        """
        self.ensurePolished()
        summary = self.summarize()
        icon = message_icon(summary)
        self.__iconwidget.setIcon(icon)
        self.__iconwidget.setVisible(not (summary.isEmpty() or icon.isNull()))
        self.__textlabel.setTextFormat(summary.textFormat)
        self.__textlabel.setText(summary.text)
        # Tooltip shows every non-empty message, most severe first.
        messages = [m for m in self.__messages.values() if not m.isEmpty()]
        if messages:
            messages = sorted(messages, key=attrgetter("severity"),
                              reverse=True)
            fulltext = "<hr/>".join(m.asHtml() for m in messages)
        else:
            fulltext = ""
        self.__fulltext = fulltext
        self.setToolTip(fulltext)

        def is_short(m):
            return not (m.informativeText or m.detailedText)

        # The click-popup is only offered when there is more content than
        # a single short message.
        if not messages or len(messages) == 1 and is_short(messages[0]):
            self.__popuptext = ""
        else:
            self.__popuptext = fulltext
        self.__popupicon.setVisible(bool(self.__popuptext))
        self.layout().activate()

    def mousePressEvent(self, event):
        # Left click opens the detailed popup (when available).
        if event.button() == Qt.LeftButton:
            if self.__popuptext:
                popup = QMenu(self)
                label = QLabel(
                    self, textInteractionFlags=Qt.TextBrowserInteraction,
                    openExternalLinks=self.__openExternalLinks,
                    text=self.__popuptext
                )
                label.linkActivated.connect(self.linkActivated)
                label.linkHovered.connect(self.linkHovered)
                action = QWidgetAction(popup)
                action.setDefaultWidget(label)
                popup.addAction(action)
                popup.popup(event.globalPos(), action)
            event.accept()
            return
        else:
            super().mousePressEvent(event)

    def enterEvent(self, event):
        # Repaint so the hover highlight (see paintEvent) appears.
        super().enterEvent(event)
        self.update()

    def leaveEvent(self, event):
        # Repaint so the hover highlight is cleared.
        super().leaveEvent(event)
        self.update()

    def changeEvent(self, event):
        # Repaint on palette/state changes.
        super().changeEvent(event)
        self.update()

    def paintEvent(self, event):
        # Draw a subtle frame/gradient only when a popup is available and
        # the widget is hovered or focused.
        opt = QStyleOption()
        opt.initFrom(self)
        if not self.__popupicon.isVisible():
            return
        if not (opt.state & QStyle.State_MouseOver or
                opt.state & QStyle.State_HasFocus):
            return
        palette = opt.palette  # type: QPalette
        if opt.state & QStyle.State_HasFocus:
            pen = QPen(palette.color(QPalette.Highlight))
        else:
            pen = QPen(palette.color(QPalette.Dark))

        if self.__fulltext and \
                opt.state & QStyle.State_MouseOver and \
                opt.state & QStyle.State_Active:
            g = QLinearGradient()
            g.setCoordinateMode(QLinearGradient.ObjectBoundingMode)
            base = palette.color(QPalette.Window)
            base.setAlpha(90)
            g.setColorAt(0, base.lighter(200))
            g.setColorAt(0.6, base)
            g.setColorAt(1.0, base.lighter(200))
            brush = QBrush(g)
        else:
            brush = QBrush(Qt.NoBrush)
        p = QPainter(self)
        p.setBrush(brush)
        p.setPen(pen)
        p.drawRect(opt.rect.adjusted(0, 0, -1, -1))
class IconWidget(QWidget):
    """
    A widget displaying an `QIcon`
    """
    def __init__(self, parent=None, icon=QIcon(), iconSize=QSize(), **kwargs):
        sizePolicy = kwargs.pop("sizePolicy", QSizePolicy(QSizePolicy.Fixed,
                                                          QSizePolicy.Fixed))
        super().__init__(parent, **kwargs)
        self.__icon = QIcon(icon)
        # Invalid (default-constructed) QSize means "use the style's
        # button icon size" -- see iconSize().
        self.__iconSize = QSize(iconSize)
        self.setSizePolicy(sizePolicy)

    def setIcon(self, icon):
        # type: (QIcon) -> None
        """Set the displayed icon (copied); triggers relayout/repaint."""
        if self.__icon != icon:
            self.__icon = QIcon(icon)
            self.updateGeometry()
            self.update()

    def icon(self):
        # type: () -> QIcon
        """Return a copy of the current icon."""
        return QIcon(self.__icon)

    def iconSize(self):
        # type: () -> QSize
        """Effective icon size (explicit size, or the style default)."""
        if not self.__iconSize.isValid():
            size = self.style().pixelMetric(QStyle.PM_ButtonIconSize)
            return QSize(size, size)
        else:
            return QSize(self.__iconSize)

    def setIconSize(self, iconSize):
        # type: (QSize) -> None
        """Set an explicit icon size (invalid QSize restores the default)."""
        if self.__iconSize != iconSize:
            self.__iconSize = QSize(iconSize)
            self.updateGeometry()
            self.update()

    def sizeHint(self):
        # Icon size plus the widget's content margins.
        sh = self.iconSize()
        m = self.contentsMargins()
        return QSize(sh.width() + m.left() + m.right(),
                     sh.height() + m.top() + m.bottom())

    def paintEvent(self, event):
        painter = QStylePainter(self)
        opt = QStyleOption()
        opt.initFrom(self)
        painter.drawPrimitive(QStyle.PE_Widget, opt)
        if not self.__icon.isNull():
            rect = self.contentsRect()
            # Grey the icon out when the window is inactive.
            if opt.state & QStyle.State_Active:
                mode = QIcon.Active
            else:
                mode = QIcon.Disabled
            self.__icon.paint(painter, rect, Qt.AlignCenter, mode, QIcon.Off)
        painter.end()
def main(argv=None):  # pragma: no cover
    """Interactive demo: toggle sample messages in a MessagesWidget
    embedded in a status bar.  Returns the Qt event-loop exit code."""
    from AnyQt.QtWidgets import QVBoxLayout, QCheckBox, QStatusBar
    app = QApplication(list(argv) if argv else [])
    l1 = QVBoxLayout()
    l1.setContentsMargins(0, 0, 0, 0)
    blayout = QVBoxLayout()
    l1.addLayout(blayout)
    sb = QStatusBar()

    w = QWidget()
    w.setLayout(l1)
    # One sample message per severity, plus an empty one.
    messages = [
        Message(Severity.Error, text="Encountered a HCF",
                detailedText="<em>AAA! It burns.</em>",
                textFormat=Qt.RichText),
        Message(Severity.Warning,
                text="ACHTUNG!",
                detailedText=(
                    "<div style=\"color: red\">DAS KOMPUTERMASCHINE IST "
                    "NICHT FÜR DER GEFINGERPOKEN</div>"
                ),
                textFormat=Qt.RichText),
        Message(Severity.Information,
                text="The rain in spain falls mostly on the plain",
                informativeText=(
                    "<a href=\"https://www.google.si/search?q="
                    "Average+Yearly+Precipitation+in+Spain\">Link</a>"
                ),
                textFormat=Qt.RichText),
        Message(Severity.Error,
                text="I did not do this!",
                informativeText="The computer made suggestions...",
                detailedText="... and the default options was yes."),
        Message(),
    ]
    mw = MessagesWidget(openExternalLinks=True)
    # Each checkbox adds/removes its message (keyed by index).
    for i, m in enumerate(messages):
        cb = QCheckBox(m.text)

        def toogled(state, i=i, m=m):
            if state:
                mw.setMessage(i, m)
            else:
                mw.removeMessage(i)
        cb.toggled[bool].connect(toogled)
        blayout.addWidget(cb)

    sb.addWidget(mw)
    w.layout().addWidget(sb, 0)
    w.show()
    return app.exec_()
# Script entry point: run the interactive demo.
if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv))
/MNN-0.0.7-cp27-cp27mu-manylinux2010_x86_64.whl/MNNTools/MNN_FB/InnerProduct.py |
# namespace: MNN
import flatbuffers
# NOTE: auto-generated by the FlatBuffers compiler (flatc) from the MNN
# schema -- do not hand-edit; regenerate from the .fbs file instead.
class InnerProduct(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsInnerProduct(cls, buf, offset):
        # Resolve the root table offset and wrap `buf` in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = InnerProduct()
        x.Init(buf, n + offset)
        return x

    # InnerProduct
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # InnerProduct (vtable slot 4); 0 when the field is absent.
    def OutputCount(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # InnerProduct
    def BiasTerm(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # InnerProduct
    def WeightSize(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # InnerProduct -- element j of the float32 weight vector.
    def Weight(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # InnerProduct -- whole weight vector as a numpy array (0 if absent).
    def WeightAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0

    # InnerProduct
    def WeightLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # InnerProduct -- element j of the float32 bias vector.
    def Bias(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # InnerProduct -- whole bias vector as a numpy array (0 if absent).
    def BiasAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
        return 0

    # InnerProduct
    def BiasLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # InnerProduct
    def Axis(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # InnerProduct
    def Transpose(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # InnerProduct -- nested IDSTQuan sub-table, or None when absent.
    def QuanParameter(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .IDSTQuan import IDSTQuan
            obj = IDSTQuan()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
# Generated builder helper: begin an InnerProduct table (8 declared fields).
def InnerProductStart(builder): builder.StartObject(8)
# Generated builder helper: write int32 field 0 (default 0).
def InnerProductAddOutputCount(builder, outputCount): builder.PrependInt32Slot(0, outputCount, 0)
def InnerProductAddBiasTerm(builder, biasTerm): builder.PrependInt32Slot(1, biasTerm, 0)
def InnerProductAddWeightSize(builder, weightSize): builder.PrependInt32Slot(2, weightSize, 0)
def InnerProductAddWeight(builder, weight): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(weight), 0)
def InnerProductStartWeightVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def InnerProductAddBias(builder, bias): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(bias), 0)
def InnerProductStartBiasVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def InnerProductAddAxis(builder, axis): builder.PrependInt32Slot(5, axis, 0)
def InnerProductAddTranspose(builder, transpose): builder.PrependBoolSlot(6, transpose, 0)
def InnerProductAddQuanParameter(builder, quanParameter): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(quanParameter), 0)
def InnerProductEnd(builder): return builder.EndObject() | PypiClean |
/Kamaelia-0.6.0.tar.gz/Kamaelia-0.6.0/Tools/DocGen/TestSuiteRun.py | import re
import os
import sys
def writeOut(filename, data):
    """Write *data* to the named file, replacing any existing content.

    Uses a context manager so the file handle is closed even when the
    write raises (the previous open/write/close sequence leaked the
    handle on error).

    :param filename: path of the file to (over)write
    :param data: string content to write
    """
    with open(filename, "w") as out_file:
        out_file.write(data)
def processDirectory(suiteDir, outFilePath, filePattern):
    """\
    Recurse through test suite directory running any python files matching the
    specified filename pattern (a compiled regular expression) and collecting
    the output and splitting it into separate output text files.

    :param suiteDir: directory to scan; subdirectories are recursed into
    :param outFilePath: dotted prefix for the generated output file names
    :param filePattern: compiled regex; its group(1) names the output fragment

    NOTE: Python 2 only -- uses ``print`` statements and ``os.popen4``
    (removed in Python 3; ``subprocess`` is the modern replacement).
    """
    dirEntries = os.listdir(suiteDir)
    for filename in dirEntries:
        filepath = os.path.join(suiteDir, filename)

        if os.path.isdir(filepath):
            # Subdirectory: recurse, extending the dotted output prefix
            # with the directory name.
            processDirectory(filepath, outFilePath+"."+filename, filePattern)
        else:
            match = filePattern.match(filename)
            if match:
                nameFragment = match.group(1)
                outname = outFilePath+"."+nameFragment
                print "Running: "+filepath+" ..."
                print
                # Run the test file in verbose mode; popen4 merges the
                # child's stdout and stderr into a single pipe.
                inpipe, outpipe = os.popen4(filepath+" -v")
                lines = outpipe.readlines()
                inpipe.close()
                outpipe.close()
                # Split the unittest output into pass/fail/message
                # categories and write each to its own file.
                output, failures, msgs = parseLines(lines)
                writeOut(outname+"...ok", "".join(output))
                writeOut(outname+"...fail", "".join(failures))
                writeOut(outname+"...msgs", "".join(msgs))
# Recognise unittest verbose-mode result lines; group(1) is the test name.
pattern_ok = re.compile("^(.*) \.\.\. ok\n$")
pattern_fail = re.compile("^(.*) \.\.\. FAIL\n$")
def parseLines(lines):
    """\
    Parse lines of output from a unittest run, separating them into
    passes, failures and messages

    :param lines: list of raw output lines from a ``unittest`` verbose run
    :returns: (passes, failures, msgs) -- three lists of lines

    NOTE: Python 2 only (uses a ``print`` statement).
    """
    passes = []
    failures = []
    msgs = []
    # Two-phase state machine: per-test result lines come first, then the
    # trailing error-report block up to the "Ran N tests in Xs" summary.
    state="LINES"
    for line in lines:
        print line,  # echo the raw output as we go
        if state=="LINES":
            if pattern_ok.match(line):
                msg = pattern_ok.match(line).group(1)
                passes.append(msg+"\n")
            elif pattern_fail.match(line):
                msg = pattern_fail.match(line).group(1)
                failures.append(msg+"\n")
            else:
                # First unrecognised line marks the start of the
                # error-report block.
                state="ERROR REPORTS"
        # Deliberately NOT 'elif': the line that triggered the transition
        # above is itself part of the report block and must be captured.
        if state=="ERROR REPORTS":
            if re.match("Ran \d+ tests? in \d*(\.\d+)?s\n$",line):
                # Summary line ends the report; remaining lines are ignored.
                state="DONE"
            else:
                msgs.append(line)
    return passes,failures,msgs
if __name__ == "__main__":
    testSuiteDir = None
    testOutputDir = None
    moduleRoot = None
    # BUGFIX: codeBaseDir was never initialised, so omitting --codebase
    # raised a NameError at the `if codeBaseDir is not None:` check below.
    codeBaseDir = None
    filePattern = re.compile("^test_([^\.]*)\.py$")

    # Lowercase anything that looks like a long option so option matching
    # below is case-insensitive; other arguments are kept verbatim.
    cmdLineArgs = []
    for arg in sys.argv[1:]:
        if arg[:2] == "--" and len(arg)>2:
            cmdLineArgs.append(arg.lower())
        else:
            cmdLineArgs.append(arg)

    if not cmdLineArgs or "--help" in cmdLineArgs or "-h" in cmdLineArgs:
        sys.stderr.write("\n".join([
            "Usage:",
            "",
            "    "+sys.argv[0]+" <arguments - see below>",
            "",
            "Optional arguments:",
            "",
            "    --help Display this help message",
            "",
            "    --codebase <dir> The directory containing the codebase - will be",
            "                     pre-pended to python's module path. Default is nothing.",
            "",
            "    --root <moduleRoot> The module path leading up to the repositoryDir specified",
            "                        eg. Axon, if testSuiteDir='.../Tests/Python/Axon/'",
            "                        Default is the leaf directory name of the <testSuiteDir>",
            "",
            "Mandatory arguments:",
            "",
            "    --outdir <dir> Directory to put output into (default is 'pydoc')",
            "                   directory must already exist (and be emptied)",
            "",
            "    <testSuiteDir> Use Kamaelia modules here instead of the installed ones",
            "",
            "",
        ]))
        sys.exit(0)

    # Pull option values out of the argument list, deleting each option and
    # its value as it is consumed; whatever remains must be the single
    # positional <testSuiteDir> argument (or nothing).
    try:
        if "--outdir" in cmdLineArgs:
            index = cmdLineArgs.index("--outdir")
            testOutputDir = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--root" in cmdLineArgs:
            index = cmdLineArgs.index("--root")
            moduleRoot = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--codebase" in cmdLineArgs:
            index = cmdLineArgs.index("--codebase")
            codeBaseDir = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if len(cmdLineArgs)==1:
            testSuiteDir = cmdLineArgs[0]
        elif len(cmdLineArgs)==0:
            testSuiteDir = None
        else:
            # Leftover unrecognised arguments: jump to the error handler.
            raise
    except:
        # Deliberate catch-all: any parse problem (missing option value,
        # the bare `raise` above, ...) prints the same error and exits.
        sys.stderr.write("\n".join([
            "Error in command line arguments.",
            "Run with '--help' for info on command line arguments.",
            "",
            "",
        ]))
        sys.exit(1)

    # Empty the argument list so it cannot leak into code we run later.
    sys.argv=sys.argv[0:0]

    assert(testSuiteDir)
    assert(testOutputDir)

    if moduleRoot is None:
        # if no module root specified, strip down the test suite dir for the leaf directory name
        moduleRoot = os.path.abspath(testSuiteDir)
        moduleRoot = os.path.split(moduleRoot)[1]
        assert(moduleRoot)

    if codeBaseDir is not None:
        # if codebase is specified, set the pythonpath variable so it will
        # be found by subsequent python apps we run
        os.putenv("PYTHONPATH",codeBaseDir)

    outDir = os.path.join(testOutputDir,moduleRoot) # ensure its already got the suffix
    processDirectory(testSuiteDir,outDir,filePattern)
/DisplaceNet-0.1.tar.gz/DisplaceNet-0.1/preprocessing/emotic/x_train_csv_to_numpy.py | import numpy as np
import pandas
from utils.generic_utils import progress, imagenet_preprocess_input, places_preprocess_input
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.applications.resnet50 import preprocess_input
# reference https://stackoverflow.com/questions/12984426/python-pil-ioerror-image-file-truncated-with-big-images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def save_x_train_to_numpy(base_img_dir,
                          base_csv_dir,
                          input_size,
                          input_img_mode,
                          mode='train',
                          to_file = 'numpy_annotations/x_train'
                          ):
    """ Saves images loaded from a CSV to numpy array.

    # Arguments
        base_img_dir: the directory where the raw images are stored.
            In our setup, we:
                - created train/ val/ and test/ subfolders inside EMOTIC_database/
        base_csv_dir: the directory where the CSV files are stored.
        input_size: the default input size for the model (ref https://keras.io/applications/).
            All models have input size of 224x224 except Xception, InceptionV3 and
            InceptionResNetV2 which have input size of 299x299.
        input_img_mode: one of `body` (cropped images) or `image` (entire images)
        mode: one of `train` (train set), `val` (validation set)
            or `test` (test set).
        to_file: the name or path of the numpy array where the images will be saved.

    # Returns
        The (nb_samples, input_size, input_size, 3) uint8 array that was saved.

    # Raises
        ValueError: if `mode` or `input_img_mode` is not an allowed value.
    """
    # Hard-coded sample counts for the EMOTIC train/val/test splits.
    if mode == 'train':
        nb_samples = 23706
    elif mode == 'val':
        nb_samples = 3332
    elif mode == 'test':
        nb_samples = 7280
    else:
        # Previously an unknown `mode` crashed later with a NameError on
        # nb_samples -- fail fast with a clear message instead.
        raise ValueError("`mode` must be 'train', 'val' or 'test', got %r" % (mode,))

    if input_img_mode not in ('body', 'image'):
        # Previously an unknown `input_img_mode` silently skipped the
        # model-specific preprocessing, yielding wrong training data.
        raise ValueError("`input_img_mode` must be 'body' or 'image', got %r" % (input_img_mode,))

    # Load the CSV file with pandas; <mode>.csv lists one image filename per row.
    csv_name = base_csv_dir + mode + '.csv'
    csv_file = pandas.read_csv(csv_name)

    field_number = 0
    # Pre-allocate the data array and load images directly into it.
    # ref: https://hjweide.github.io/efficient-image-loading
    data = np.empty((nb_samples, input_size, input_size, 3), dtype=np.uint8)

    for img_name in csv_file.filename:
        progress(field_number, nb_samples)
        img_name = base_img_dir + img_name

        img = image.load_img(img_name, target_size=(input_size, input_size))  # load & resize
        x = image.img_to_array(img)    # Numpy array with shape (input_size, input_size, 3)
        x = np.expand_dims(x, axis=0)  # add batch dim -> (1, input_size, input_size, 3)

        # Apply the preprocessing matching the target network family:
        # ImageNet-based models for cropped bodies, Places-based for scenes.
        if input_img_mode == 'body':
            x = imagenet_preprocess_input(x)
        elif input_img_mode == 'image':
            x = places_preprocess_input(x)

        data[field_number, ...] = x
        field_number += 1
        if field_number > nb_samples - 1:
            # Guard: the CSV may list more rows than the pre-allocated array holds.
            break

    np.save(to_file, data)
    return data
if __name__ == '__main__':
    # BUGFIX: stripped non-code residue ("| PypiClean") that was fused onto
    # the final line and made the module unparsable.

    # one of `train` (train set), `val` (validation set) or `test` (test set).
    mode = 'test'
    # one of `body` (cropped person images) or `image` (entire images)
    input_img_mode = 'image'

    # Select the matching source directory and output file prefix.
    if input_img_mode == 'body':
        base_img_dir = '/home/sandbox/Desktop/EMOTIC_resources/raw_refined_images/cropped_imgs/'
        to_file = 'x_body_'+mode
    elif input_img_mode == 'image':
        base_img_dir = '/home/sandbox/Desktop/EMOTIC_resources/raw_refined_images/entire_multiple_imgs/'
        to_file = 'x_image_' + mode

    x_train = save_x_train_to_numpy(base_img_dir=base_img_dir,
                                    base_csv_dir='/home/sandbox/Desktop/EMOTIC_resources/VAD-classification/CSV/',
                                    input_size=224,
                                    input_img_mode = input_img_mode,
                                    mode=mode,
                                    to_file=to_file)
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/debug/README.md | # debug
[](https://travis-ci.org/debug-js/debug) [](https://coveralls.io/github/debug-js/debug?branch=master) [](https://visionmedia-community-slackin.now.sh/) [](#backers)
[](#sponsors)
<img width="647" src="https://user-images.githubusercontent.com/71256/29091486-fa38524c-7c37-11e7-895f-e7ec8e1039b6.png">
A tiny JavaScript debugging utility modelled after Node.js core's debugging
technique. Works in Node.js and web browsers.
## Installation
```bash
$ npm install debug
```
## Usage
`debug` exposes a function; simply pass this function the name of your module, and it will return a decorated version of `console.error` for you to pass debug statements to. This will allow you to toggle the debug output for different parts of your module as well as the module as a whole.
Example [_app.js_](./examples/node/app.js):
```js
var debug = require('debug')('http')
, http = require('http')
, name = 'My App';
// fake app
debug('booting %o', name);
http.createServer(function(req, res){
debug(req.method + ' ' + req.url);
res.end('hello\n');
}).listen(3000, function(){
debug('listening');
});
// fake worker of some kind
require('./worker');
```
Example [_worker.js_](./examples/node/worker.js):
```js
var a = require('debug')('worker:a')
, b = require('debug')('worker:b');
function work() {
a('doing lots of uninteresting work');
setTimeout(work, Math.random() * 1000);
}
work();
function workb() {
b('doing some work');
setTimeout(workb, Math.random() * 2000);
}
workb();
```
The `DEBUG` environment variable is then used to enable these based on space or
comma-delimited names.
Here are some examples:
<img width="647" alt="screen shot 2017-08-08 at 12 53 04 pm" src="https://user-images.githubusercontent.com/71256/29091703-a6302cdc-7c38-11e7-8304-7c0b3bc600cd.png">
<img width="647" alt="screen shot 2017-08-08 at 12 53 38 pm" src="https://user-images.githubusercontent.com/71256/29091700-a62a6888-7c38-11e7-800b-db911291ca2b.png">
<img width="647" alt="screen shot 2017-08-08 at 12 53 25 pm" src="https://user-images.githubusercontent.com/71256/29091701-a62ea114-7c38-11e7-826a-2692bedca740.png">
#### Windows command prompt notes
##### CMD
On Windows the environment variable is set using the `set` command.
```cmd
set DEBUG=*,-not_this
```
Example:
```cmd
set DEBUG=* & node app.js
```
##### PowerShell (VS Code default)
PowerShell uses different syntax to set environment variables.
```cmd
$env:DEBUG = "*,-not_this"
```
Example:
```cmd
$env:DEBUG='app';node app.js
```
Then, run the program to be debugged as usual.
npm script example:
```js
"windowsDebug": "@powershell -Command $env:DEBUG='*';node app.js",
```
## Namespace Colors
Every debug instance has a color generated for it based on its namespace name.
This helps when visually parsing the debug output to identify which debug instance
a debug line belongs to.
#### Node.js
In Node.js, colors are enabled when stderr is a TTY. You also _should_ install
the [`supports-color`](https://npmjs.org/supports-color) module alongside debug,
otherwise debug will only use a small handful of basic colors.
<img width="521" src="https://user-images.githubusercontent.com/71256/29092181-47f6a9e6-7c3a-11e7-9a14-1928d8a711cd.png">
#### Web Browser
Colors are also enabled on "Web Inspectors" that understand the `%c` formatting
option. These are WebKit web inspectors, Firefox ([since version
31](https://hacks.mozilla.org/2014/05/editable-box-model-multiple-selection-sublime-text-keys-much-more-firefox-developer-tools-episode-31/))
and the Firebug plugin for Firefox (any version).
<img width="524" src="https://user-images.githubusercontent.com/71256/29092033-b65f9f2e-7c39-11e7-8e32-f6f0d8e865c1.png">
## Millisecond diff
When actively developing an application it can be useful to see how much time is spent between one `debug()` call and the next. Suppose for example you invoke `debug()` before requesting a resource, and after as well; the "+NNNms" will show you how much time was spent between calls.
<img width="647" src="https://user-images.githubusercontent.com/71256/29091486-fa38524c-7c37-11e7-895f-e7ec8e1039b6.png">
When stdout is not a TTY, `Date#toISOString()` is used, making it more useful for logging the debug information as shown below:
<img width="647" src="https://user-images.githubusercontent.com/71256/29091956-6bd78372-7c39-11e7-8c55-c948396d6edd.png">
## Conventions
If you're using this in one or more of your libraries, you _should_ use the name of your library so that developers may toggle debugging as desired without guessing names. If you have more than one debugger you _should_ prefix them with your library name and use ":" to separate features. For example "bodyParser" from Connect would then be "connect:bodyParser". If you append a "*" to the end of your name, it will always be enabled regardless of the setting of the DEBUG environment variable. You can then use it for normal output as well as debug output.
## Wildcards
The `*` character may be used as a wildcard. Suppose for example your library has
debuggers named "connect:bodyParser", "connect:compress", "connect:session",
instead of listing all three with
`DEBUG=connect:bodyParser,connect:compress,connect:session`, you may simply do
`DEBUG=connect:*`, or to run everything using this module simply use `DEBUG=*`.
You can also exclude specific debuggers by prefixing them with a "-" character.
For example, `DEBUG=*,-connect:*` would include all debuggers except those
starting with "connect:".
## Environment Variables
When running through Node.js, you can set a few environment variables that will
change the behavior of the debug logging:
| Name | Purpose |
|-----------|-------------------------------------------------|
| `DEBUG` | Enables/disables specific debugging namespaces. |
| `DEBUG_HIDE_DATE` | Hide date from debug output (non-TTY). |
| `DEBUG_COLORS`| Whether or not to use colors in the debug output. |
| `DEBUG_DEPTH` | Object inspection depth. |
| `DEBUG_SHOW_HIDDEN` | Shows hidden properties on inspected objects. |
__Note:__ The environment variables beginning with `DEBUG_` end up being
converted into an Options object that gets used with `%o`/`%O` formatters.
See the Node.js documentation for
[`util.inspect()`](https://nodejs.org/api/util.html#util_util_inspect_object_options)
for the complete list.
## Formatters
Debug uses [printf-style](https://wikipedia.org/wiki/Printf_format_string) formatting.
Below are the officially supported formatters:
| Formatter | Representation |
|-----------|----------------|
| `%O` | Pretty-print an Object on multiple lines. |
| `%o` | Pretty-print an Object all on a single line. |
| `%s` | String. |
| `%d` | Number (both integer and float). |
| `%j` | JSON. Replaced with the string '[Circular]' if the argument contains circular references. |
| `%%` | Single percent sign ('%'). This does not consume an argument. |
### Custom formatters
You can add custom formatters by extending the `debug.formatters` object.
For example, if you wanted to add support for rendering a Buffer as hex with
`%h`, you could do something like:
```js
const createDebug = require('debug')
createDebug.formatters.h = (v) => {
return v.toString('hex')
}
// …elsewhere
const debug = createDebug('foo')
debug('this is hex: %h', new Buffer('hello world'))
// foo this is hex: 68656c6c6f20776f726c6421 +0ms
```
## Browser Support
You can build a browser-ready script using [browserify](https://github.com/substack/node-browserify),
or just use the [browserify-as-a-service](https://wzrd.in/) [build](https://wzrd.in/standalone/debug@latest),
if you don't want to build it yourself.
Debug's enable state is currently persisted by `localStorage`.
Consider the situation shown below where you have `worker:a` and `worker:b`,
and wish to debug both. You can enable this using `localStorage.debug`:
```js
localStorage.debug = 'worker:*'
```
And then refresh the page.
```js
a = debug('worker:a');
b = debug('worker:b');
setInterval(function(){
a('doing some work');
}, 1000);
setInterval(function(){
b('doing some work');
}, 1200);
```
In Chromium-based web browsers (e.g. Brave, Chrome, and Electron), the JavaScript console will—by default—only show messages logged by `debug` if the "Verbose" log level is _enabled_.
<img width="647" src="https://user-images.githubusercontent.com/7143133/152083257-29034707-c42c-4959-8add-3cee850e6fcf.png">
## Output streams
By default `debug` will log to stderr, however this can be configured per-namespace by overriding the `log` method:
Example [_stdout.js_](./examples/node/stdout.js):
```js
var debug = require('debug');
var error = debug('app:error');
// by default stderr is used
error('goes to stderr!');
var log = debug('app:log');
// set this namespace to log via console.log
log.log = console.log.bind(console); // don't forget to bind to console!
log('goes to stdout');
error('still goes to stderr!');
// set all output to go via console.info
// overrides all per-namespace log settings
debug.log = console.info.bind(console);
error('now goes to stdout via console.info');
log('still goes to stdout, but via console.info now');
```
## Extend
You can simply extend debugger
```js
const log = require('debug')('auth');
//creates new debug instance with extended namespace
const logSign = log.extend('sign');
const logLogin = log.extend('login');
log('hello'); // auth hello
logSign('hello'); //auth:sign hello
logLogin('hello'); //auth:login hello
```
## Set dynamically
You can also enable debug dynamically by calling the `enable()` method :
```js
let debug = require('debug');
console.log(1, debug.enabled('test'));
debug.enable('test');
console.log(2, debug.enabled('test'));
debug.disable();
console.log(3, debug.enabled('test'));
```
print :
```
1 false
2 true
3 false
```
Usage :
`enable(namespaces)`
`namespaces` can include modes separated by a colon and wildcards.
Note that calling `enable()` completely overrides previously set DEBUG variable :
```
$ DEBUG=foo node -e 'var dbg = require("debug"); dbg.enable("bar"); console.log(dbg.enabled("foo"))'
=> false
```
`disable()`
Will disable all namespaces. The function returns the namespaces currently
enabled (and skipped). This can be useful if you want to disable debugging
temporarily without knowing what was enabled to begin with.
For example:
```js
let debug = require('debug');
debug.enable('foo:*,-foo:bar');
let namespaces = debug.disable();
debug.enable(namespaces);
```
Note: There is no guarantee that the string will be identical to the initial
enable string, but semantically they will be identical.
## Checking whether a debug target is enabled
After you've created a debug instance, you can determine whether or not it is
enabled by checking the `enabled` property:
```javascript
const debug = require('debug')('http');
if (debug.enabled) {
// do stuff...
}
```
You can also manually toggle this property to force the debug instance to be
enabled or disabled.
## Usage in child processes
Due to the way `debug` detects if the output is a TTY or not, colors are not shown in child processes when `stderr` is piped. A solution is to pass the `DEBUG_COLORS=1` environment variable to the child process.
For example:
```javascript
worker = fork(WORKER_WRAP_PATH, [workerPath], {
stdio: [
/* stdin: */ 0,
/* stdout: */ 'pipe',
/* stderr: */ 'pipe',
'ipc',
],
env: Object.assign({}, process.env, {
DEBUG_COLORS: 1 // without this settings, colors won't be shown
}),
});
worker.stderr.pipe(process.stderr, { end: false });
```
## Authors
- TJ Holowaychuk
- Nathan Rajlich
- Andrew Rhyne
- Josh Junon
## Backers
Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/debug#backer)]
<a href="https://opencollective.com/debug/backer/0/website" target="_blank"><img src="https://opencollective.com/debug/backer/0/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/1/website" target="_blank"><img src="https://opencollective.com/debug/backer/1/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/2/website" target="_blank"><img src="https://opencollective.com/debug/backer/2/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/3/website" target="_blank"><img src="https://opencollective.com/debug/backer/3/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/4/website" target="_blank"><img src="https://opencollective.com/debug/backer/4/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/5/website" target="_blank"><img src="https://opencollective.com/debug/backer/5/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/6/website" target="_blank"><img src="https://opencollective.com/debug/backer/6/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/7/website" target="_blank"><img src="https://opencollective.com/debug/backer/7/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/8/website" target="_blank"><img src="https://opencollective.com/debug/backer/8/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/9/website" target="_blank"><img src="https://opencollective.com/debug/backer/9/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/10/website" target="_blank"><img src="https://opencollective.com/debug/backer/10/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/11/website" target="_blank"><img src="https://opencollective.com/debug/backer/11/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/12/website" target="_blank"><img src="https://opencollective.com/debug/backer/12/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/13/website" target="_blank"><img src="https://opencollective.com/debug/backer/13/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/14/website" target="_blank"><img src="https://opencollective.com/debug/backer/14/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/15/website" target="_blank"><img src="https://opencollective.com/debug/backer/15/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/16/website" target="_blank"><img src="https://opencollective.com/debug/backer/16/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/17/website" target="_blank"><img src="https://opencollective.com/debug/backer/17/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/18/website" target="_blank"><img src="https://opencollective.com/debug/backer/18/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/19/website" target="_blank"><img src="https://opencollective.com/debug/backer/19/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/20/website" target="_blank"><img src="https://opencollective.com/debug/backer/20/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/21/website" target="_blank"><img src="https://opencollective.com/debug/backer/21/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/22/website" target="_blank"><img src="https://opencollective.com/debug/backer/22/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/23/website" target="_blank"><img src="https://opencollective.com/debug/backer/23/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/24/website" target="_blank"><img src="https://opencollective.com/debug/backer/24/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/25/website" target="_blank"><img src="https://opencollective.com/debug/backer/25/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/26/website" target="_blank"><img src="https://opencollective.com/debug/backer/26/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/27/website" target="_blank"><img src="https://opencollective.com/debug/backer/27/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/28/website" target="_blank"><img src="https://opencollective.com/debug/backer/28/avatar.svg"></a>
<a href="https://opencollective.com/debug/backer/29/website" target="_blank"><img src="https://opencollective.com/debug/backer/29/avatar.svg"></a>
## Sponsors
Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/debug#sponsor)]
<a href="https://opencollective.com/debug/sponsor/0/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/1/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/2/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/3/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/4/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/5/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/6/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/7/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/8/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/9/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/9/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/10/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/10/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/11/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/11/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/12/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/12/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/13/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/13/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/14/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/14/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/15/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/15/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/16/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/16/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/17/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/17/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/18/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/18/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/19/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/19/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/20/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/20/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/21/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/21/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/22/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/22/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/23/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/23/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/24/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/24/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/25/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/25/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/26/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/26/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/27/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/27/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/28/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/28/avatar.svg"></a>
<a href="https://opencollective.com/debug/sponsor/29/website" target="_blank"><img src="https://opencollective.com/debug/sponsor/29/avatar.svg"></a>
## License
(The MIT License)
Copyright (c) 2014-2017 TJ Holowaychuk <tj@vision-media.ca>
Copyright (c) 2018-2021 Josh Junon
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| PypiClean |
/Ivolution-1.0.tar.gz/Ivolution-1.0/ivolution/FacemovieThread.py | import sys
import threading
import time
import logging
import Facemovie_lib
class Observer():
    """A minimal Observer for the Observer pattern.

    Instances are identified by a name and receive notifications through
    :meth:`update`; this base implementation simply discards them.
    """
    def __init__(self, name="bob"):
        """Create an observer identified by *name*."""
        self.name = name

    def update(self, message):
        """Receive a notification; this base class ignores it."""
        if message is not None:
            pass

    def __str__(self):
        """Return the observer's name."""
        return self.name
class Observable():
    """A minimal Observable (subject) for the Observer pattern.

    Maintains a list of subscribed observers and pushes string messages
    to them through their ``update`` method.
    """
    def __init__(self):
        """Start with value 1 and an empty subscriber list."""
        self.val = 1
        self.obs_collection = []

    def subscribe(self, observer):
        """Add *observer* to the notification list, ignoring duplicates."""
        try:
            if observer not in self.obs_collection:
                self.obs_collection.append(observer)
        except TypeError:
            # Swallow comparison/containment failures, as before.
            pass

    def unsubscribe(self, observer):
        """Remove *observer* from the notification list if present."""
        try:
            if observer in self.obs_collection:
                self.obs_collection.remove(observer)
        except TypeError:
            # Swallow comparison/containment failures, as before.
            pass

    def notify(self, message):
        """Push *message* to every subscribed observer."""
        for subscriber in self.obs_collection:
            subscriber.update(message)

    def set_val(self, val=1):
        """Increment the value by *val* and notify observers with its string form."""
        self.val += val
        self.notify(str(self.val))
class FacemovieThread(threading.Thread, Observable, Observer):
    '''
    Creates a Thread version of Facemovie using the facemovie_lib.
    This class can then be run anywhere, from a GUI, script, ...
    '''
    def __init__(self, face_params):
        """
        Initializes all parameters of the application. Input and output folders
        are defined, together with the classifier profile.

        :param face_params: A faceparams object that contains all needed information to run the Facemovie.
        :type face_params: FaceParams
        """
        threading.Thread.__init__(self)
        Observable.__init__(self)
        Observer.__init__(self, "Facemovie")

        # Cooperative cancellation flag, checked between processing steps.
        self.stop_process = False
        self.face_params = face_params
        self.facemovie = Facemovie_lib.FaceMovie(self.face_params)

        self.my_logger = logging.getLogger('FileLog')
        self.console_logger = logging.getLogger('ConsoleLog')

    def update(self, message):
        """
        Triggered by IvolutionWindow.
        Uses the Observer pattern to inform the user about the progress of the GUI.

        :param message: a sequence whose first element is "STOP", "START",
                        or a free-form status string that is simply logged.
        """
        if message[0] == "STOP":
            self.console_logger.debug("Facemovie is going to stop")
            self.my_logger.debug("Facemovie is going to stop")
            self.stop_process = True
        elif message[0] == "START":
            # Bug fix: this branch previously logged "asked to stop",
            # which was a copy-paste of the STOP branch.
            self.console_logger.debug("Facemovie is asked to start")
            self.my_logger.debug("Facemovie is asked to start")
            self.stop_process = False
        else:
            self.console_logger.debug(message[0])
            self.my_logger.debug(message[0])

    def run(self):
        """Run the movie-creation pipeline, checking the stop flag
        before each step and notifying observers of the progress."""
        # (action to call, message for the log, message for observers, progress)
        steps = [
            (self.facemovie.list_guys, "Guys listed", "Pictures listed", 0.2),
            (self.facemovie.prepare_faces, "Faces prepared", "Faces detected", 0.6),
            (self.facemovie.find_final_dimensions, "Final dimensions found", "Video dimensions found", 0.8),
            (self.facemovie.save_movie, "Movie saved", "Movie saved, Finished!", 1.0),
        ]
        for action, log_message, notify_message, progress in steps:
            if self.stop_process:
                break
            action()
            self.my_logger.debug(log_message)
            self.notify([notify_message, progress])

        if self.stop_process:
            self.notify(["Process cancelled!", 1.0])
        else:
            self.my_logger.debug("Thread terminated")
/aleksis_core-3.1.5-py3-none-any.whl/aleksis/core/frontend/mixins/offline.js | import gqlPing from "../components/app/ping.graphql";
/**
* Mixin for handling of offline state / background queries.
*
* This handles three scenarios:
* - The navigator reports that it is in offline mode
* - The global offline flag was set due to network errors from queries
* - The navigator reports the page to be invisible
*
* The main goal is to save bandwidth, energy and server load in error
* conditions, or when the page is not in focus. This is achieved by a
* fallback strategy, where all background queries are stopped in offline
* state, and only a ping query is sent once the navigator reports itself
* as online and the app gets into focus. Once this ping query is successful,
* background activity is resumed.
*/
const offlineMixin = {
    data() {
        return {
            // Result of the most recent ping query; watched below to
            // clear the global offline flag.
            ping: null,
        };
    },
    mounted() {
        // Re-evaluate the offline state whenever connectivity or page
        // visibility changes.
        const listeners = [
            [window, "online", () => {
                console.info("Navigator changed status to online.");
                this.checkOfflineState();
            }],
            [window, "offline", () => {
                console.info("Navigator changed status to offline.");
                this.checkOfflineState();
            }],
            [document, "visibilitychange", () => {
                console.info("Visibility changed status to", document.visibilityState);
                this.checkOfflineState();
            }],
        ];
        for (const [target, eventName, handler] of listeners) {
            this.safeAddEventListener(target, eventName, handler);
        }
    },
    methods: {
        checkOfflineState() {
            // Background activity is allowed only while the browser
            // reports itself online and the page is actually visible.
            if (navigator.onLine && document.visibilityState === "visible") {
                console.info("Resuming background activity");
                this.$root.backgroundActive = true;
            } else {
                console.info("Pausing background activity");
                this.$root.backgroundActive = false;
            }
        },
    },
    apollo: {
        ping: {
            query: gqlPing,
            variables: () => ({
                payload: Date.now().toString(),
            }),
            pollInterval: 1000,
            // Ping only while background activity is on and we are
            // currently marked offline.
            skip: (component) =>
                !(component.$root.backgroundActive && component.$root.offline),
        },
    },
    watch: {
        ping() {
            console.info("Pong received, clearing offline state");
            this.$root.offline = false;
        },
    },
};
export default offlineMixin;
/HierCC-1.24.tar.gz/HierCC-1.24/HCCeval_fastANI.py |
import os, sys, pandas as pd, numpy as np, logging, gzip
import click
from sklearn.metrics import silhouette_score, normalized_mutual_info_score
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from multiprocessing import Pool
import SharedArray as sa
from tempfile import NamedTemporaryFile
# getDistance is importable directly when this script runs from the source
# tree, but lives inside the package once installed; try both styles.
try :
    from getDistance import getDistance
except :
    from .getDistance import getDistance

# Emit timestamped INFO-level progress messages on stdout.
logging.basicConfig(format='%(asctime)s | %(message)s',stream=sys.stdout, level=logging.INFO)
def get_similarity2(data) :
    """Similarity between two clusterings, short-circuiting the trivial case.

    :param data: tuple ``(method, cc1, cc2)`` where *method* is a similarity
                 callable (e.g. ``normalized_mutual_info_score``) and
                 *cc1*/*cc2* are 1-D arrays of cluster labels.
    :returns: 1.0 when both label vectors are constant (a single cluster
              each, where NMI would be undefined), otherwise ``method(cc1, cc2)``.
    """
    method, cc1, cc2 = data
    # Bug fix: the second condition originally re-tested cc1, so any pair
    # where only cc1 was constant was wrongly reported as similarity 1.0.
    if np.unique(cc1).size == 1 and np.unique(cc2).size == 1 :
        return 1.
    return method(cc1, cc2)
def get_similarity(method, cluster, stepwise, pool) :
    """Build the symmetric similarity matrix between all clustering levels.

    :param method: similarity callable handed through to ``get_similarity2``
    :param cluster: 2-D array with one column of labels per hierarchical level
    :param stepwise: spacing between levels, used only for progress logging
    :param pool: multiprocessing pool used to parallelise the comparisons
    :returns: square matrix of pairwise similarities, clipped to [0.0, 0.999]
    """
    logging.info('Calculating NMIs...')
    n_levels = cluster.shape[1]
    similarity = np.ones([n_levels, n_levels], dtype=np.float64)
    for idx, column in enumerate(cluster.T) :
        if idx % 10 == 0 :
            logging.info('    NMIs between level {0} and greater levels'.format(idx * stepwise))
        jobs = [ [method, column, other] for other in cluster.T[idx+1:] ]
        similarity[idx, idx+1:] = pool.map(get_similarity2, jobs)
        # Mirror the row into the column to keep the matrix symmetric.
        similarity[idx+1:, idx] = similarity[idx, idx+1:]
    # Clamp so the downstream log10(1 - similarity) plot stays finite.
    similarity[similarity > 0.999] = 0.999
    similarity[similarity < 0.0] = 0.0
    return similarity
def get_silhouette(dist, cluster, stepwise, pool) :
    """Compute silhouette scores for every clustering level.

    The distance matrix is copied into a SharedArray buffer so that the
    worker processes can attach to it instead of pickling the full matrix.

    :param dist: square precomputed distance matrix
    :param cluster: 2-D array with one column of labels per level
    :param stepwise: level spacing (unused here; kept for interface parity)
    :param pool: multiprocessing pool
    :returns: 1-D array with one silhouette score per level
    """
    # Bug fix: the buffer name was hard-coded to "x2.dist", so two
    # concurrent runs on the same host would clobber each other's data.
    # Keying by pid makes the name unique per run.
    dist_buf = 'file://hcc_{0}.dist'.format(os.getpid())
    dist2 = sa.create(dist_buf, dist.shape, dist.dtype)
    dist2[:] = dist[:]
    logging.info('Calculating Silhouette score ...')
    try :
        silhouette = np.array(pool.map(get_silhouette2, [ [dist_buf, tag] for tag in cluster.T ]))
    finally :
        # Always remove the shared buffer, even if a worker fails.
        sa.delete(dist_buf)
    return silhouette
def get_silhouette2(data) :
    """Worker: silhouette score for a single clustering level.

    :param data: tuple ``(buffer name, label vector)``; the buffer holds
                 the shared precomputed distance matrix.
    :returns: the silhouette score, or 0.0 when it is undefined (fewer
              than two clusters, or every point in its own cluster).
    """
    dist_buf, tag = data
    n_clusters = np.unique(tag).size
    if n_clusters < 2 or n_clusters >= tag.shape[0] :
        return 0.
    dist = sa.attach(dist_buf)
    return silhouette_score(dist.astype(float), tag, metric = 'precomputed')
def prepare_mat(profile_file) :
    """Load an allelic profile table and index its strain names.

    :param profile_file: tab-delimited file whose first row is a header;
                         the first column holds strain names and columns
                         whose header starts with '#' are metadata.
    :returns: ``(ids, mat)`` where *ids* maps strain name -> row index and
              *mat* is an integer matrix whose first column is that index.
    """
    raw = pd.read_csv(profile_file, sep='\t', header=None, dtype=str).values
    header, body = raw[0], raw[1:]
    # Assign each strain a dense integer id in file order and substitute
    # it for the name in the first column.
    ids = {name: idx for idx, name in enumerate(body[:, 0])}
    body[:, 0] = [ids[name] for name in body[:, 0]]
    # Keep the id column plus every non-comment ('#'-prefixed) column.
    keep = np.array([col == 0 or not label.startswith('#')
                     for col, label in enumerate(header)])
    mat = body[:, keep].astype(int)
    # Drop rows with a negative id (defensive; ids assigned above are >= 0).
    return ids, mat[mat.T[0] >= 0]
def read_fastANI(profile_file, ids) :
    """Read gzipped fastANI output into a symmetric distance matrix.

    :param profile_file: gzipped tab-separated file whose first three
                         columns are (genome1, genome2, ANI%%); any extra
                         columns are ignored.
    :param ids: dict mapping genome name -> matrix index
    :returns: matrix of distances (100 - ANI); unreported pairs default to
              an ANI of 75%%, the diagonal to 100%%.
    """
    n = len(ids)
    # Pairs missing from the fastANI report get a floor ANI of 75%.
    ani = np.full([n, n], 75., dtype=float)
    with gzip.open(profile_file, 'rt') as fin :
        for line in fin :
            name1, name2, value = line.strip().split('\t')[:3]
            ani[ids[name1], ids[name2]] = float(value)
    # Symmetrise by keeping the larger of the two reported values.
    lower = ani.T > ani
    ani[lower] = ani.T[lower]
    np.fill_diagonal(ani, 100.)
    return 100 - ani
@click.command()
@click.option('-p', '--profile', help='[INPUT] Name of a profile file consisting of a table of columns of the ST numbers and the allelic numbers, separated by tabs. Can be GZIPped.', required=True)
@click.option('-c', '--cluster', help='[INPUT] Name of the pHierCC text output. Can be GZIPped.', required=True)
@click.option('-o', '--output', help='[OUTPUT] Prefix for the two output files.', required=True)
@click.option('-s', '--stepwise', help='[INPUT; optional] Evaluate every <stepwise> levels (Default: 10).', default=10, type=int)
@click.option('-n', '--n_proc', help='[INPUT; optional] Number of processes (CPUs) to use (Default: 4).', default=4, type=int)
def evalHCC(profile, cluster, output, stepwise, n_proc) :
    '''evalHCC evaluates a HierCC scheme using varied statistic summaries.

    Writes two files: <output>.tsv with per-level silhouette scores plus the
    pairwise NMI matrix, and <output>.pdf with an NMI heatmap and a
    silhouette curve.

    NOTE(review): despite the option help text, -p/--profile is read here by
    read_fastANI as gzipped fastANI output, not an allelic profile table --
    confirm with callers.
    '''
    pool = Pool(n_proc)
    # The "cluster" file is parsed as a matrix: first column is the strain
    # id, the remaining columns are the HierCC assignments per level.
    ids, cluster = prepare_mat(cluster)
    dist = read_fastANI(profile, ids)
    print(len(dist), len(cluster))  # NOTE(review): looks like leftover debug output
    silhouette = get_silhouette(dist, cluster, stepwise, pool)
    similarity = get_similarity(normalized_mutual_info_score, cluster, stepwise, pool)
    # Tab-delimited summary: one '#Silhouette' line per level, then the NMI matrix.
    with open(output+'.tsv', 'w') as fout:
        levels = ['HC{0}'.format(lvl*stepwise) for lvl in np.arange(silhouette.shape[0])]
        for lvl, ss in zip(levels, silhouette) :
            fout.write('#Silhouette\t{0}\t{1}\n'.format(lvl, ss))
        fout.write('\n#NMI\t{0}\n'.format('\t'.join(levels)))
        for lvl, nmis in zip(levels, similarity):
            fout.write('{0}\t{1}\n'.format(lvl, '\t'.join([ '{0:.3f}'.format(nmi) for nmi in nmis ])))
    # Figure layout: NMI heatmap (top-left) with its colorbar (top-right),
    # silhouette curve below; the bottom-right axis is unused and removed.
    fig, axs = plt.subplots(2, 2, \
                figsize=(8, 12), \
                gridspec_kw={'width_ratios':(12, 1),
                             'height_ratios': (65, 35)})
    # Plot 10*log10(1-NMI) so nearly identical levels (NMI close to 1) are
    # still visually distinguishable.
    heatplot = axs[0, 0].imshow( (10*(np.log10(1-similarity))), \
                   norm=colors.TwoSlopeNorm(vmin=-30., vcenter=-10., vmax=0), \
                   cmap = 'RdBu',\
                   extent=[0, silhouette.shape[0]*stepwise, \
                           silhouette.shape[0]*stepwise, 0])
    cb = fig.colorbar(heatplot, cax=axs[0, 1])
    axs[1, 0].plot(np.arange(silhouette.shape[0])*stepwise, silhouette,)
    axs[1, 0].set_xlim([0, silhouette.shape[0]*stepwise])
    axs[1, 1].remove()
    axs[0, 0].set_ylabel('HCs (allelic distances)')
    axs[0, 0].set_xlabel('HCs (allelic distances)')
    axs[1, 0].set_ylabel('Silhouette scores')
    axs[1, 0].set_xlabel('HCs (allelic distances)')
    cb.set_label('Normalized Mutual Information')
    # Tick positions are in 10*log10(1-NMI) space; labels show the NMI itself.
    cb.set_ticks([-30, -23.01, -20, -13.01, -10, -3.01, 0])
    cb.ax.set_yticklabels(['>=.999', '.995', '.99', '.95', '.9', '.5', '.0'])
    plt.savefig(output+'.pdf')
    logging.info('Tab-delimited evaluation is saved in {0}.tsv'.format(output))
    logging.info('Visualisation is saved in {0}.pdf'.format(output))
    pool.close()

# Entry point when executed as a script; click parses the CLI options.
if __name__ == '__main__' :
    evalHCC()
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/blog/migrations/south/0013_auto__chg_field_blogpost_featured_image.py |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the active user model in a way that works across Django versions:
# get_user_model() (custom user model support) only exists from Django 1.5 on.
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Labels used to reference the (possibly custom) user model inside the
# frozen ORM definitions below.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
    """South schema migration: change ``BlogPost.featured_image`` from the
    stock Django ``FileField`` to ``mezzanine.core.fields.FileField``
    (max_length 255, nullable)."""

    def forwards(self, orm):
        """Apply the migration: switch the column to Mezzanine's FileField."""

        # Changing field 'BlogPost.featured_image'
        db.alter_column('blog_blogpost', 'featured_image', self.gf('mezzanine.core.fields.FileField')(max_length=255, null=True))

    def backwards(self, orm):
        """Revert the migration: restore the plain Django FileField."""

        # Changing field 'BlogPost.featured_image'
        db.alter_column('blog_blogpost', 'featured_image', self.gf('django.db.models.fields.files.FileField')(max_length=255, null=True))

    # Frozen ORM model definitions captured by South when the migration was
    # generated; auto-generated data, not meant to be edited by hand.  The
    # user model entries are keyed dynamically (user_model_label /
    # user_orm_label) so custom user models keep working.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blog.blogcategory': {
            'Meta': {'object_name': 'BlogCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'blog.blogpost': {
            'Meta': {'ordering': "('-publish_date',)", 'object_name': 'BlogPost'},
            'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['blog.BlogCategory']"}),
            #'comments': ('mezzanine.generic.fields.CommentsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.ThreadedComment']"}),
            'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            #'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']"}),
            'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['%s']" % user_orm_label})
        },
        'comments.comment': {
            'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['%s']" % user_orm_label}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.rating': {
            'Meta': {'object_name': 'Rating'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.threadedcomment': {
            'Meta': {'ordering': "('submit_date',)", 'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
            'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
            'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    # Tell South which app's frozen models are complete in this migration.
    complete_apps = ['blog']
/EdaSpiffWorkflow-0.0.2.tar.gz/EdaSpiffWorkflow-0.0.2/EdaSpiffWorkflow_Aadesh_G/bpmn/serializer/workflow.py | import json
import gzip
from copy import deepcopy
from uuid import UUID
from .bpmn_converters import BpmnDataConverter
from ..workflow import BpmnWorkflow
from ..specs.SubWorkflowTask import SubWorkflowTask
from ...task import Task
from .workflow_spec_converter import BpmnProcessSpecConverter
from .task_spec_converters import SimpleTaskConverter, StartTaskConverter, EndJoinConverter, LoopResetTaskConverter
from .task_spec_converters import NoneTaskConverter, UserTaskConverter, ManualTaskConverter, ScriptTaskConverter
from .task_spec_converters import CallActivityTaskConverter, TransactionSubprocessTaskConverter
from .task_spec_converters import StartEventConverter, EndEventConverter
from .task_spec_converters import IntermediateCatchEventConverter, IntermediateThrowEventConverter
from .task_spec_converters import BoundaryEventConverter, BoundaryEventParentConverter
from .task_spec_converters import ParallelGatewayConverter, ExclusiveGatewayConverter, InclusiveGatewayConverter
# Task spec converter classes registered by default; covers every task type
# produced by the stock BPMN parser.  Extend or override this set via
# BpmnWorkflowSerializer.configure_workflow_spec_converter().
DEFAULT_TASK_SPEC_CONVERTER_CLASSES = [
    SimpleTaskConverter, StartTaskConverter, EndJoinConverter, LoopResetTaskConverter,
    NoneTaskConverter, UserTaskConverter, ManualTaskConverter, ScriptTaskConverter,
    CallActivityTaskConverter, TransactionSubprocessTaskConverter,
    StartEventConverter, EndEventConverter,
    IntermediateCatchEventConverter, IntermediateThrowEventConverter,
    BoundaryEventConverter, BoundaryEventParentConverter,
    ParallelGatewayConverter, ExclusiveGatewayConverter, InclusiveGatewayConverter
]
class BpmnWorkflowSerializer:
    """
    This class implements a customizable BPMN Workflow serializer, based on a Workflow Spec Converter
    and a Data Converter.

    The goal is to provide modular serialization capabilities.

    You'll need to configure a Workflow Spec Converter with Task Spec Converters for any task types
    present in your workflows.  Because the Task Spec Converters also require initialization, the process
    of building a Workflow Spec Converter is a little tedious; therefore, this class provides a static
    method `configure_workflow_spec_converter` that can extend and/or override the default Task Spec
    Converter list and return a Workflow Spec Converter that will recognize the overridden specs.

    If you have implemented any custom task specs, you'll need to write a converter to handle them and
    provide it to this method; if you using only the defaults, you can call this with no arguments.

    If your workflow contains non-JSON-serializable objects, you'll need to extend or replace the
    default data converter with one that will handle them.  This converter needs to implement
    `convert` and `restore` methods.

    Serialization occurs in two phases: the first is to convert everything in the workflow to a
    dictionary containing only JSON-serializable objects and the second is dumping to JSON.

    This means that you can call the `workflow_to_dict` or `workflow_from_dict` methods separately from
    conversion to JSON for further manipulation of the state, or selective serialization of only certain
    parts of the workflow more conveniently.  You can of course call methods from the Workflow Spec and
    Data Converters via the `spec_converter` and `data_converter` attributes as well to bypass the
    overhead of converting or restoring the entire thing.
    """

    # This is the default version set on the workflow, it can be overwritten
    # using the configure_workflow_spec_converter.
    VERSION = "1.0"

    @staticmethod
    def configure_workflow_spec_converter(task_spec_overrides=None, data_converter=None, version=VERSION):
        """
        This method can be used to add additional task spec converters to the default BPMN Process
        converter.

        The task specs may contain arbitrary data, though none of the default task specs use it.  We
        may disallow that in the future, so we don't recommend using this capability.

        The task spec converters also take an optional typename argument; this will be included in the
        serialized dictionaries so that the original class can restored.  The unqualified classname is
        used if none is provided.  If a class in `task_spec_overrides` conflicts with one of the
        defaults, the default will be removed and the provided one will be used instead.  If you need
        both for some reason, you'll have to instantiate the task spec converters and workflow spec
        converter yourself.

        :param task_spec_overrides: a list of task spec converter classes
        :param data_converter: an optional data converter for task spec data
        """
        if task_spec_overrides is None:
            task_spec_overrides = []

        # Overrides win by classname: a default with the same name is dropped.
        classnames = [c.__name__ for c in task_spec_overrides]
        converters = [c(data_converter=data_converter) for c in task_spec_overrides]
        for c in DEFAULT_TASK_SPEC_CONVERTER_CLASSES:
            if c.__name__ not in classnames:
                converters.append(c(data_converter=data_converter))
        return BpmnProcessSpecConverter(converters, version)

    def __init__(self, spec_converter=None, data_converter=None, wf_class=None, version=VERSION):
        """Intializes a Workflow Serializer with the given Workflow, Task and Data Converters.

        :param spec_converter: the workflow spec converter
        :param data_converter: the data converter
        :param wf_class: the workflow class
        """
        super().__init__()
        self.spec_converter = spec_converter if spec_converter is not None else self.configure_workflow_spec_converter()
        self.data_converter = data_converter if data_converter is not None else BpmnDataConverter()
        self.wf_class = wf_class if wf_class is not None else BpmnWorkflow
        self.VERSION = version

    def serialize_json(self, workflow, use_gzip=False):
        """Serialize the dictionary representation of the workflow to JSON.

        :param workflow: the workflow to serialize
        :param use_gzip: if True, return gzip-compressed UTF-8 bytes
                         instead of a JSON string

        Returns:
            a JSON dump of the dictionary representation
        """
        dct = self.workflow_to_dict(workflow)
        # Stamp the serializer version so deserializers can detect format changes.
        dct['serializer_version'] = self.VERSION
        json_str = json.dumps(dct)
        return gzip.compress(json_str.encode('utf-8')) if use_gzip else json_str

    def deserialize_json(self, serialization, read_only=False, use_gzip=False):
        """Restore a workflow from the output of :meth:`serialize_json`.

        :param serialization: JSON string, or gzipped bytes when use_gzip
        :param read_only: passed through to the restored workflow
        """
        dct = json.loads(gzip.decompress(serialization)) if use_gzip else json.loads(serialization)
        # NOTE(review): the popped version is currently unused beyond
        # removing the key from the dict before restoration.
        version = dct.pop('serializer_version')
        return self.workflow_from_dict(dct, read_only)

    def get_version(self, serialization):
        """Return the serializer version stamped into *serialization*,
        or None if it cannot be determined (bad JSON, missing key)."""
        try:
            dct = json.loads(serialization)
            if 'serializer_version' in dct:
                return dct['serializer_version']
        except:  # Don't bail out trying to get a version, just return none.
            return None

    def workflow_to_dict(self, workflow):
        """Return a JSON-serializable dictionary representation of the workflow.

        :param workflow: the workflow

        Returns:
            a dictionary representation of the workflow
        """
        # Recursively search the workflow spec for subprocesses and store clean copies of each
        # (they are modified by running workflows) at the top level.
        subprocess_specs, subprocesses = {}, {}
        self.find_subprocesses(workflow.spec, workflow, subprocess_specs, subprocesses)
        for wf_name, wf_spec in subprocess_specs.items():
            subprocess_specs[wf_name] = self.spec_converter.convert(wf_spec)
        return {
            'spec': self.spec_converter.convert(workflow.spec),
            'data': self.data_converter.convert(workflow.data),
            'last_task': str(workflow.last_task.id) if workflow.last_task is not None else None,
            'success': workflow.success,
            'tasks': self.task_tree_to_dict(workflow.task_tree),
            'root': str(workflow.task_tree.id),
            'subprocess_specs': subprocess_specs,
            'subprocesses': subprocesses,
        }

    def workflow_from_dict(self, dct, read_only=False):
        """Create a workflow based on a dictionary representation.

        :param dct: the dictionary representation
        :param read_only: optionally disable modifying the workflow

        Returns:
            a BPMN Workflow object
        """
        # Work on a copy so the caller's dict is left untouched.
        dct_copy = deepcopy(dct)
        # First, we'll restore the specs for all the subprocesses
        for name, wf_dct in dct_copy['subprocess_specs'].items():
            dct_copy['subprocess_specs'][name] = self.spec_converter.restore(wf_dct)
        # Then we check each subprocess for subworkflow tasks and restore the workflow spec
        # on each task
        for wf in dct_copy['subprocess_specs'].values():
            for task_spec in wf.task_specs.values():
                if isinstance(task_spec, SubWorkflowTask):
                    task_spec.spec = dct_copy['subprocess_specs'][task_spec.spec]
        # We also have to do the same for the top level workflow.
        spec = self.spec_converter.restore(dct_copy.pop('spec'))
        for name, task_spec in spec.task_specs.items():
            if isinstance(task_spec, SubWorkflowTask):
                task_spec.spec = dct_copy['subprocess_specs'][task_spec.spec]
        workflow = self.wf_class(spec, read_only=read_only)
        workflow.data = self.data_converter.restore(dct_copy.pop('data'))
        workflow.success = dct_copy.pop('success')
        root = dct_copy.pop('root')
        workflow.task_tree = self.task_tree_from_dict(dct_copy, root, None, workflow, read_only)
        return workflow

    def task_to_dict(self, task):
        """Convert one task to a JSON-serializable dict (ids as strings)."""
        return {
            'id': str(task.id),
            'parent': str(task.parent.id) if task.parent is not None else None,
            'children': [ str(child.id) for child in task.children ],
            'last_state_change': task.last_state_change,
            'state': task.state,
            'task_spec': task.task_spec.name,
            'triggered': task.triggered,
            'workflow_name': task.workflow.name,
            'internal_data': self.data_converter.convert(task.internal_data),
            'data': self.data_converter.convert(task.data),
        }

    def task_from_dict(self, dct, workflow, task_spec, parent):
        """Rebuild one task from its dict form, attached to *parent*."""
        task = Task(workflow, task_spec, parent)
        task.id = UUID(dct['id'])
        task.state = dct['state']
        task.last_state_change = dct['last_state_change']
        task.triggered = dct['triggered']
        task.internal_data = self.data_converter.restore(dct['internal_data'])
        task.data = self.data_converter.restore(dct['data'])
        return task

    def task_tree_to_dict(self, root):
        """Flatten the task tree under *root* into {task id: task dict}."""
        tasks = { }
        def add_task(task):
            dct = self.task_to_dict(task)
            tasks[dct['id']] = dct
            for child in task.children:
                add_task(child)
        add_task(root)
        return tasks

    def task_tree_from_dict(self, dct, task_id, parent, workflow, read_only):
        """Recursively rebuild the task tree rooted at *task_id*.

        Subworkflow tasks get their own nested workflow instance; child
        tasks listed in dct['subprocesses'] belong to that nested workflow
        and are rebuilt under it rather than under *workflow*.
        """
        task_dict = dct['tasks'][task_id]
        task_spec = workflow.spec.task_specs[task_dict['task_spec']]
        task = self.task_from_dict(task_dict, workflow, task_spec, parent)
        if task_id == dct['last_task']:
            workflow.last_task = task

        children = [dct['tasks'][c] for c in task_dict['children']]
        subtasks = dct['subprocesses'].get(str(task_id), [])
        if isinstance(task_spec, SubWorkflowTask):
            if len(subtasks) > 0:
                task_spec.sub_workflow = self.wf_class(
                    task_spec.spec, name=task_spec.name, parent=workflow, read_only=read_only)
                # The first recorded subtask id is the nested workflow's root.
                root = subtasks[0]
                task_spec.sub_workflow.task_tree = self.task_tree_from_dict(
                    dct, root, task, task_spec.sub_workflow, read_only)
                task_spec.sub_workflow.completed_event.connect(task_spec._on_subworkflow_completed, task)
            else:
                task_spec.sub_workflow = None

        # Remaining children belong to the current workflow, not the subprocess.
        for child in [ c for c in children if c['id'] not in subtasks ]:
            self.task_tree_from_dict(dct, child['id'], task, workflow, read_only)
        return task

    def find_subprocesses(self, spec, workflow, subprocess_specs, subprocesses):
        """Recursively collect subprocess specs and running subprocess tasks.

        Fills ``subprocess_specs`` (spec name -> workflow spec) and
        ``subprocesses`` (parent task id -> list of task ids) in place.
        """
        for name, task_spec in spec.task_specs.items():
            if isinstance(task_spec, SubWorkflowTask):
                self.find_subprocesses(task_spec.spec, workflow, subprocess_specs, subprocesses)
                # NOTE(review): the guard tests task_spec.name but the dict is
                # keyed by task_spec.spec.name -- if those two names can
                # differ, entries may be overwritten or duplicated; confirm.
                if task_spec.name not in subprocess_specs:
                    subprocess_specs[task_spec.spec.name] = task_spec.spec
                if task_spec.sub_workflow is not None:
                    tasks = [task for task in workflow.get_tasks() if task in task_spec.sub_workflow.get_tasks()]
                    if len(tasks) > 0:
                        subprocesses[str(tasks[0].parent.id)] = [str(task.id) for task in tasks]
// NOTE(review): the next line carries a corrupted extraction header
// ("/FreePyBX-... |") fused to the start of the minified AMD module;
// it is left byte-identical here.
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/charting/plot2d/Bubble.js | define("dojox/charting/plot2d/Bubble",["dojo/_base/lang","dojo/_base/declare","dojo/_base/array","./Base","./common","dojox/lang/functional","dojox/lang/functional/reversed","dojox/lang/utils","dojox/gfx/fx"],function(_1,_2,_3,_4,dc,df,_5,du,fx){
// Minified dojox Bubble plot: renders each datum {x, y, size} of every
// series as a circle whose radius is derived from v.size.
var _6=_5.lambda("item.purgeGroup()");
return _2("dojox.charting.plot2d.Bubble",_4,{defaultParams:{hAxis:"x",vAxis:"y",animate:null},optionalParams:{stroke:{},outline:{},shadow:{},fill:{},font:"",fontColor:""},constructor:function(_7,_8){
this.opt=_1.clone(this.defaultParams);
du.updateWithObject(this.opt,_8);
du.updateWithPattern(this.opt,_8,this.optionalParams);
this.series=[];
this.hAxis=this.opt.hAxis;
this.vAxis=this.opt.vAxis;
this.animate=this.opt.animate;
// render(dim, offsets): redraws every dirty series onto the chart surface.
},render:function(_9,_a){
if(this.zoom&&!this.isDataDirty()){
return this.performZoom(_9,_a);
}
this.resetEvents();
this.dirty=this.isDirty();
if(this.dirty){
_3.forEach(this.series,_6);
this._eventSeries={};
this.cleanGroup();
var s=this.group;
df.forEachRev(this.series,function(_b){
_b.cleanGroup(s);
});
}
// t = theme; ht/vt map data coordinates to pixels for the two axes.
var t=this.chart.theme,ht=this._hScaler.scaler.getTransformerFromModel(this._hScaler),vt=this._vScaler.scaler.getTransformerFromModel(this._vScaler),_c=this.events();
for(var i=this.series.length-1;i>=0;--i){
var _d=this.series[i];
if(!this.dirty&&!_d.dirty){
t.skip();
this._reconnectEvents(_d.name);
continue;
}
_d.cleanGroup();
if(!_d.data.length){
_d.dirty=false;
t.skip();
continue;
}
if(typeof _d.data[0]=="number"){
console.warn("dojox.charting.plot2d.Bubble: the data in the following series cannot be rendered as a bubble chart; ",_d);
continue;
}
// Map each datum to screen coordinates and a pixel radius (or null).
var _e=t.next("circle",[this.opt,_d]),s=_d.group,_f=_3.map(_d.data,function(v,i){
return v?{x:ht(v.x)+_a.l,y:_9.height-_a.b-vt(v.y),radius:this._vScaler.bounds.scale*(v.size/2)}:null;
},this);
var _10=null,_11=null,_12=null;
// Optional drop shadows, drawn first (beneath outline and fill).
if(_e.series.shadow){
_12=_3.map(_f,function(_13){
if(_13!==null){
var _14=t.addMixin(_e,"circle",_13,true),_15=_14.series.shadow;
var _16=s.createCircle({cx:_13.x+_15.dx,cy:_13.y+_15.dy,r:_13.radius}).setStroke(_15).setFill(_15.color);
if(this.animate){
this._animateBubble(_16,_9.height-_a.b,_13.radius);
}
return _16;
}
return null;
},this);
if(_12.length){
_d.dyn.shadow=_12[_12.length-1].getStroke();
}
}
// Optional outlines, drawn beneath the filled circles.
if(_e.series.outline){
_11=_3.map(_f,function(_17){
if(_17!==null){
var _18=t.addMixin(_e,"circle",_17,true),_19=dc.makeStroke(_18.series.outline);
_19.width=2*_19.width+_e.series.stroke.width;
var _1a=s.createCircle({cx:_17.x,cy:_17.y,r:_17.radius}).setStroke(_19);
if(this.animate){
this._animateBubble(_1a,_9.height-_a.b,_17.radius);
}
return _1a;
}
return null;
},this);
if(_11.length){
_d.dyn.outline=_11[_11.length-1].getStroke();
}
}
// Main filled circles; remember last fill/stroke for the legend (dyn).
_10=_3.map(_f,function(_1b){
if(_1b!==null){
var _1c=t.addMixin(_e,"circle",_1b,true),_1d={x:_1b.x-_1b.radius,y:_1b.y-_1b.radius,width:2*_1b.radius,height:2*_1b.radius};
var _1e=this._plotFill(_1c.series.fill,_9,_a);
_1e=this._shapeFill(_1e,_1d);
var _1f=s.createCircle({cx:_1b.x,cy:_1b.y,r:_1b.radius}).setFill(_1e).setStroke(_1c.series.stroke);
if(this.animate){
this._animateBubble(_1f,_9.height-_a.b,_1b.radius);
}
return _1f;
}
return null;
},this);
if(_10.length){
_d.dyn.fill=_10[_10.length-1].getFill();
_d.dyn.stroke=_10[_10.length-1].getStroke();
}
// Wire up mouse/touch events for each rendered bubble, if enabled.
if(_c){
var _20=new Array(_10.length);
_3.forEach(_10,function(s,i){
if(s!==null){
var o={element:"circle",index:i,run:_d,shape:s,outline:_11&&_11[i]||null,shadow:_12&&_12[i]||null,x:_d.data[i].x,y:_d.data[i].y,r:_d.data[i].size/2,cx:_f[i].x,cy:_f[i].y,cr:_f[i].radius};
this._connectEvents(o);
_20[i]=o;
}
},this);
this._eventSeries[_d.name]=_20;
}else{
delete this._eventSeries[_d.name];
}
_d.dirty=false;
}
this.dirty=false;
return this;
// Grows a bubble from the chart baseline to full size over 1.2s.
},_animateBubble:function(_21,_22,_23){
fx.animateTransform(_1.delegate({shape:_21,duration:1200,transform:[{name:"translate",start:[0,_22],end:[0,0]},{name:"scale",start:[0,1/_23],end:[1,1]},{name:"original"}]},this.animate)).play();
}});
});
/Flaskel-3.1.0rc2-py3-none-any.whl/flaskel/utils/logger.py | REQUEST_FORMATTER = "flask_logify.formatters.RequestFormatter"
def handler(formatter, **kwargs):
    """Build a dictConfig entry for a stderr StreamHandler.

    Extra keyword arguments are merged into the base mapping and may
    override any of the base keys (``class``, ``stream``, ``formatter``).
    """
    base = {
        "class": "logging.StreamHandler",
        "stream": "ext://sys.stderr",
        "formatter": formatter,
    }
    base.update(kwargs)
    return base
# logging.config.dictConfig-compatible configuration.
# Flavours: plain "console" output, syslog (with/without request context),
# and queue-backed variants that decouple log emission from the request.
LOGGING = dict(
    version=1,
    disable_existing_loggers=False,
    # Shared queue instance referenced by the queue handlers via cfg:// URIs.
    objects={"queue": {"class": "queue.Queue", "maxsize": 10000}},
    formatters={
        "simple": {"format": "[%(asctime)s][%(levelname)s]: %(message)s"},
        # RequestFormatter injects per-request fields such as %(request_id)s.
        "consoleDebug": {
            "class": REQUEST_FORMATTER,
            "format": "[%(asctime)s]"
            "[%(levelname)s]"
            "[%(request_id)s]"
            "[%(name)s:%(module)s.%(funcName)s:%(lineno)d]: "
            "%(message)s",
        },
        "console": {
            "class": REQUEST_FORMATTER,
            "format": "[%(asctime)s][%(levelname)s][%(request_id)s]: %(message)s",
        },
        "syslog": {
            "class": REQUEST_FORMATTER,
            "format": "%(ident)s%(message)s",
        },
        "syslogNoRequest": {"format": "%(ident)s%(message)s"},
        # Emits one JSON object per record; %(message)s is expected to
        # already be valid JSON (it is not quoted here).
        "json": {
            "class": REQUEST_FORMATTER,
            "format": "{"
            '"requestId":"%(request_id)s",'
            '"level":"%(levelname)s",'
            '"datetime":"%(asctime)s",'
            '"message":%(message)s'
            "}",
        },
    },
    handlers={
        "simple": handler("simple"),
        "console": handler("console"),
        "consoleDebug": handler("consoleDebug"),
        "syslog": {
            "class": "flask_logify.handlers.FlaskSysLogHandler",
            "address": ["localhost", 514],
            "formatter": "syslog",
            "facility": "user",
        },
        "syslogNoRequest": {
            "class": "flask_logify.handlers.FlaskSysLogHandler",
            "address": ["localhost", 514],
            "formatter": "syslogNoRequest",
            "facility": "user",
        },
        # Queue handlers push records onto cfg://objects.queue and fan out
        # to the wrapped handlers from a listener thread.
        "queueConsole": {
            "respect_handler_level": True,
            "class": "flask_logify.handlers.QueueHandler",
            "queue": "cfg://objects.queue",
            "handlers": ["cfg://handlers.console"],
        },
        "queueSyslogNoRequest": {
            "respect_handler_level": True,
            "class": "flask_logify.handlers.QueueHandler",
            "queue": "cfg://objects.queue",
            "handlers": ["cfg://handlers.syslogNoRequest"],
        },
    },
    loggers={
        # One logger per deployment flavour; the app picks one by name.
        "development": {
            "level": "DEBUG",
            "propagate": True,
            "handlers": ["consoleDebug"],
        },
        "developmentQueue": {
            "level": "DEBUG",
            "propagate": True,
            "handlers": ["queueConsole"],
        },
        "production": {"level": "INFO", "propagate": True, "handlers": ["console"]},
        "productionQueue": {
            "level": "INFO",
            "propagate": True,
            "handlers": ["queueConsole"],
        },
        "flask-limiter": {"level": "DEBUG", "propagate": True, "handlers": ["simple"]},
        # gunicorn loggers go to syslog without request context (no request
        # is active when gunicorn itself logs).
        "gunicorn.error": {
            "level": "INFO",
            "propagate": True,
            "handlers": ["queueSyslogNoRequest"],
        },
        "gunicorn.access": {
            "level": "INFO",
            "propagate": True,
            "handlers": ["queueSyslogNoRequest"],
        },
    },
)
if __name__ == "__main__":  # pragma: no cover
    # Debug helper: dump the logging configuration as JSON to stdout.
    import json

    print(json.dumps(LOGGING))
/AnyRobot-1.5.4.tar.gz/AnyRobot-1.5.4/aishu/datafaker/profession/entity/RetrievesAssociated.py | import random,json
from aishu.datafaker.profession.entity import date
from aishu.datafaker.profession.entity.AssociationMapping import sql
from aishu.public.db_select import select
class ParaDateAnyRobotServer(object):
    """Look up AnyRobot entity ids/names from the database for test data.

    ``sql(key)`` returns either the SQL text mapped to *key* or ``False``
    when no query exists, so every accessor guards against a boolean before
    calling ``select``. Several accessors also record their first pick in
    module-level lists on :mod:`date` so repeated calls stay stable.
    """

    def __init__(self, key):
        self.key = key
        # May be False when no query is mapped to this key.
        self.sql = sql(self.key)

    def _first_column(self):
        """Run ``self.sql`` and return the first column of every row.

        Returns ``None`` when no query is mapped for this key or when the
        query yields no rows.
        """
        if isinstance(self.sql, bool):
            return None
        rows = select(self.sql)
        if len(rows) == 0:
            return None
        return [row[0] for row in rows]

    def getSavedSearchId(self):
        """Pick a random saved-search id; the first pick is sticky."""
        values = self._first_column()
        if values is None:
            return 0
        date.saved_search_Id_List.append(random.choice(values))
        # Always return the first id ever chosen so later calls agree.
        return date.saved_search_Id_List[0]

    def getSavedSearchNameId(self):
        """Return the name associated with the saved-search id."""
        values = self._first_column()
        if values is None:
            return 0
        return values[0]

    def getSavedSearchLogGroupId(self):
        """Return the saved search's log-group id with quotes stripped."""
        values = self._first_column()
        if values is None:
            return 0
        return values[0].replace("\"", "")

    def getSavedSearchLogLibraryId(self):
        """Return the saved search's log-library id with quotes stripped."""
        values = self._first_column()
        if values is None:
            return 0
        return values[0].replace("\"", "")

    def getAlertRuleNamesId(self):
        """Return a random alert-rule name.

        Guards against a missing query (consistent with the other
        accessors) instead of passing a boolean to ``select``.
        """
        values = self._first_column()
        if values is None:
            return 0
        return random.choice(values)

    def getAlertScenarioId(self):
        """Pick a random alert-scenario id; the first pick is sticky."""
        values = self._first_column()
        if values is None:
            return 0
        date.AlertScenario_Id_List.append(random.choice(values))
        return date.AlertScenario_Id_List[0]

    def getDeleteAlertRuleNamesId(self):
        """Return the first rule name stored (as a JSON list) on the scenario."""
        values = self._first_column()
        if values is None:
            return 0
        return json.loads(values[0])[0]

    def getServiceId(self):
        """Pick a random service id; the first pick is sticky."""
        values = self._first_column()
        if values is None:
            return 0
        date.Service_Id_List.append(random.choice(values))
        return date.Service_Id_List[0]

    def getKpiId(self):
        """Return the KPI id associated with the current service."""
        values = self._first_column()
        if values is None:
            return 0
        return values[0]
/ISAMBARD-2.3.1.tar.gz/ISAMBARD-2.3.1/src/isambard/specifications/coiledcoil.py |
from ampal import Assembly
import numpy
from .helix import HelicalHelix, _helix_parameters
# Reference parameters for the "basis set" of de novo coiled coils, keyed
# by oligomeric state: super-helical pitch (Angstroms), radius (Angstroms),
# core-packing interface angle (degrees) and a reference peptide sequence.
basis_set_parameters = {
    2: {'name': 'CC-Di', 'pitch': 225.8, 'radius': 5.07, 'interface_angle': 283.56,
        'sequence': 'EIAALKQEIAALKKENAALKWEIAALKQ'},
    3: {'name': 'CC-Tri', 'pitch': 194.0, 'radius': 6.34, 'interface_angle': 277.12,
        'sequence': 'EIAAIKQEIAAIKKEIAAIKWEIAAIKQ'},
    4: {'name': 'CC-Tet', 'pitch': 213.2, 'radius': 6.81, 'interface_angle': 279.20,
        'sequence': 'ELAAIKQELAAIKKELAAIKWELAAIKQ'},
    5: {'name': 'CC-Pent', 'pitch': 182.8, 'radius': 8.62, 'interface_angle': 271.58,
        'sequence': 'KIEQILQKIEKILQKIEWILQKIEQILQ'},
    6: {'name': 'CC-Hex', 'pitch': 228.4, 'radius': 9.13, 'interface_angle': 273.28,
        'sequence': 'ELKAIAQELKAIAKELKAIAWELKAIAQ'},
    7: {'name': 'CC-Hept', 'pitch': 328.6, 'radius': 9.80, 'interface_angle': 272.24,
        'sequence': 'EIAQALKEIAKALKEIAWALKEIAQALK'},
}
class CoiledCoil(Assembly):
    """Models a coiled-coil protein.

    Notes
    -----
    Instantiating this class using just an oligomeric state is used
    to create simple reference models. To build more complex models
    use the `from_parameters` classmethod.

    Parameters
    ----------
    n : int
        The oligomeric state of the model to be built.
    auto_build : bool, optional
        If `True`, the model will be built as part of instantiation.

    Attributes
    ----------
    aas : [int]
        Number of amino acids in each minor helix.
    basis_set_sequences : [str]
        Reference sequences for the oligomeric state that has been
        selected, taken from the basis set of coiled coils.
    major_radii : [float]
        Radii of the minor helices relative to the super-helical
        axis.
    major_pitches : [float]
        Pitch values of the minor helices relative to the super-helical
        axis.
    phi_c_alphas :
        Relative rotation values of the minor helices relative to
        the super-helical axis.
    minor_helix_types : [str]
        Helix types of the minor helices. Can be: 'alpha', 'pi', '3-10',
        'PPI', 'PP2', 'collagen'.
    major_handedness : str
        Handedness of the super helix.
    orientations :
        Orientation of helices relative to the super-helical axis. 1
        is parallel, -1 is anti-parallel.
    minor_repeats : [float]
        Hydrophobic repeats of the minor helices.
    rotational_offsets :
        Rotation of the minor helices relative to the super-helical
        axis.
    z_shifts : [float]
        Translation of the minor helices along the super-helical axis.
    oligomeric_state : int
        Oligomeric state of the coiled coil.
    """

    def __init__(self, n, auto_build=True):
        super(CoiledCoil, self).__init__()
        # parameters for each polypeptide
        # basis set parameters if known, otherwise educated guesses.
        if n in basis_set_parameters.keys():
            parameters = basis_set_parameters[n]
            radius = parameters['radius']
        else:
            # calculate radius based on extrapolated straight-line fit
            # of n Vs radius for basis_set_parameters
            radius = (n * 0.966) + 3.279
            # other default values just copied from largest oligomer
            # in basis_set_parameters.
            parameters = basis_set_parameters[max(basis_set_parameters.keys())]
        # All per-chain parameters are stored as length-n lists, one
        # entry per minor helix.
        self.major_radii = [radius] * n
        self.major_pitches = [parameters['pitch']] * n
        self.basis_set_sequences = [parameters['sequence']] * n
        self.aas = [len(parameters['sequence'])] * n
        self.phi_c_alphas = [parameters['interface_angle']] * n
        # alpha-helical barrel with heptad repeat as default.
        self.major_handedness = ['l'] * n
        self.minor_helix_types = ['alpha'] * n
        self.orientations = [1] * n
        self.minor_repeats = [3.5] * n
        # parameters for the arrangement of each polypeptide
        # (evenly distributed, no z-displacement).
        self.rotational_offsets = [((i * 360.0) / n) for i in range(n)]
        self.z_shifts = [0.0] * n
        # parameters for the whole assembly
        self.oligomeric_state = n
        if auto_build:
            self.build()

    @classmethod
    def from_polymers(cls, polymers):
        """Creates a `CoiledCoil` from a list of `HelicalHelices`.

        Parameters
        ----------
        polymers : [HelicalHelix]
            List of `HelicalHelices`.
        """
        n = len(polymers)
        instance = cls(n=n, auto_build=False)
        # Per-chain parameters are taken from the supplied polymers;
        # arrangement parameters (rotational_offsets, z_shifts) keep the
        # evenly-distributed defaults set in __init__.
        instance.major_radii = [x.major_radius for x in polymers]
        instance.major_pitches = [x.major_pitch for x in polymers]
        instance.major_handedness = [x.major_handedness for x in polymers]
        instance.aas = [x.num_monomers for x in polymers]
        instance.minor_helix_types = [x.minor_helix_type for x in polymers]
        instance.orientations = [x.orientation for x in polymers]
        instance.phi_c_alphas = [x.phi_c_alpha for x in polymers]
        instance.minor_repeats = [x.minor_repeat for x in polymers]
        instance.build()
        return instance

    @classmethod
    def from_parameters(cls, n, aa=28, major_radius=None, major_pitch=None,
                        phi_c_alpha=26.42, minor_helix_type='alpha',
                        auto_build=True):
        """Creates a `CoiledCoil` from defined super-helical parameters.

        Parameters
        ----------
        n : int
            Oligomeric state
        aa : int, optional
            Number of amino acids per minor helix.
        major_radius : float, optional
            Radius of super helix.
        major_pitch : float, optional
            Pitch of super helix.
        phi_c_alpha : float, optional
            Rotation of minor helices relative to the super-helical
            axis.
        minor_helix_type : float, optional
            Helix type of minor helices. Can be: 'alpha', 'pi', '3-10',
            'PPI', 'PP2', 'collagen'.
        auto_build : bool, optional
            If `True`, the model will be built as part of instantiation.
        """
        instance = cls(n=n, auto_build=False)
        instance.aas = [aa] * n
        instance.phi_c_alphas = [phi_c_alpha] * n
        instance.minor_helix_types = [minor_helix_type] * n
        # None means: keep the basis-set defaults chosen in __init__.
        if major_pitch is not None:
            instance.major_pitches = [major_pitch] * n
        if major_radius is not None:
            instance.major_radii = [major_radius] * n
        if auto_build:
            instance.build()
        return instance

    @classmethod
    def tropocollagen(
            cls, aa=28, major_radius=5.0, major_pitch=85.0, auto_build=True):
        """Creates a model of a collagen triple helix.

        Parameters
        ----------
        aa : int, optional
            Number of amino acids per minor helix.
        major_radius : float, optional
            Radius of super helix.
        major_pitch : float, optional
            Pitch of super helix.
        auto_build : bool, optional
            If `True`, the model will be built as part of instantiation.
        """
        instance = cls.from_parameters(
            n=3, aa=aa, major_radius=major_radius, major_pitch=major_pitch,
            phi_c_alpha=0.0, minor_helix_type='collagen', auto_build=False)
        # Collagen is a right-handed super helix of three chains.
        instance.major_handedness = ['r'] * 3
        # default z-shifts taken from rise_per_residue of collagen helix
        rpr_collagen = _helix_parameters['collagen'][1]
        instance.z_shifts = [-rpr_collagen * 2, -rpr_collagen, 0.0]
        instance.minor_repeats = [None] * 3
        if auto_build:
            instance.build()
        return instance

    def build(self):
        """Builds a model of a coiled coil protein using input parameters."""
        # One HelicalHelix per chain, built from the per-chain parameter lists.
        monomers = [HelicalHelix(major_pitch=self.major_pitches[i],
                                 major_radius=self.major_radii[i],
                                 major_handedness=self.major_handedness[i],
                                 aa=self.aas[i],
                                 minor_helix_type=self.minor_helix_types[i],
                                 orientation=self.orientations[i],
                                 phi_c_alpha=self.phi_c_alphas[i],
                                 minor_repeat=self.minor_repeats[i],
                                 )
                    for i in range(self.oligomeric_state)]
        axis_unit_vector = numpy.array([0, 0, 1])
        # Arrange the chains around the z axis: rotate each by its offset,
        # then translate it along the axis by its z-shift.
        for i, m in enumerate(monomers):
            m.rotate(angle=self.rotational_offsets[i], axis=axis_unit_vector)
            m.translate(axis_unit_vector * self.z_shifts[i])
        self._molecules = monomers[:]
        # Renumber/relabel chains and residues, and point each chain back
        # at this assembly.
        self.relabel_all()
        for m in self._molecules:
            m.ampal_parent = self
        return
__author__ = 'Andrew R. Thomson, Christopher W. Wood, Jack W. Heal' | PypiClean |
/FAaDO-0.0.4.tar.gz/FAaDO-0.0.4/fado/cli/arguments/arguments.py | import importlib
import logging
import os
import random
import sys
from pathlib import Path
import yaml
from fado.constants import IMPORT_OUT
# Module-level logger wrapped in an adapter so every record carries a
# node_id identifying this component in the shared 'fado' log stream.
logger = logging.getLogger('fado')
logger = logging.LoggerAdapter(logger, {'node_id': 'arguments'})
class FADOArguments:
    """Singleton that exposes a YAML configuration file as attributes.

    The first instantiation creates the shared instance and every later
    call returns that same object (``__new__`` caches it on the class),
    so the configuration is effectively process-global.
    """

    def __init__(self, config_path=None):
        """Load arguments from a YAML file.

        Parameters:
            config_path (str): path of the YAML configuration file. When
                ``None`` the instance keeps whatever state it already has.
        """
        if config_path is not None:
            with open(config_path, 'r') as file:
                args = yaml.load(file, Loader=yaml.FullLoader)
            # Store the directory containing the config file.
            config_path = Path(config_path).parent.absolute().__str__()
            setattr(self, 'config_path', config_path)
            self._set_arguments(args, config_path)
        self._process_arguments()

    def __new__(cls, config_path=None):
        # Classic singleton: cache the instance on the class.
        # NOTE(review): a later call with a *different* config_path re-runs
        # __init__ on the shared instance, merging (not replacing) the
        # previously loaded arguments.
        if not hasattr(cls, 'instance'):
            cls.instance = super(FADOArguments, cls).__new__(cls)
        return cls.instance

    def _process_arguments(self):
        """Apply side effects implied by the loaded arguments."""
        if 'random_seed' in self:
            random.seed(self.random_seed)
            # TODO: check if TF or Torch is in use and set seed
        if 'python_import_folder' in self:
            # Make user-provided modules importable from the import folders.
            sys.path.append(os.path.join(IMPORT_OUT))
            sys.path.append('/app/import')

    def _set_arguments(self, key_pairs, config_path):
        """Recursively flatten the YAML mapping onto instance attributes.

        Nested mappings are flattened (their leaves become attributes);
        list-valued entries are collected into ``self.vary`` keyed by name;
        everything else becomes a plain attribute.

        Parameters:
            key_pairs(dict): key, value pairs with sections(dicts) that
                contain n (property_name, property_value)
            config_path(str): directory of the config file (passed through
                to recursive calls).
        """
        for section_name, section in key_pairs.items():
            # isinstance (not ``type(x) ==``) so mapping/sequence
            # subclasses emitted by custom YAML loaders are handled too.
            if isinstance(section, dict):
                self._set_arguments(section, config_path)
            elif isinstance(section, list):
                if 'vary' not in self:
                    self.vary = {}
                self.vary[section_name] = section
            else:
                setattr(self, section_name, section)

    def set_argument(self, key, value):
        """Set a single argument value."""
        setattr(self, key, value)

    def get_argument(self, key):
        """Return the value of *key*; raises AttributeError if unset."""
        return getattr(self, key)

    def __contains__(self, key):
        return hasattr(self, key)

    def save_to_file(self, file_path):
        """Dump the current arguments to *file_path* as YAML."""
        with open(file_path, 'w') as file:
            yaml.dump(self.__dict__, file)

    def get_class(self, key):
        """Import the module named by argument *key* and return its class.

        NOTE(review): the stored value is assumed to be a ``.py`` filename
        importable as a module name once the import folders are on
        sys.path -- verify behaviour for nested paths.
        """
        module_name = os.path.join(self.get_argument(key))[:-3]
        module = importlib.import_module(module_name)
        return module.get_class()
/Mezzanine-6.0.0.tar.gz/Mezzanine-6.0.0/mezzanine/core/static/mezzanine/tinymce/langs/zh_CN.js | tinymce.addI18n('zh_CN',{
"Cut": "\u526a\u5207",
"Heading 5": "\u6807\u98985",
"Header 2": "\u6807\u98982",
"Your browser doesn't support direct access to the clipboard. Please use the Ctrl+X\/C\/V keyboard shortcuts instead.": "\u4f60\u7684\u6d4f\u89c8\u5668\u4e0d\u652f\u6301\u5bf9\u526a\u8d34\u677f\u7684\u8bbf\u95ee\uff0c\u8bf7\u4f7f\u7528Ctrl+X\/C\/V\u952e\u8fdb\u884c\u590d\u5236\u7c98\u8d34\u3002",
"Heading 4": "\u6807\u98984",
"Div": "Div\u533a\u5757",
"Heading 2": "\u6807\u98982",
"Paste": "\u7c98\u8d34",
"Close": "\u5173\u95ed",
"Font Family": "\u5b57\u4f53",
"Pre": "\u9884\u683c\u5f0f\u6587\u672c",
"Align right": "\u53f3\u5bf9\u9f50",
"New document": "\u65b0\u6587\u6863",
"Blockquote": "\u5f15\u7528",
"Numbered list": "\u7f16\u53f7\u5217\u8868",
"Heading 1": "\u6807\u98981",
"Headings": "\u6807\u9898",
"Increase indent": "\u589e\u52a0\u7f29\u8fdb",
"Formats": "\u683c\u5f0f",
"Headers": "\u6807\u9898",
"Select all": "\u5168\u9009",
"Header 3": "\u6807\u98983",
"Blocks": "\u533a\u5757",
"Undo": "\u64a4\u6d88",
"Strikethrough": "\u5220\u9664\u7ebf",
"Bullet list": "\u9879\u76ee\u7b26\u53f7",
"Header 1": "\u6807\u98981",
"Superscript": "\u4e0a\u6807",
"Clear formatting": "\u6e05\u9664\u683c\u5f0f",
"Font Sizes": "\u5b57\u53f7",
"Subscript": "\u4e0b\u6807",
"Header 6": "\u6807\u98986",
"Redo": "\u91cd\u590d",
"Paragraph": "\u6bb5\u843d",
"Ok": "\u786e\u5b9a",
"Bold": "\u7c97\u4f53",
"Code": "\u4ee3\u7801",
"Italic": "\u659c\u4f53",
"Align center": "\u5c45\u4e2d",
"Header 5": "\u6807\u98985",
"Heading 6": "\u6807\u98986",
"Heading 3": "\u6807\u98983",
"Decrease indent": "\u51cf\u5c11\u7f29\u8fdb",
"Header 4": "\u6807\u98984",
"Paste is now in plain text mode. Contents will now be pasted as plain text until you toggle this option off.": "\u5f53\u524d\u4e3a\u7eaf\u6587\u672c\u7c98\u8d34\u6a21\u5f0f\uff0c\u518d\u6b21\u70b9\u51fb\u53ef\u4ee5\u56de\u5230\u666e\u901a\u7c98\u8d34\u6a21\u5f0f\u3002",
"Underline": "\u4e0b\u5212\u7ebf",
"Cancel": "\u53d6\u6d88",
"Justify": "\u4e24\u7aef\u5bf9\u9f50",
"Inline": "\u6587\u672c",
"Copy": "\u590d\u5236",
"Align left": "\u5de6\u5bf9\u9f50",
"Visual aids": "\u7f51\u683c\u7ebf",
"Lower Greek": "\u5c0f\u5199\u5e0c\u814a\u5b57\u6bcd",
"Square": "\u65b9\u5757",
"Default": "\u9ed8\u8ba4",
"Lower Alpha": "\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd",
"Circle": "\u7a7a\u5fc3\u5706",
"Disc": "\u5b9e\u5fc3\u5706",
"Upper Alpha": "\u5927\u5199\u82f1\u6587\u5b57\u6bcd",
"Upper Roman": "\u5927\u5199\u7f57\u9a6c\u5b57\u6bcd",
"Lower Roman": "\u5c0f\u5199\u7f57\u9a6c\u5b57\u6bcd",
"Name": "\u540d\u79f0",
"Anchor": "\u951a\u70b9",
"You have unsaved changes are you sure you want to navigate away?": "\u4f60\u8fd8\u6709\u6587\u6863\u5c1a\u672a\u4fdd\u5b58\uff0c\u786e\u5b9a\u8981\u79bb\u5f00\uff1f",
"Restore last draft": "\u6062\u590d\u4e0a\u6b21\u7684\u8349\u7a3f",
"Special character": "\u7279\u6b8a\u7b26\u53f7",
"Source code": "\u6e90\u4ee3\u7801",
"Color": "\u989c\u8272",
"Right to left": "\u4ece\u53f3\u5230\u5de6",
"Left to right": "\u4ece\u5de6\u5230\u53f3",
"Emoticons": "\u8868\u60c5",
"Robots": "\u673a\u5668\u4eba",
"Document properties": "\u6587\u6863\u5c5e\u6027",
"Title": "\u6807\u9898",
"Keywords": "\u5173\u952e\u8bcd",
"Encoding": "\u7f16\u7801",
"Description": "\u63cf\u8ff0",
"Author": "\u4f5c\u8005",
"Fullscreen": "\u5168\u5c4f",
"Horizontal line": "\u6c34\u5e73\u5206\u5272\u7ebf",
"Horizontal space": "\u6c34\u5e73\u8fb9\u8ddd",
"Insert\/edit image": "\u63d2\u5165\/\u7f16\u8f91\u56fe\u7247",
"General": "\u666e\u901a",
"Advanced": "\u9ad8\u7ea7",
"Source": "\u5730\u5740",
"Border": "\u8fb9\u6846",
"Constrain proportions": "\u4fdd\u6301\u7eb5\u6a2a\u6bd4",
"Vertical space": "\u5782\u76f4\u8fb9\u8ddd",
"Image description": "\u56fe\u7247\u63cf\u8ff0",
"Style": "\u6837\u5f0f",
"Dimensions": "\u5927\u5c0f",
"Insert image": "\u63d2\u5165\u56fe\u7247",
"Insert date\/time": "\u63d2\u5165\u65e5\u671f\/\u65f6\u95f4",
"Remove link": "\u5220\u9664\u94fe\u63a5",
"Url": "\u5730\u5740",
"Text to display": "\u663e\u793a\u6587\u5b57",
"Anchors": "\u951a\u70b9",
"Insert link": "\u63d2\u5165\u94fe\u63a5",
"New window": "\u5728\u65b0\u7a97\u53e3\u6253\u5f00",
"None": "\u65e0",
"The URL you entered seems to be an external link. Do you want to add the required http:\/\/ prefix?": "\u4f60\u6240\u586b\u5199\u7684URL\u5730\u5740\u5c5e\u4e8e\u5916\u90e8\u94fe\u63a5\uff0c\u9700\u8981\u52a0\u4e0ahttp:\/\/:\u524d\u7f00\u5417\uff1f",
"Target": "\u6253\u5f00\u65b9\u5f0f",
"The URL you entered seems to be an email address. Do you want to add the required mailto: prefix?": "\u4f60\u6240\u586b\u5199\u7684URL\u5730\u5740\u8c8c\u4f3c\u662f\u90ae\u4ef6\u5730\u5740\uff0c\u9700\u8981\u52a0\u4e0amailto:\u524d\u7f00\u5417\uff1f",
"Insert\/edit link": "\u63d2\u5165\/\u7f16\u8f91\u94fe\u63a5",
"Insert\/edit video": "\u63d2\u5165\/\u7f16\u8f91\u89c6\u9891",
"Poster": "\u5c01\u9762",
"Alternative source": "\u955c\u50cf",
"Paste your embed code below:": "\u5c06\u5185\u5d4c\u4ee3\u7801\u7c98\u8d34\u5728\u4e0b\u9762:",
"Insert video": "\u63d2\u5165\u89c6\u9891",
"Embed": "\u5185\u5d4c",
"Nonbreaking space": "\u4e0d\u95f4\u65ad\u7a7a\u683c",
"Page break": "\u5206\u9875\u7b26",
"Paste as text": "\u7c98\u8d34\u4e3a\u6587\u672c",
"Preview": "\u9884\u89c8",
"Print": "\u6253\u5370",
"Save": "\u4fdd\u5b58",
"Could not find the specified string.": "\u672a\u627e\u5230\u641c\u7d22\u5185\u5bb9.",
"Replace": "\u66ff\u6362",
"Next": "\u4e0b\u4e00\u4e2a",
"Whole words": "\u5168\u5b57\u5339\u914d",
"Find and replace": "\u67e5\u627e\u548c\u66ff\u6362",
"Replace with": "\u66ff\u6362\u4e3a",
"Find": "\u67e5\u627e",
"Replace all": "\u5168\u90e8\u66ff\u6362",
"Match case": "\u533a\u5206\u5927\u5c0f\u5199",
"Prev": "\u4e0a\u4e00\u4e2a",
"Spellcheck": "\u62fc\u5199\u68c0\u67e5",
"Finish": "\u5b8c\u6210",
"Ignore all": "\u5168\u90e8\u5ffd\u7565",
"Ignore": "\u5ffd\u7565",
"Add to Dictionary": "\u6dfb\u52a0\u5230\u5b57\u5178",
"Insert row before": "\u5728\u4e0a\u65b9\u63d2\u5165",
"Rows": "\u884c",
"Height": "\u9ad8",
"Paste row after": "\u7c98\u8d34\u5230\u4e0b\u65b9",
"Alignment": "\u5bf9\u9f50\u65b9\u5f0f",
"Border color": "\u8fb9\u6846\u989c\u8272",
"Column group": "\u5217\u7ec4",
"Row": "\u884c",
"Insert column before": "\u5728\u5de6\u4fa7\u63d2\u5165",
"Split cell": "\u62c6\u5206\u5355\u5143\u683c",
"Cell padding": "\u5355\u5143\u683c\u5185\u8fb9\u8ddd",
"Cell spacing": "\u5355\u5143\u683c\u5916\u95f4\u8ddd",
"Row type": "\u884c\u7c7b\u578b",
"Insert table": "\u63d2\u5165\u8868\u683c",
"Body": "\u8868\u4f53",
"Caption": "\u6807\u9898",
"Footer": "\u8868\u5c3e",
"Delete row": "\u5220\u9664\u884c",
"Paste row before": "\u7c98\u8d34\u5230\u4e0a\u65b9",
"Scope": "\u8303\u56f4",
"Delete table": "\u5220\u9664\u8868\u683c",
"H Align": "\u6c34\u5e73\u5bf9\u9f50",
"Top": "\u9876\u90e8\u5bf9\u9f50",
"Header cell": "\u8868\u5934\u5355\u5143\u683c",
"Column": "\u5217",
"Row group": "\u884c\u7ec4",
"Cell": "\u5355\u5143\u683c",
"Middle": "\u5782\u76f4\u5c45\u4e2d",
"Cell type": "\u5355\u5143\u683c\u7c7b\u578b",
"Copy row": "\u590d\u5236\u884c",
"Row properties": "\u884c\u5c5e\u6027",
"Table properties": "\u8868\u683c\u5c5e\u6027",
"Bottom": "\u5e95\u90e8\u5bf9\u9f50",
"V Align": "\u5782\u76f4\u5bf9\u9f50",
"Header": "\u8868\u5934",
"Right": "\u53f3\u5bf9\u9f50",
"Insert column after": "\u5728\u53f3\u4fa7\u63d2\u5165",
"Cols": "\u5217",
"Insert row after": "\u5728\u4e0b\u65b9\u63d2\u5165",
"Width": "\u5bbd",
"Cell properties": "\u5355\u5143\u683c\u5c5e\u6027",
"Left": "\u5de6\u5bf9\u9f50",
"Cut row": "\u526a\u5207\u884c",
"Delete column": "\u5220\u9664\u5217",
"Center": "\u5c45\u4e2d",
"Merge cells": "\u5408\u5e76\u5355\u5143\u683c",
"Insert template": "\u63d2\u5165\u6a21\u677f",
"Templates": "\u6a21\u677f",
"Background color": "\u80cc\u666f\u8272",
"Custom...": "\u81ea\u5b9a\u4e49...",
"Custom color": "\u81ea\u5b9a\u4e49\u989c\u8272",
"No color": "\u65e0",
"Text color": "\u6587\u5b57\u989c\u8272",
"Show blocks": "\u663e\u793a\u533a\u5757\u8fb9\u6846",
"Show invisible characters": "\u663e\u793a\u4e0d\u53ef\u89c1\u5b57\u7b26",
"Words: {0}": "\u5b57\u6570\uff1a{0}",
"Insert": "\u63d2\u5165",
"File": "\u6587\u4ef6",
"Edit": "\u7f16\u8f91",
"Rich Text Area. Press ALT-F9 for menu. Press ALT-F10 for toolbar. Press ALT-0 for help": "\u5728\u7f16\u8f91\u533a\u6309ALT-F9\u6253\u5f00\u83dc\u5355\uff0c\u6309ALT-F10\u6253\u5f00\u5de5\u5177\u680f\uff0c\u6309ALT-0\u67e5\u770b\u5e2e\u52a9",
"Tools": "\u5de5\u5177",
"View": "\u89c6\u56fe",
"Table": "\u8868\u683c",
"Format": "\u683c\u5f0f"
}); | PypiClean |
/Kivy-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/kivy/input/providers/probesysfs.py | __all__ = ('ProbeSysfsHardwareProbe', )
import os
from os.path import sep
if 'KIVY_DOC' in os.environ:
    # Building the documentation: no hardware to probe, export a stub.
    ProbeSysfsHardwareProbe = None

else:
    import ctypes
    from re import match, IGNORECASE
    from glob import glob
    from subprocess import Popen, PIPE

    from kivy.logger import Logger
    from kivy.input.provider import MotionEventProvider
    from kivy.input.providers.mouse import MouseMotionEventProvider
    from kivy.input.factory import MotionEventFactory
    from kivy.config import _is_rpi

    # Bound lazily in probe() to avoid importing the event loop at import time.
    EventLoop = None

    # See linux/input.h
    ABS_MT_POSITION_X = 0x35  # capability bit: absolute multitouch X axis

    _cache_input = None   # cached list of Input wrappers (sysfs scanned once)
    _cache_xinput = None  # cached device-node paths already managed by X
    class Input(object):
        """Wrapper around one /sys/class/input/event* entry."""

        def __init__(self, path):
            # Ensure the xinput device cache is populated before any
            # is_mouse query.
            query_xinput()
            self.path = path

        @property
        def device(self):
            # Corresponding device node under /dev/input.
            base = os.path.basename(self.path)
            return os.path.join("/dev", "input", base)

        @property
        def name(self):
            # Human-readable device name exposed by the kernel.
            path = os.path.join(self.path, "device", "name")
            return read_line(path)

        def get_capabilities(self):
            """Return the device's absolute-axis capability bits as a list.

            Index *n* of the returned list is True when absolute axis *n*
            is supported. Returns [] when the sysfs entry is unreadable.
            """
            path = os.path.join(self.path, "device", "capabilities", "abs")
            line = "0"
            try:
                line = read_line(path)
            except (IOError, OSError):
                return []

            capabilities = []
            long_bit = ctypes.sizeof(ctypes.c_long) * 8
            # The sysfs line lists machine words most-significant first;
            # prepending each word's LSB-first bits yields a list indexed
            # by absolute axis number.
            for i, word in enumerate(line.split(" ")):
                word = int(word, 16)
                subcapabilities = [bool(word & 1 << i)
                                   for i in range(long_bit)]
                capabilities[:0] = subcapabilities

            return capabilities

        def has_capability(self, capability):
            # True when the capability bit list is long enough and set.
            capabilities = self.get_capabilities()
            return len(capabilities) > capability and capabilities[capability]

        @property
        def is_mouse(self):
            # True when X already manages this device node.
            return self.device in _cache_xinput
def getout(*args):
try:
return Popen(args, stdout=PIPE).communicate()[0]
except OSError:
return ''
def query_xinput():
global _cache_xinput
if _cache_xinput is None:
_cache_xinput = []
devids = getout('xinput', '--list', '--id-only')
for did in devids.splitlines():
devprops = getout('xinput', '--list-props', did)
evpath = None
for prop in devprops.splitlines():
prop = prop.strip()
if (prop.startswith(b'Device Enabled') and
prop.endswith(b'0')):
evpath = None
break
if prop.startswith(b'Device Node'):
try:
evpath = prop.split('"')[1]
except Exception:
evpath = None
if evpath:
_cache_xinput.append(evpath)
def get_inputs(path):
global _cache_input
if _cache_input is None:
event_glob = os.path.join(path, "event*")
_cache_input = [Input(x) for x in glob(event_glob)]
return _cache_input
def read_line(path):
f = open(path)
try:
return f.readline().strip()
finally:
f.close()
    class ProbeSysfsHardwareProbe(MotionEventProvider):
        """Pseudo-provider that scans /sys/class/input for multitouch
        devices and registers a real provider (mtdev/hidinput) for each
        matching device. It never returns an instance of itself.
        """

        def __new__(self, device, args):
            # hack to not return an instance of this provider.
            # :)
            instance = super(ProbeSysfsHardwareProbe, self).__new__(self)
            instance.__init__(device, args)

        def __init__(self, device, args):
            super(ProbeSysfsHardwareProbe, self).__init__(device, args)
            # Defaults; each may be overridden by key=value pairs in *args*.
            self.provider = 'mtdev'
            self.match = None
            self.input_path = '/sys/class/input'
            # On Raspberry Pi, accept every device by default.
            self.select_all = True if _is_rpi else False
            self.use_mouse = False
            self.use_regex = False
            self.args = []

            # *args* is a comma-separated list of key=value options.
            args = args.split(',')
            for arg in args:
                if arg == '':
                    continue
                arg = arg.split('=', 1)
                # ensure it's a key = value
                if len(arg) != 2:
                    Logger.error('ProbeSysfs: invalid parameters %s, not'
                                 ' key=value format' % arg)
                    continue

                key, value = arg
                if key == 'match':
                    self.match = value
                elif key == 'provider':
                    self.provider = value
                elif key == 'use_regex':
                    self.use_regex = bool(int(value))
                elif key == 'select_all':
                    self.select_all = bool(int(value))
                elif key == 'use_mouse':
                    self.use_mouse = bool(int(value))
                elif key == 'param':
                    # Extra parameters forwarded to the spawned provider.
                    self.args.append(value)
                else:
                    Logger.error('ProbeSysfs: unknown %s option' % key)
                    continue

            self.probe()

        def should_use_mouse(self):
            # Use mouse-capable devices when explicitly requested, or when
            # no mouse provider is installed to handle them.
            return (self.use_mouse or
                    not any(p for p in EventLoop.input_providers
                            if isinstance(p, MouseMotionEventProvider)))

        def probe(self):
            global EventLoop
            from kivy.base import EventLoop

            inputs = get_inputs(self.input_path)
            Logger.debug('ProbeSysfs: using probesysfs!')

            use_mouse = self.should_use_mouse()

            if not self.select_all:
                # Keep only multitouch-capable devices (and mice only when
                # allowed above).
                inputs = [x for x in inputs if
                          x.has_capability(ABS_MT_POSITION_X) and
                          (use_mouse or not x.is_mouse)]
            for device in inputs:
                Logger.debug('ProbeSysfs: found device: %s at %s' % (
                    device.name, device.device))

                # must ignore ?
                if self.match:
                    if self.use_regex:
                        if not match(self.match, device.name, IGNORECASE):
                            Logger.debug('ProbeSysfs: device not match the'
                                         ' rule in config, ignoring.')
                            continue
                    else:
                        if self.match not in device.name:
                            continue

                Logger.info('ProbeSysfs: device match: %s' % device.device)

                d = device.device
                devicename = self.device % dict(name=d.split(sep)[-1])

                provider = MotionEventFactory.get(self.provider)
                if provider is None:
                    Logger.info('ProbeSysfs: Unable to find provider %s' %
                                self.provider)
                    Logger.info('ProbeSysfs: fallback on hidinput')
                    provider = MotionEventFactory.get('hidinput')
                if provider is None:
                    Logger.critical('ProbeSysfs: no input provider found'
                                    ' to handle this device !')
                    continue

                # Spawn the real provider for this device and register it
                # with the running event loop.
                instance = provider(devicename, '%s,%s' % (
                    device.device, ','.join(self.args)))
                if instance:
                    EventLoop.add_input_provider(instance)
MotionEventFactory.register('probesysfs', ProbeSysfsHardwareProbe) | PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.