input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
service can identify.
"""
self.languages = languages
@classmethod
def from_dict(cls, _dict: Dict) -> 'IdentifiableLanguages':
"""Initialize a IdentifiableLanguages object from a json dictionary."""
args = {}
if 'languages' in _dict:
args['languages'] = [
IdentifiableLanguage.from_dict(x)
for x in _dict.get('languages')
]
else:
raise ValueError(
'Required property \'languages\' not present in IdentifiableLanguages JSON'
)
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a IdentifiableLanguages object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'languages') and self.languages is not None:
_dict['languages'] = [x.to_dict() for x in self.languages]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this IdentifiableLanguages object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'IdentifiableLanguages') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'IdentifiableLanguages') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class IdentifiedLanguage():
    """
    IdentifiedLanguage.

    :attr str language: The language code for an identified language.
    :attr float confidence: The confidence score for the identified language.
    """

    def __init__(self, language: str, confidence: float) -> None:
        """
        Initialize a IdentifiedLanguage object.

        :param str language: The language code for an identified language.
        :param float confidence: The confidence score for the identified
               language.
        """
        self.language = language
        self.confidence = confidence

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'IdentifiedLanguage':
        """Initialize a IdentifiedLanguage object from a json dictionary."""
        kwargs = {}
        # both properties are required by the service schema
        for key in ('language', 'confidence'):
            if key not in _dict:
                raise ValueError('Required property \'' + key +
                                 '\' not present in IdentifiedLanguage JSON')
            kwargs[key] = _dict.get(key)
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a IdentifiedLanguage object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for key in ('language', 'confidence'):
            value = getattr(self, key, None)
            if value is not None:
                result[key] = value
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this IdentifiedLanguage object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'IdentifiedLanguage') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'IdentifiedLanguage') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class IdentifiedLanguages():
    """
    IdentifiedLanguages.

    :attr List[IdentifiedLanguage] languages: A ranking of identified
          languages with confidence scores.
    """

    def __init__(self, languages: List['IdentifiedLanguage']) -> None:
        """
        Initialize a IdentifiedLanguages object.

        :param List[IdentifiedLanguage] languages: A ranking of identified
               languages with confidence scores.
        """
        self.languages = languages

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'IdentifiedLanguages':
        """Initialize a IdentifiedLanguages object from a json dictionary."""
        if 'languages' not in _dict:
            raise ValueError(
                'Required property \'languages\' not present in IdentifiedLanguages JSON'
            )
        parsed = [
            IdentifiedLanguage.from_dict(item)
            for item in _dict.get('languages')
        ]
        return cls(languages=parsed)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a IdentifiedLanguages object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        output = {}
        languages = getattr(self, 'languages', None)
        if languages is not None:
            output['languages'] = [item.to_dict() for item in languages]
        return output

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this IdentifiedLanguages object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'IdentifiedLanguages') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'IdentifiedLanguages') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Language():
"""
Response payload for languages.
:attr str language: (optional) The language code for the language (for example,
`af`).
:attr str language_name: (optional) The name of the language in English (for
example, `Afrikaans`).
:attr str native_language_name: (optional) The native name of the language (for
example, `Afrikaans`).
:attr str country_code: (optional) The country code for the language (for
example, `ZA` for South Africa).
:attr bool words_separated: (optional) Indicates whether words of the language
are separated by whitespace: `true` if the words are separated; `false`
otherwise.
:attr str direction: (optional) Indicates the direction of the language:
`right_to_left` or `left_to_right`.
:attr bool supported_as_source: (optional) Indicates whether the language can be
used as the source for translation: `true` if the language can be used as the
source; `false` otherwise.
:attr bool supported_as_target: (optional) Indicates whether the language can be
used as the target for translation: `true` if the language can be used as the
target; `false` otherwise.
:attr bool identifiable: (optional) Indicates whether the language supports
automatic detection: `true` if the language can be detected automatically;
`false` otherwise.
"""
def __init__(self,
*,
language: str = None,
language_name: str = None,
native_language_name: str = None,
country_code: str = None,
words_separated: bool = None,
direction: str = None,
supported_as_source: bool = None,
supported_as_target: bool = None,
identifiable: bool = None) -> None:
"""
Initialize a Language object.
:param str language: (optional) The language code for the language (for
example, `af`).
:param str language_name: (optional) The name of the language in English
(for example, `Afrikaans`).
:param str native_language_name: (optional) The native name of the language
(for example, `Afrikaans`).
:param str country_code: (optional) The country code for the language (for
example, `ZA` for South Africa).
:param bool words_separated: (optional) Indicates whether words of the
language are separated by whitespace: `true` if the words are separated;
`false` otherwise.
:param str direction: (optional) Indicates the direction of the language:
`right_to_left` or `left_to_right`.
:param bool supported_as_source: (optional) Indicates whether the language
can be used as the source for translation: `true` if the language can be
used as the source; `false` otherwise.
:param bool supported_as_target: (optional) Indicates whether the language
can be used as the target for translation: `true` if the language can be
used as the target; `false` otherwise.
:param bool identifiable: (optional) Indicates whether the language
supports automatic detection: `true` if the language can be detected
automatically; `false` otherwise.
"""
self.language = language
self.language_name = language_name
self.native_language_name = native_language_name
self.country_code = country_code
self.words_separated = words_separated
self.direction = direction
self.supported_as_source = supported_as_source
self.supported_as_target = supported_as_target
self.identifiable = identifiable
@classmethod
def from_dict(cls, _dict: Dict) -> 'Language':
"""Initialize a Language object from a json dictionary."""
args = {}
if 'language' in _dict:
args['language'] = _dict.get('language')
if 'language_name' in _dict:
args['language_name'] = _dict.get('language_name')
if 'native_language_name' in _dict:
args['native_language_name'] = _dict.get('native_language_name')
if 'country_code' in _dict:
args['country_code'] = _dict.get('country_code')
if 'words_separated' in _dict:
args['words_separated'] = _dict.get('words_separated')
if 'direction' in _dict:
args['direction'] = _dict.get('direction')
if 'supported_as_source' in _dict:
args['supported_as_source'] = _dict.get('supported_as_source')
if 'supported_as_target' in _dict:
args['supported_as_target'] = _dict.get('supported_as_target')
if 'identifiable' in _dict:
args['identifiable'] = _dict.get('identifiable')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Language object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'language') and self.language is not None:
_dict['language'] = self.language
if hasattr(self, 'language_name') and self.language_name is not None:
_dict['language_name'] = self.language_name
if hasattr(self, 'native_language_name'
) and self.native_language_name is not None:
_dict['native_language_name'] = self.native_language_name
if hasattr(self, 'country_code') and self.country_code is not None:
_dict['country_code'] = self.country_code
if hasattr(self,
'words_separated') and self.words_separated is not None:
_dict['words_separated'] = self.words_separated
if hasattr(self, 'direction') and self.direction is not None:
_dict['direction'] = self.direction
if hasattr(
self,
'supported_as_source') and self.supported_as_source is not None:
_dict['supported_as_source'] = self.supported_as_source
if hasattr(
self,
'supported_as_target') and self.supported_as_target is not None:
_dict['supported_as_target'] = self.supported_as_target
if hasattr(self, 'identifiable') and self.identifiable is not None:
_dict['identifiable'] = self.identifiable
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Language object."""
return | |
weight of the core, e.g. ``self.minw``, the
selector is marked as garbage and will be removed in
:func:`filter_assumps`. Otherwise, the clause is split as
described in [1]_.
"""
# new relaxation variables
self.rels = []
for l in self.core_sels:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# reuse assumption variable as relaxation
self.rels.append(-l)
    def process_sums(self):
        """
        Process cardinality sums participating in a new core.
        Whenever necessary, some of the sum assumptions are
        removed or split (depending on the value of
        ``self.minw``). Deleted sums are marked as garbage and are
        dealt with in :func:`filter_assumps`.

        In some cases, the process involves updating the
        right-hand sides of the existing cardinality sums (see the
        call to :func:`update_sum`). The overall procedure is
        detailed in [1]_.
        """
        for l in self.core_sums:
            if self.wght[l] == self.minw:
                # marking variable as being a part of the core
                # so that next time it is not used as an assump
                self.garbage.add(l)
            else:
                # do not remove this variable from assumps
                # since it has a remaining non-zero weight
                self.wght[l] -= self.minw

            # increase bound for the sum
            t, b = self.update_sum(l)

            # updating bounds and weights
            if b < len(t.rhs):
                # the assumption literal enforcing the new bound
                lnew = -t.rhs[b]
                if lnew in self.garbage:
                    # reactivate a sum assumption that was previously
                    # marked for removal; weight restarts from zero
                    self.garbage.remove(lnew)
                    self.wght[lnew] = 0

                if lnew not in self.wght:
                    # first time this bound literal is seen --
                    # register it as a new sum assumption
                    self.set_bound(t, b)
                else:
                    # existing assumption: accumulate its weight
                    self.wght[lnew] += self.minw

            # put this assumption to relaxation vars
            self.rels.append(-l)
    def create_sum(self, bound=1):
        """
        Create a totalizer object encoding a cardinality
        constraint on the new list of relaxation literals obtained
        in :func:`process_sels` and :func:`process_sums`. The
        clauses encoding the sum of the relaxation literals are
        added to the SAT oracle. The sum of the totalizer object
        is encoded up to the value of the input parameter
        ``bound``, which is set to ``1`` by default.

        :param bound: right-hand side for the sum to be created
        :type bound: int

        :rtype: :class:`.ITotalizer`

        Note that if Minicard is used as a SAT oracle, native
        cardinality constraints are used instead of
        :class:`.ITotalizer`.
        """
        if not self.oracle.supports_atmost():  # standard totalizer-based encoding
            # new totalizer sum over the current relaxation literals
            t = ITotalizer(lits=self.rels, ubound=bound, top_id=self.pool.top)

            # updating top variable id so fresh ids do not clash
            self.pool.top = t.top_id

            # adding its clauses to oracle
            for cl in t.cnf.clauses:
                self.oracle.add_clause(cl)
        else:
            # for minicard, use native cardinality constraints instead of the
            # standard totalizer, i.e. create a new (empty) totalizer sum and
            # fill it with the necessary data supported by minicard
            t = ITotalizer()
            t.lits = self.rels

            # a new variable will represent the bound
            bvar = self.pool.id()

            # proper initial bound; rhs slots for other bounds are
            # created lazily in update_sum
            t.rhs = [None] * (len(t.lits))
            t.rhs[bound] = bvar

            # new atmostb constraint instrumented with
            # an implication and represented natively:
            # repeating -bvar (rhs - bound) times makes the at-most-rhs
            # constraint collapse to at-most-bound whenever bvar is true
            rhs = len(t.lits)
            amb = [[-bvar] * (rhs - bound) + t.lits, rhs]

            # add constraint to the solver
            self.oracle.add_atmost(*amb)

        return t
    def update_sum(self, assump):
        """
        The method is used to increase the bound for a given
        totalizer sum. The totalizer object is identified by the
        input parameter ``assump``, which is an assumption literal
        associated with the totalizer object.

        The method increases the bound for the totalizer sum,
        which involves adding the corresponding new clauses to the
        internal SAT oracle.

        The method returns the totalizer object followed by the
        new bound obtained.

        :param assump: assumption literal associated with the sum
        :type assump: int

        :rtype: :class:`.ITotalizer`, int

        Note that if Minicard is used as a SAT oracle, native
        cardinality constraints are used instead of
        :class:`.ITotalizer`.
        """
        # getting a totalizer object corresponding to assumption
        t = self.tobj[assump]

        # increment the current bound
        b = self.bnds[assump] + 1

        if not self.oracle.supports_atmost():  # the case of standard totalizer encoding
            # increasing its bound
            t.increase(ubound=b, top_id=self.pool.top)

            # updating top variable id
            self.pool.top = t.top_id

            # adding its clauses to oracle
            # (only the clauses created by the increase() call above)
            if t.nof_new:
                for cl in t.cnf.clauses[-t.nof_new:]:
                    self.oracle.add_clause(cl)
        else:  # the case of cardinality constraints represented natively
            # right-hand side is always equal to the number of input literals
            rhs = len(t.lits)

            if b < rhs:
                # creating an additional bound variable lazily
                if not t.rhs[b]:
                    t.rhs[b] = self.pool.id()

                # a new at-most-b constraint guarded by the bound variable
                amb = [[-t.rhs[b]] * (rhs - b) + t.lits, rhs]
                self.oracle.add_atmost(*amb)

        return t, b
def set_bound(self, tobj, rhs):
"""
Given a totalizer sum and its right-hand side to be
enforced, the method creates a new sum assumption literal,
which will be used in the following SAT oracle calls.
:param tobj: totalizer sum
:param rhs: right-hand side
:type tobj: :class:`.ITotalizer`
:type rhs: int
"""
# saving the sum and its weight in a mapping
self.tobj[-tobj.rhs[rhs]] = tobj
self.bnds[-tobj.rhs[rhs]] = rhs
self.wght[-tobj.rhs[rhs]] = self.minw
# adding a new assumption to force the sum to be at most rhs
self.sums.append(-tobj.rhs[rhs])
def filter_assumps(self):
"""
Filter out unnecessary selectors and sums from the list of
assumption literals. The corresponding values are also
removed from the dictionaries of bounds and weights.
Note that assumptions marked as garbage are collected in
the core processing methods, i.e. in :func:`process_core`,
:func:`process_sels`, and :func:`process_sums`.
"""
self.sels = list(filter(lambda x: x not in self.garbage, self.sels))
self.sums = list(filter(lambda x: x not in self.garbage, self.sums))
self.bnds = {l: b for l, b in six.iteritems(self.bnds) if l not in self.garbage}
self.wght = {l: w for l, w in six.iteritems(self.wght) if l not in self.garbage}
self.sels_set.difference_update(set(self.garbage))
self.garbage.clear()
def oracle_time(self):
"""
Report the total SAT solving time.
"""
return self.oracle.time_accum()
def _map_extlit(self, l):
"""
Map an external variable to an internal one if necessary.
This method is used when new clauses are added to the
formula incrementally, which may result in introducing new
variables clashing with the previously used *clause
selectors*. The method makes sure no clash occurs, i.e. it
maps the original variables used in the new problem
clauses to the newly introduced auxiliary variables (see
:func:`add_clause`).
Given an integer literal, a fresh literal is returned. The
returned integer has the same sign as the input literal.
:param l: literal to map
:type l: int
:rtype: int
"""
v = abs(l)
if v in self.vmap.e2i:
return int(copysign(self.vmap.e2i[v], l))
else:
i = self.pool.id()
self.vmap.e2i[v] = i
self.vmap.i2e[i] = v
return int(copysign(i, l))
#
#==============================================================================
class RC2Stratified(RC2, object):
"""
RC2 augmented with BLO and stratification techniques. Although
class :class:`RC2` can deal with weighted formulas, there are
situations when it is necessary to apply additional heuristics
to improve the performance of the solver on weighted MaxSAT
formulas. This class extends capabilities of :class:`RC2` with
three heuristics, namely
1. Boolean lexicographic optimization (BLO) [5]_
2. diversity-based stratification [6]_
3. cluster-based stratification
To specify which heuristics to apply, a user can assign the ``blo``
parameter to one of the values (by default it is set to ``'div'``):
- ``'basic'`` ('BLO' only)
- ``div`` ('BLO' + diversity-based stratification)
- ``cluster`` ('BLO' + cluster-based stratification)
- ``full`` ('BLO' + diversity- + cluster-based stratification)
Except for the aforementioned additional techniques, every other
component of the solver remains as in the base class :class:`RC2`.
Therefore, a user is referred to the documentation of :class:`RC2` for
details.
"""
def __init__(self, formula, solver='g3', adapt=False, blo='div',
exhaust=False, incr=False, minz=False, nohard=False, trim=0,
verbose=0):
"""
Constructor.
"""
# calling the constructor for the basic version
super(RC2Stratified, self).__init__(formula, solver=solver,
adapt=adapt, exhaust=exhaust, incr=incr, minz=minz, trim=trim,
verbose=verbose)
self.levl = 0 # initial optimization level
self.blop = [] # a list of blo levels
# BLO strategy
assert blo and blo in blomap, 'Unknown BLO strategy'
self.bstr = blomap[blo]
# do clause hardening
self.hard = nohard == False
# backing up selectors
self.bckp, self.bckp_set = self.sels, self.sels_set
self.sels = []
# initialize Boolean lexicographic optimization
self.init_wstr()
def init_wstr(self):
"""
Compute and initialize optimization levels for BLO and
stratification. This method is invoked once, from the
constructor of an object of :class:`RC2Stratified`. Given
| |
def _make_date(dattim):
"""Make a date object from GEMPAK DATTIM integer."""
return GempakFile._convert_dattim(dattim).date()
@staticmethod
def _make_time(t):
"""Make a time object from GEMPAK FTIME integer."""
string = '{:04d}'.format(t)
return datetime.strptime(string, '%H%M').time()
    def _unpack_real(self, buffer, parameters, length):
        """Unpack floating point data packed in integers.

        Similar to DP_UNPK subroutine in GEMPAK.

        :param buffer: sequence of packed 32-bit integer words
        :param parameters: dict with per-parameter 'name', 'scale',
            'offset' and 'bits' entries (parallel lists)
        :param length: total number of packed words to consume
        """
        nparms = len(parameters['name'])
        mskpat = 0xffffffff  # 32-bit all-ones mask

        # words per packed record, and number of records in `length` words
        pwords = (sum(parameters['bits']) - 1) // 32 + 1
        npack = (length - 1) // pwords + 1

        # output starts as all-missing
        unpacked = np.ones(npack * nparms, dtype=np.float32) * self.prod_desc.missing_float
        if npack * pwords != length:
            raise ValueError('Unpacking length mismatch.')

        ir = 0  # write offset into `unpacked`
        ii = 0  # read offset into `buffer`
        for _i in range(npack):
            pdat = buffer[ii:(ii + pwords)]
            rdat = unpacked[ir:(ir + nparms)]
            itotal = 0  # running bit offset within the record
            for idata in range(nparms):
                scale = 10**parameters['scale'][idata]
                offset = parameters['offset'][idata]
                bits = parameters['bits'][idata]

                # 1-based bit position and 0-based word index of this field
                isbitc = (itotal % 32) + 1
                iswrdc = (itotal // 32)
                # all-ones value of `bits` width marks a missing field
                imissc = self._fortran_ishift(mskpat, bits - 32)

                jbit = bits
                jsbit = isbitc
                jshift = 1 - jsbit
                jsword = iswrdc
                jword = pdat[jsword]
                mask = self._fortran_ishift(mskpat, jbit - 32)
                ifield = self._fortran_ishift(jword, jshift)
                ifield &= mask

                # field straddles a word boundary -- pull the remaining
                # bits from the following word
                if (jsbit + jbit - 1) > 32:
                    jword = pdat[jsword + 1]
                    jshift += 32
                    iword = self._fortran_ishift(jword, jshift)
                    iword &= mask
                    ifield |= iword

                if ifield == imissc:
                    rdat[idata] = self.prod_desc.missing_float
                else:
                    # apply GEMPAK offset and decimal scale to recover
                    # the floating-point value
                    rdat[idata] = (ifield + offset) * scale
                itotal += bits

            unpacked[ir:(ir + nparms)] = rdat
            ir += nparms
            ii += pwords

        return unpacked.tolist()
class GempakGrid(GempakFile):
"""Subclass of GempakFile specific to GEMPAK gridded data."""
def __init__(self, file, *args, **kwargs):
"""Instantiate GempakGrid object from file."""
super().__init__(file)
datetime_names = ['GDT1', 'GDT2']
level_names = ['GLV1', 'GLV2']
ftime_names = ['GTM1', 'GTM2']
string_names = ['GPM1', 'GPM2', 'GPM3']
# Row Headers
# Based on GEMPAK source, row/col headers have a 0th element in their Fortran arrays.
# This appears to be a flag value to say a header is used or not. 9999
# means its in use, otherwise -9999. GEMPAK allows empty grids, etc., but
# no real need to keep track of that in Python.
self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_headers_ptr))
self.row_headers = []
row_headers_info = [(key, 'i') for key in self.row_keys]
row_headers_info.extend([(None, None)])
row_headers_fmt = NamedStruct(row_headers_info, self.prefmt, 'RowHeaders')
for _ in range(1, self.prod_desc.rows + 1):
if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
self.row_headers.append(self._buffer.read_struct(row_headers_fmt))
# Column Headers
self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_headers_ptr))
self.column_headers = []
column_headers_info = [(key, 'i', self._convert_level) if key in level_names
else (key, 'i', self._convert_vertical_coord) if key == 'GVCD'
else (key, 'i', self._convert_dattim) if key in datetime_names
else (key, 'i', self._convert_ftime) if key in ftime_names
else (key, '4s', self._decode_strip) if key in string_names
else (key, 'i')
for key in self.column_keys]
column_headers_info.extend([(None, None)])
column_headers_fmt = NamedStruct(column_headers_info, self.prefmt, 'ColumnHeaders')
for _ in range(1, self.prod_desc.columns + 1):
if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
self.column_headers.append(self._buffer.read_struct(column_headers_fmt))
self._gdinfo = []
for n, head in enumerate(self.column_headers):
self._gdinfo.append(
Grid(
n,
head.GTM1[0],
head.GDT1 + head.GTM1[1],
head.GDT2 + head.GTM2[1] if head.GDT2 and head.GDTM2 else None,
head.GPM1 + head.GPM2 + head.GPM3,
head.GLV1,
head.GLV2,
head.GVCD,
)
)
# Coordinates
if self.navigation_block is not None:
self._get_crs()
self._set_coordinates()
def gdinfo(self):
"""Return grid information."""
return self._gdinfo
def project_point(self, lon, lat):
"""Project geographic corrdinates.
Parameters
----------
lon : float or array-like of float
Longitude of point(s).
lat : float or array-like of float
Latitude of point(s).
Returns
-------
tuple
Tuple containing lists of x and y projected
coordinate values.
"""
return self._transform(lon, lat)
    def _get_crs(self):
        """Create CRS from GEMPAK navigation block.

        Maps the GEMPAK projection name to a PROJ name/type pair and
        builds ``self.crs`` from the navigation block's angles.
        """
        gemproj = self.navigation_block.projection
        if gemproj not in GEMPROJ_TO_PROJ:
            raise NotImplementedError('{} projection not implemented.'
                                      .format(gemproj))
        proj, ptype = GEMPROJ_TO_PROJ[gemproj]

        ellps = 'sphere'  # Kept for posterity
        earth_radius = 6371200.0  # R takes precedence over ellps

        if ptype == 'azm':
            # azimuthal: angle1/angle2 give the projection center
            lat_0 = self.navigation_block.proj_angle1
            lon_0 = self.navigation_block.proj_angle2
            rot = self.navigation_block.proj_angle3
            if rot != 0:
                logger.warning('Rotated projections currently '
                               'not supported. Angle3 (%7.2f) ignored.', rot)
            self.crs = pyproj.CRS.from_dict({'proj': proj,
                                             'lat_0': lat_0,
                                             'lon_0': lon_0,
                                             'ellps': ellps,
                                             'R': earth_radius})
        elif ptype == 'cyl':
            if gemproj != 'MCD':
                # plain cylindrical: center from angle1/angle2
                lat_0 = self.navigation_block.proj_angle1
                lon_0 = self.navigation_block.proj_angle2
                rot = self.navigation_block.proj_angle3
                if rot != 0:
                    logger.warning('Rotated projections currently '
                                   'not supported. Angle3 (%7.2f) ignored.', rot)
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lat_0': lat_0,
                                                 'lon_0': lon_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
            else:
                # MCD: scale factor derived from the mean latitude of the
                # grid corners unless angle1 supplies one explicitly
                avglat = (self.navigation_block.upper_right_lat
                          + self.navigation_block.lower_left_lat) * 0.5
                k_0 = (1 / math.cos(avglat)
                       if self.navigation_block.proj_angle1 == 0
                       else self.navigation_block.proj_angle1
                       )
                lon_0 = self.navigation_block.proj_angle2
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lat_0': avglat,
                                                 'lon_0': lon_0,
                                                 'k_0': k_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
        elif ptype == 'con':
            # conic: angle1/angle3 are the standard parallels
            lat_1 = self.navigation_block.proj_angle1
            lon_0 = self.navigation_block.proj_angle2
            lat_2 = self.navigation_block.proj_angle3
            self.crs = pyproj.CRS.from_dict({'proj': proj,
                                             'lon_0': lon_0,
                                             'lat_1': lat_1,
                                             'lat_2': lat_2,
                                             'ellps': ellps,
                                             'R': earth_radius})
        elif ptype == 'obq':
            lon_0 = self.navigation_block.proj_angle1
            if gemproj == 'UTM':
                # derive the UTM zone (1-60) from the central longitude
                zone = np.digitize((lon_0 % 360) / 6 + 1, range(1, 61), right=True)
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'zone': zone,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
            else:
                self.crs = pyproj.CRS.from_dict({'proj': proj,
                                                 'lon_0': lon_0,
                                                 'ellps': ellps,
                                                 'R': earth_radius})
    def _set_coordinates(self):
        """Use GEMPAK navigation block to define coordinates.

        Defines geographic and projection coordinates for the object.
        """
        transform = pyproj.Proj(self.crs)
        self._transform = transform
        # project the grid corner points to anchor the x/y axes
        llx, lly = transform(self.navigation_block.lower_left_lon,
                             self.navigation_block.lower_left_lat)
        urx, ury = transform(self.navigation_block.upper_right_lon,
                             self.navigation_block.upper_right_lat)
        # evenly spaced projected coordinates spanning the grid
        # (self.kx/self.ky are grid dimensions -- set elsewhere on self)
        self.x = np.linspace(llx, urx, self.kx, dtype=np.float32)
        self.y = np.linspace(lly, ury, self.ky, dtype=np.float32)
        xx, yy = np.meshgrid(self.x, self.y, copy=False)
        # inverse-project to obtain 2-D lon/lat arrays for every cell
        self.lon, self.lat = transform(xx, yy, inverse=True)
        self.lon = self.lon.astype(np.float32)
        self.lat = self.lat.astype(np.float32)
def _unpack_grid(self, packing_type, part):
"""Read raw GEMPAK grid integers and unpack into floats."""
if packing_type == PackingType.none:
lendat = self.data_header_length - part.header_length - 1
if lendat > 1:
buffer_fmt = '{}{}f'.format(self.prefmt, lendat)
buffer = self._buffer.read_struct(struct.Struct(buffer_fmt))
grid = np.zeros(self.ky * self.kx, dtype=np.float32)
grid[...] = buffer
else:
grid = None
return grid
elif packing_type == PackingType.nmc:
raise NotImplementedError('NMC unpacking not supported.')
# integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'), ('kxky', 'i')]
# real_meta_fmt = [('reference', 'f'), ('scale', 'f')]
# self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
# self.prefmt,
# 'GridMetaInt'))
# self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
# self.prefmt,
# 'GridMetaReal'))
# grid_start = self._buffer.set_mark()
elif packing_type == PackingType.diff:
integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'),
('kxky', 'i'), ('kx', 'i')]
real_meta_fmt = [('reference', 'f'), ('scale', 'f'), ('diffmin', 'f')]
self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
self.prefmt,
'GridMetaInt'))
self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
self.prefmt,
'GridMetaReal'))
# grid_start = self._buffer.set_mark()
imiss = 2**self.grid_meta_int.bits - 1
lendat = self.data_header_length - part.header_length - 8
packed_buffer_fmt = '{}{}i'.format(self.prefmt, lendat)
packed_buffer = self._buffer.read_struct(struct.Struct(packed_buffer_fmt))
grid = np.zeros((self.ky, self.kx), dtype=np.float32)
if lendat > 1:
iword = 0
ibit = 1
first = True
for j in range(self.ky):
line = False
for i in range(self.kx):
jshft = self.grid_meta_int.bits + ibit - 33
idat = self._fortran_ishift(packed_buffer[iword], jshft)
idat &= imiss
if jshft > 0:
jshft -= 32
idat2 = self._fortran_ishift(packed_buffer[iword + 1], jshft)
idat |= idat2
ibit += self.grid_meta_int.bits
if ibit > 32:
ibit -= 32
iword += 1
if (self.grid_meta_int.missing_flag and idat == imiss):
grid[j, i] = self.prod_desc.missing_float
else:
if first:
grid[j, i] = self.grid_meta_real.reference
psav = self.grid_meta_real.reference
plin = self.grid_meta_real.reference
line = True
first = False
else:
if not line:
grid[j, i] = plin + (self.grid_meta_real.diffmin
+ idat * self.grid_meta_real.scale)
line = True
plin = grid[j, i]
else:
grid[j, i] = psav + (self.grid_meta_real.diffmin
+ idat * self.grid_meta_real.scale)
psav = grid[j, i]
else:
grid = None
return grid
elif packing_type in [PackingType.grib, PackingType.dec]:
integer_meta_fmt = [('bits', 'i'), ('missing_flag', 'i'), ('kxky', 'i')]
real_meta_fmt = [('reference', 'f'), ('scale', 'f')]
self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
self.prefmt,
'GridMetaInt'))
self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
self.prefmt,
'GridMetaReal'))
# grid_start = self._buffer.set_mark()
lendat = self.data_header_length - part.header_length - 6
packed_buffer_fmt = '{}{}i'.format(self.prefmt, lendat)
grid = np.zeros(self.grid_meta_int.kxky, dtype=np.float32)
packed_buffer = self._buffer.read_struct(struct.Struct(packed_buffer_fmt))
if lendat > 1:
imax = 2**self.grid_meta_int.bits - 1
ibit = 1
iword = 0
for cell in range(self.grid_meta_int.kxky):
jshft = self.grid_meta_int.bits + ibit - 33
idat = self._fortran_ishift(packed_buffer[iword], jshft)
idat &= imax
if jshft > 0:
jshft -= 32
idat2 = self._fortran_ishift(packed_buffer[iword + 1], jshft)
idat |= idat2
if (idat == imax) and self.grid_meta_int.missing_flag:
grid[cell] = self.prod_desc.missing_float
else:
grid[cell] = (self.grid_meta_real.reference
+ (idat * self.grid_meta_real.scale))
ibit += self.grid_meta_int.bits
if ibit > 32:
ibit -= 32
iword += 1
else:
grid = None
return grid
elif packing_type == PackingType.grib2:
raise NotImplementedError('GRIB2 unpacking not supported.')
# integer_meta_fmt = [('iuscal', 'i'), ('kx', 'i'),
# ('ky', 'i'), ('iscan_mode', 'i')]
| |
# repo: jaredlunde/cargo-orm
"""
`Cargo ORM Relationships`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2015 <NAME> © The MIT License (MIT)
http://github.com/jaredlunde
"""
import copy
import importlib
from functools import lru_cache
from pydoc import locate, ErrorDuringImport
from vital.cache import cached_property, memoize
from vital.debug import preprX, get_obj_name
from cargo.fields import *
from cargo.etc.types import *
from cargo.expressions import Clause, safe
from cargo.exceptions import RelationshipImportError, PullError
__all__ = (
'BaseRelationship',
'ForeignKey',
'Relationship',
'Reference'
)
def _import_from(owner_module, string):
obj = locate(string)
if obj is None:
full_string = "{}.{}".format(owner_module, string)
obj = locate(full_string)
if obj is None:
*name, attr = full_string.split(".")
name = ".".join(name)
try:
mod = importlib.import_module(name)
obj = getattr(mod, attr)
except (AttributeError, ImportError):
*name, attr_sup = name.split(".")
name = ".".join(name)
mod = importlib.import_module(name)
obj = getattr(getattr(mod, attr_sup), attr)
return obj
class _ForeignObject(object):
    """ Mixin providing pull() for objects that carry a |ref|
        :class:Reference and a |value| to match it against.
    """
    __slots__ = ()

    def pull(self, *args, dry=False, naked=False, **kwargs):
        """ Fetches the referenced model row whose referenced field
            equals this object's value.
            @dry: (#bool) put the query in dry-run mode before get()
            @naked: (#bool) request a naked result before get()
            @*args and @**kwargs are forwarded to the model's get()
        """
        reference = self.ref
        query = reference.model.where(reference.field == self.value)
        if naked:
            query.naked()
        if dry:
            query.dry()
        return query.get(*args, **kwargs)
class BaseRelationship(object):
    """ Shared functionality for :class:ForeignKey and
        :class:Relationship: resolving the python object a dotted-path
        string points to, with a uniform import error.
    """
    def _raise_forge_error(self, path, additional=None):
        """ Raises :class:RelationshipImportError for the object @path,
            optionally appending the @additional detail message.
        """
        raise RelationshipImportError("Could not import object at {}. {}"
                                      .format(path, additional or ""))

    def _find(self, string):
        """ Imports the object located at the python path @string,
            trying it relative to the owner model's module as a fallback.
        """
        try:
            obj = _import_from(self._owner.__module__, string)
        except Exception as e:
            # BUG FIX: a bare `except:` previously swallowed every
            # exception (including SystemExit/KeyboardInterrupt) and
            # discarded the underlying reason; narrow the handler and
            # surface the original error in the raised import error.
            self._raise_forge_error(string, additional=str(e))
        return obj
class Reference(object):
    """ Proxy object attached to :class:ForeignKey fields as |ref|,
        giving access to the referenced model and field. Attribute and
        item access largely fall through to the referenced model
        (see __getattr__/__setattr__/__setitem__ below).
    """
    def __init__(self, model, field_name, constraints=None, schema=None):
        """`Reference`
            ==================================================================
            This object is added to :class:ForeignKey fields as
            the |ref| property. It provides access to the model
            and field objects which the foreign key is a reference to.
            @model: the :class:Model being referenced; instantiated
                lazily by the |model| cached property
            @field_name: (#str) attribute name of the referenced field
                within @model
            @constraints: (#list) of :class:Clause foreign key constraints
            @schema: (#str) schema passed to @model when it is instantiated
        """
        self._model = model
        self._forged = False
        self._schema = schema
        self.field_name = field_name
        self.constraints = constraints or []

    __repr__ = preprX('_model', 'field_name')

    def __getattr__(self, name):
        # Missing attributes whose names end with '__' but do not start
        # with '__' are looked up on the referenced model.
        # NOTE(review): any other missing attribute implicitly returns
        # None here instead of raising AttributeError -- confirm intended.
        try:
            return self.__getattribute__(name)
        except AttributeError:
            if not name.startswith('__') and name.endswith('__'):
                return self.model.__getattribute__(name)

    def __setattr__(self, name, value):
        # Set the attribute on this object when possible; otherwise the
        # assignment is delegated to the referenced model.
        try:
            super().__setattr__(name, value)
        except AttributeError:
            self.model.__setattr__(name, value)

    def __setitem__(self, name, value):
        # Item assignment is proxied to the referenced model
        self.model[name] = value

    def __delitem__(self, name):
        # Item deletion is proxied to the referenced model
        del self.model[name]

    def __getstate__(self):
        """ For pickling """
        return self.__dict__.copy()

    def add_constraint(self, name, val=None):
        """ Adds foreign key constraints to the reference. This is used
            primarily with :class:cargo.builder.Builder
            @name: (#str) the clause, e.g. |name='MATCH PARTIAL'|
            @val: (#str) the clause value,
                e.g. |name='on update', val='cascade'|
            ..
                # OPTIONS:
                # [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
                # [ ON DELETE action ] [ ON UPDATE action ]
                field.ref.add_constraint('on delete', 'cascade')
                field.ref.add_constraint('on update', 'cascade')
            ..
            -> (self) so calls can be chained
        """
        if not isinstance(name, Clause):
            # NOTE(review): `_empty` is not defined in this module --
            # presumably provided by one of the star imports above; verify
            clause = Clause(name, safe(val) if val is not None else _empty)
        else:
            clause = name
        # Skip clauses that are already present (compared by string form)
        for c in self.constraints:
            if str(c) == str(clause):
                return self
        self.constraints.append(clause)
        return self

    def on_update(self, val):
        # Shorthand for an ON UPDATE constraint, e.g. on_update('cascade')
        return self.add_constraint('ON UPDATE', val)

    def on_delete(self, val):
        # Shorthand for an ON DELETE constraint, e.g. on_delete('cascade')
        return self.add_constraint('ON DELETE', val)

    @cached_property
    def model(self):
        """ The referenced :class:Model, instantiated lazily with the
            schema given at construction time """
        # self._forged = True
        return self._model(schema=self._schema)

    @cached_property
    def field(self):
        """ The referenced :class:Field, looked up on the instantiated
            model by @field_name """
        field = getattr(self.model, self.field_name)
        return field

    def copy(self):
        # NOTE(review): this passes the *instantiated* model (self.model)
        # rather than the original self._model, and drops self._schema --
        # confirm whether both are intentional before relying on copies.
        cls = self.__class__(self.model, self.field_name, self.constraints)
        try:
            cls.model = self.model.copy()
        except AttributeError:
            pass
        # cls._forged = self._forged
        return cls

    __copy__ = copy
class ForeignKeyState(object):
    """ Lightweight state object used when pickling and unpickling
        forged foreign key fields: it records the constructor arguments,
        relation name and reference path needed to rebuild the field.
    """
    __slots__ = ('args', 'kwargs', 'relation', 'ref')

    def __init__(self, args, kwargs, relation, ref):
        # Assign each slot from the matching constructor argument
        for slot, value in zip(self.__slots__,
                               (args, kwargs, relation, ref)):
            setattr(self, slot, value)
@memoize
def get_cls(cls):
    """ Builds (and memoizes) a subclass of the field type @cls which
        also carries the |ref| :class:Reference and the pull() behavior
        of :class:_ForeignObject. The subclass reuses @cls's name so it
        is indistinguishable from the plain field type in repr()s.
    """
    class FKey(cls):
        pull = _ForeignObject.pull

        def copy(self, *args, **kwargs):
            # Copy the underlying field, then carry over the reference
            # and the pickling state
            new_field = Field.copy(self, *args, **kwargs)
            new_field.ref = self.ref.copy()
            new_field._state = self._state
            return new_field

    FKey.__name__ = cls.__name__
    return FKey
class ForeignKey(BaseRelationship, _ForeignObject):
    """ ===============================================================
        ``Usage Example``
        Adds a Foreign Key in Images that references the user id in Users
        ..
            from cargo import *

            class Users(Model):
                uid = UID()

            class Images(Model):
                uid = UID()
                owner = ForeignKey('Users.uid', index=True, not_null=True,
                                   relation='images')
        ..
        The Foreign Key field contains a reference to its parent. When
        the 'relation' keyword argument is provided, the Foreign Key field
        will create a :class:Relationship reference back to itself from
        its parent using the provided argument value as the attribute name.
        ..
            images = Images()
            users = Users()
            isinstance(images.owner.ref.field, users.uid)
            # True
            isinstance(images.owner.ref.model, Users)
            # True
            isinstance(users.images['owner'], images.owner.__class__)
            # True
        ..
    """
    def __init__(self, ref, *args, relation=None, on_delete=None,
                 on_update=None, **kwargs):
        """`Foreign Keys`
            ==================================================================
            @ref: (#str) python path to the :class:Field which
                this foreign key is a reference to e.g.
                |coolapp.model.User.uid|
            @relation: (#str) attribute name of :class:Relationship to set
                in the model which @ref is referencing
            @on_delete: (#str) adds ON DELETE constraint to the foreign key
                field
            @on_update: (#str) adds ON UPDATE constraint to the foreign key
                field
            @*args and @**kwargs will get passed to the the :class:Field
        """
        # The owner model and attribute are unknown until forge() is
        # called by the owning model
        self._owner = None
        self._owner_attr = None
        self._ref = ref
        self._relation = relation
        self._on_delete = on_delete
        self._on_update = on_update
        self._args = args
        self._kwargs = kwargs

    __repr__ = preprX('_ref')

    def __getstate__(self):
        """ For pickling: a plain copy of the instance dict. """
        return self.__dict__.copy()

    def __setstate__(self, state):
        # Parameter renamed from `dict` to avoid shadowing the builtin
        self.__dict__ = state

    def _find_ref(self, string):
        """ Finds the referenced :class:Field based on @string, a dotted
            python path whose final segment is the field attribute name.
        """
        string = string.split(".")
        field_name = string[-1]
        string = ".".join(string[:-1])
        try:
            obj = getattr(self._find(string), field_name)
        except AttributeError:
            self._raise_forge_error(
                string,
                'Field `{}` not found in `{}`'.format(field_name, string))
        if isinstance(obj, Field):
            return obj
        elif isinstance(obj, ForeignKey):
            # A reference to another foreign key resolves to its field
            return obj.get_field()
        else:
            self._raise_forge_error(string,
                                    'The object found was not a Field.')

    @cached_property
    def ref(self):
        """ The referenced :class:Field, resolved lazily from @_ref """
        return self._find_ref(self._ref)

    @cached_property
    def ref_model(self):
        """ The :class:Model which owns the referenced field """
        return self._find('.'.join(self._ref.split(".")[:-1]))

    def get_field(self):
        """ Gets the ForeignKeyField object, which inherits the traits of
            the owner field. ForeignKeyField provides an attribute
            named |ref| containing the :class:Reference to which
            the foreign key refers. :class:Reference provides both the
            model and the field referenced.
        """
        _args, _kwargs = self._args, self._kwargs.copy()
        _owner, _owner_attr = self._owner, self._owner_attr
        _on_delete, _on_update = self._on_delete, self._on_update
        _relation = self._relation
        _ref = self._ref
        _ref_model, _ref_attr = self.ref_model, _ref.split(".")[-1]
        # `primary` is passed explicitly to the field constructor, so
        # remove it from the keyword arguments (pop == the previous
        # check-then-delete, on the local copy of the kwargs)
        primary = _kwargs.pop('primary', False)
        # Build (memoized) a dynamic subclass of the referenced field's
        # type which also carries |ref| and the pull() behavior
        _class = get_cls(self.ref.__class__)
        field = _class(*_args, primary=primary, **_kwargs)
        field.table = _owner.table
        field.field_name = _owner_attr
        field.ref = Reference(_ref_model, _ref_attr,
                              schema=_owner.schema)
        if _on_delete is not None:
            field.ref.on_delete(_on_delete)
        if _on_update is not None:
            field.ref.on_update(_on_update)
        # Preserve the construction state so copies and pickles can be
        # rebuilt faithfully
        field._state = ForeignKeyState(_args, _kwargs, _relation, _ref)
        return field

    def _create_relation(self):
        """ Creates the :class:Relationship object in the parent model
            at the attribute specified in :prop:_relation
        """
        _owner = '%s.%s' % (self._owner.__class__.__module__,
                            self._owner.__class__.__name__)
        objname = "%s.%s" % (_owner, self._owner_attr)
        setattr(self.ref_model, self._relation, Relationship(objname))

    def forge(self, owner, attribute):
        """ Called when the @owner :class:Model is initialized. Makes
            this relationship 'official' and usable.
            @owner: (:class:Model) owner model which is forging this
                relationship
            @attribute: (#str) name of the attribute in the owner where the
                relationship resides
            -> the forged field, which is also added to @owner
        """
        self._owner = owner
        self._owner_attr = attribute
        field = self.get_field()
        owner._add_field(field)
        if self._relation:
            self._create_relation()
        return field

    def copy(self):
        """ Returns a new, unforged :class:ForeignKey built from the same
            construction arguments.
            BUG FIX: previously only @_ref and @relation were carried
            over -- the field *args/**kwargs and the ON DELETE/ON UPDATE
            constraints were silently dropped from copies.
        """
        return self.__class__(self._ref,
                              *self._args,
                              relation=self._relation,
                              on_delete=self._on_delete,
                              on_update=self._on_update,
                              **self._kwargs)
class Relationship(BaseRelationship):
""" ======================================================================
``Usage Example``
Forge a |JOIN| relationship between two models.
..
from cargo import *
class Users(Model):
uid = UID()
images = Relationship('coolapp.model.Images.owner')
class Images(Model):
uid = UID()
owner = ForeignKey('Users.uid')
..
|FROM users JOIN images ON images.owner = users.uid|
|FROM images JOIN users ON users.uid = images.owner|
This is the same as:
..
from cargo import *
class Users(Model):
uid = UID()
class Images(Model):
uid = UID()
owner = ForeignKey('coolapp.model.Users.uid',
relation="images")
..
======================================================================
``Pull data from a relationship``
..
user = Users(uid=1761)
user.pull() # Pulls all relationship information
print(user.images['owner'])
..
|1761|
This is the same as:
..
user = Users(uid=1761)
user.images.pull() # Pulls all relationship information
# for images
print(user.images['owner'])
..
|1761|
"""
def __init__(self, foreign_key):
"""`Relationships`
==================================================================
This class must be used with :class:ForeignKey. It inherits the
the :class:Model which the foreign key belongs to and has access
to all methods and properties of the model. The foreign key can
be accessed through :prop:foreign_key.
@foreign_key: (#str) full python path to the foreign key which
possesses the |JOIN| information to the relationship e.g.
|coolapp.models.Images.owner|
"""
self._owner | |
# Shift the boundaries of the grid for the Galilean frame
if self.use_galilean:
self.shift_galilean_boundaries( 0.5*dt )
# Get the charge density at t = (n+1) dt
self.deposit('rho_next', exchange=(use_true_rho is True))
# Correct the currents (requires rho at t = (n+1) dt )
if correct_currents:
fld.correct_currents( check_exchanges=(self.comm.size > 1) )
if self.comm.size > 1:
# Exchange the guard cells of corrected J between domains
# (If correct_currents is False, the exchange of J
# is done in the function `deposit`)
fld.spect2partial_interp('J')
self.comm.exchange_fields(fld.interp, 'J', 'add')
fld.partial_interp2spect('J')
fld.exchanged_source['J'] = True
# Push the fields E and B on the spectral grid to t = (n+1) dt
fld.push( use_true_rho, check_exchanges=(self.comm.size > 1) )
if correct_divE:
fld.correct_divE()
# Move the grids if needed
if self.comm.moving_win is not None:
# Shift the fields is spectral space and update positions of
# the interpolation grids
self.comm.move_grids(fld, ptcl, dt, self.time)
# Handle boundaries for the E and B fields:
# - MPI exchanges for guard cells
# - Damp fields in damping cells
# - Update the fields in interpolation space
# (needed for the field gathering at the next iteration)
self.exchange_and_damp_EB()
# Increment the global time and iteration
self.time += dt
self.iteration += 1
# Write the checkpoints if needed
for checkpoint in self.checkpoints:
checkpoint.write( self.iteration )
# End of the N iterations
# -----------------------
# Finalize PIC loop
# Get the charge density and the current from spectral space.
fld.spect2interp('J')
if (not fld.exchanged_source['J']) and (self.comm.size > 1):
self.comm.exchange_fields(self.fld.interp, 'J', 'add')
fld.spect2interp('rho_prev')
if (not fld.exchanged_source['rho_prev']) and (self.comm.size > 1):
self.comm.exchange_fields(self.fld.interp, 'rho', 'add')
# Receive simulation data from GPU (if CUDA is used)
if self.use_cuda:
receive_data_from_gpu(self)
# Print the measured time taken by the PIC cycle
if show_progress and (self.comm.rank==0):
progress_bar.print_summary()
    def deposit( self, fieldtype, exchange=False,
                 update_spectral=True, species_list=None ):
        """
        Deposit the charge or the currents to the interpolation grid
        and then to the spectral grid.

        Parameters
        ----------
        fieldtype: str
            The designation of the spectral field that
            should be changed by the deposition
            Either 'rho_prev', 'rho_next' or 'J'
            (or 'rho_next_xy' and 'rho_next_z' for cross-deposition)

        exchange: bool
            Whether to exchange guard cells via MPI before transforming
            the fields to the spectral grid. (The corresponding flag in
            fld.exchanged_source is set accordingly.)

        update_spectral: bool
            Whether to update the value of the deposited field in
            spectral space.

        species_list: list of `Particles` objects, or None
            The species that should deposit their charge/current.
            If this is None, all species (and antennas) deposit.

        Raises
        ------
        ValueError
            If `fieldtype` does not start with 'rho' and is not 'J'.
        """
        # Shortcut
        fld = self.fld

        # If no species_list is provided, all species and antennas deposit
        if species_list is None:
            species_list = self.ptcl
            antennas_list = self.laser_antennas
        else:
            # Otherwise only the specified species deposit
            # (note that the laser antennas do not deposit in this case)
            antennas_list = []

        # Deposit charge or currents on the interpolation grid

        # Charge
        if fieldtype.startswith('rho'): # e.g. rho_next, rho_prev, etc.
            # All 'rho_*' designations share the single 'rho' deposition
            # buffer on the interpolation grid
            fld.erase('rho')
            # Deposit the particle charge
            for species in species_list:
                species.deposit( fld, 'rho' )
            # Deposit the charge of the virtual particles in the antenna
            for antenna in antennas_list:
                antenna.deposit( fld, 'rho' )
            # Sum contribution from each CPU threads (skipped on GPU)
            fld.sum_reduce_deposition_array('rho')
            # Divide by cell volume
            fld.divide_by_volume('rho')
            # Exchange guard cells if requested by the user
            if exchange and self.comm.size > 1:
                self.comm.exchange_fields(fld.interp, 'rho', 'add')

        # Currents
        elif fieldtype == 'J':
            fld.erase('J')
            # Deposit the particle current
            for species in species_list:
                species.deposit( fld, 'J' )
            # Deposit the current of the virtual particles in the antenna
            for antenna in antennas_list:
                antenna.deposit( fld, 'J' )
            # Sum contribution from each CPU threads (skipped on GPU)
            fld.sum_reduce_deposition_array('J')
            # Divide by cell volume
            fld.divide_by_volume('J')
            # Exchange guard cells if requested by the user
            if exchange and self.comm.size > 1:
                self.comm.exchange_fields(fld.interp, 'J', 'add')

        else:
            raise ValueError('Unknown fieldtype: %s' %fieldtype)

        # Get the charge or currents on the spectral grid
        if update_spectral:
            fld.interp2spect( fieldtype )
            if self.filter_currents:
                fld.filter_spect( fieldtype )

        # Set the flag to indicate whether these fields have been exchanged
        fld.exchanged_source[ fieldtype ] = exchange
    def cross_deposit( self, move_positions ):
        """
        Perform cross-deposition. This function should be called
        when the particles are at time n+1/2.

        The particles are successively pushed to mixed times (z and x/y
        at different timesteps), 'rho_next_xy' and 'rho_next_z' are
        deposited in these configurations, and the particles are finally
        pushed back to n+1/2.

        Parameters
        ----------
        move_positions: bool
            Whether to move the positions of regular particles
        """
        dt = self.dt
        # Push the particles: z[n+1/2], x[n+1/2] => z[n], x[n+1]
        # NOTE(review): the signed x_push/y_push/z_push factors appear to
        # select the direction of the push along each coordinate, so that
        # z moves backward while x/y move forward -- confirm against the
        # signature of Particles.push_x
        if move_positions:
            for species in self.ptcl:
                species.push_x( 0.5*dt, x_push= 1., y_push= 1., z_push= -1. )
            for antenna in self.laser_antennas:
                antenna.push_x( 0.5*dt, x_push= 1., y_push= 1., z_push= -1. )
        # Shift the boundaries of the grid for the Galilean frame
        if self.use_galilean:
            self.shift_galilean_boundaries( -0.5*dt )
        # Deposit rho_next_xy
        self.deposit( 'rho_next_xy' )

        # Push the particles: z[n], x[n+1] => z[n+1], x[n]
        if move_positions:
            for species in self.ptcl:
                species.push_x(dt, x_push= -1., y_push= -1., z_push= 1.)
            for antenna in self.laser_antennas:
                antenna.push_x(dt, x_push= -1., y_push= -1., z_push= 1.)
        # Shift the boundaries of the grid for the Galilean frame
        if self.use_galilean:
            self.shift_galilean_boundaries( dt )
        # Deposit rho_next_z
        self.deposit( 'rho_next_z' )

        # Push the particles: z[n+1], x[n] => z[n+1/2], x[n+1/2]
        # (restores the particles to their incoming n+1/2 positions)
        if move_positions:
            for species in self.ptcl:
                species.push_x(0.5*dt, x_push= 1., y_push= 1., z_push= -1.)
            for antenna in self.laser_antennas:
                antenna.push_x(0.5*dt, x_push= 1., y_push= 1., z_push= -1.)
        # Shift the boundaries of the grid for the Galilean frame
        if self.use_galilean:
            self.shift_galilean_boundaries( -0.5*dt )
    def exchange_and_damp_EB(self):
        """
        Handle boundaries for the E and B fields:
        - MPI exchanges for guard cells
        - Damp fields in damping cells (in z, and in r if PML are used)
        - Update the fields in interpolation space
          (so that they are ready for field gathering at the next step)
        """
        # Shortcut
        fld = self.fld

        # - Get fields in interpolation space (or partial interpolation space)
        #   to prepare for damp/exchange
        if self.use_pml:
            # Exchange/damp operation in z and r ; do full transform
            fld.spect2interp('E')
            fld.spect2interp('B')
            fld.spect2interp('E_pml')
            fld.spect2interp('B_pml')
        else:
            # Exchange/damp operation is purely along z; spectral fields
            # are updated by doing an iFFT/FFT instead of a full transform
            fld.spect2partial_interp('E')
            fld.spect2partial_interp('B')

        # - Exchange guard cells and damp fields (in interpolation space)
        self.comm.exchange_fields(fld.interp, 'E', 'replace')
        self.comm.exchange_fields(fld.interp, 'B', 'replace')
        self.comm.damp_EB_open_boundary( fld.interp ) # Damp along z
        if self.use_pml:
            self.comm.damp_pml_EB( fld.interp ) # Damp in radial PML

        # - Update spectral space (and interpolation space if needed)
        if self.use_pml:
            # Exchange/damp operation in z and r ; do full transform back
            fld.interp2spect('E')
            fld.interp2spect('B')
            fld.interp2spect('E_pml')
            fld.interp2spect('B_pml')
        else:
            # Exchange/damp operation is purely along z; spectral fields
            # are updated by doing an iFFT/FFT instead of a full transform
            fld.partial_interp2spect('E')
            fld.partial_interp2spect('B')
            # Get the corresponding fields in interpolation space
            fld.spect2interp('E')
            fld.spect2interp('B')
def shift_galilean_boundaries(self, dt):
"""
Shift the interpolation grids by v_comoving * dt.
(The field arrays are unchanged, only position attributes are changed.)
With the Galilean frame, in principle everything should
be solved in variables xi = z - v_comoving t, and -v_comoving
should be added to the motion of the particles. However, it
is equivalent to, instead, shift the boundaries of the grid.
"""
# Calculate shift distance over a half timestep
shift_distance = self.v_comoving * dt
# Shift the boundaries of the global domain
self.comm.shift_global_domain_positions( shift_distance )
# Shift the boundaries of the grid
for m in range(self.fld.Nm):
self.fld.interp[m].zmin += shift_distance
self.fld.interp[m].zmax += shift_distance
def add_new_species( self, q, m, n=None, dens_func=None,
p_nz=None, p_nr=None, p_nt=None,
p_zmin=-np.inf, p_zmax=np.inf,
p_rmin=0, p_rmax=np.inf,
uz_m=0., ux_m=0., uy_m=0.,
uz_th=0., ux_th=0., uy_th=0.,
continuous_injection=True ):
"""
Create a new species (i.e. an instance of `Particles`) with
charge `q` and mass `m`. Add it to the simulation (i.e. to the list
`Simulation.ptcl`), and return it, so that the methods of the
`Particles` class can be used, for this particular species.
In addition, if `n` is set, then new macroparticles will be created
within this species (in an evenly-spaced manner).
For boosted-frame simulations (i.e. where `gamma_boost`
as been passed to the `Simulation` object), all quantities that
are explicitly mentioned to be in the lab frame below are
automatically converted to the boosted frame.
.. note::
For the arguments below, it is recommended to have at least
``p_nt = 4*Nm`` (except in the case ``Nm=1``, for which
``p_nt=1`` | |
<filename>Database/DB_dev.py<gh_stars>1-10
import pprint
pp = pprint.PrettyPrinter(indent=3 ,depth=6,width=100)
import pickle
def add_compsdata():
    """Return the hard-coded chatbot response data for the Computer
    Engineering (comps) department.

    NOTE(review): each category value below is written as ADJACENT string
    literals inside { } braces; Python implicitly concatenates them into
    ONE string (with no separating spaces) and wraps it in a one-element
    #set -- e.g. comps["comps"]["Faculty"] is a set holding a single
    merged string. Confirm whether the consumers expect this, or whether
    these were meant to be lists of separate sentences.
    """
    comps = { "comps" :{
        "Faculty" : {
            "Dr.<NAME> is the Head of Department"
            "The Computer Department has a total of 30 faculty members consisting of highly skilled professors and assistant professors"
            "For more information you can visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/contact-us/faculty-directory'></a>"
            #"https://kjsce.somaiya.edu/en/contact-us/faculty-directory"
        },
        "infrastructure" :{
            "The Department houses classrooms and laboratories with modern infrastructure to facilitate their learning. The Teaching-Learning-Evaluation paradigm is a mix of traditional as well as active learning pedagogy and use of contemporary Information and communications technology (ICT) tools. The academic ambience encourages research, development and innovation activities."
            "For detailed information please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering'>Infrastructure</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering"
        },
        "syllabus" : {
            "The syllabus is designed having wide choice for branch specific electives and more number of open or interdisciplinary electives."
            "Extensive use of Open Sources Technologies for Teaching – Learning – Evaluation"
            "Choice based Audit Courses, Add on Credit Courses, Add on Audit Courses, Exposure Courses, etc"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering'>syllabus</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering"
        },
        "placement" : {
            "The Placement Process at KJSCE acts as a link between the expectations of the Recruiters with the dreams of the students. "
            "In year 2019-2020, we had 84 Companies visiting for recruitment."
            "The highest package received was Rs.18.75 lpa and overall average of Rs.5.55 lpa with over 600+ successfull placements."
            "For more information please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/placement/overview'>placement</a>"
            #"https://kjsce.somaiya.edu/en/placement/overview"
        },
        "student" : {
            "We at the K J Somaiya College of Engineering continuously strive to excel in academic along with various Co-curricular and extra-curricular activities. Extracurricular activities are not the components of the academic curriculum but an integral part of the educational environment for the intellectual development of an engineer. It will explore hidden talent inside you, it will make you more adaptive and flexible and help you to become a smart engineer not only just an engineering graduate."
            "Codecell - KJSCE Codecell, a Codechef campus chapter was founded in 2014. The main goals of K J Somaiya College of Engineering Codecell are to promote a culture of competitive coding and to improve participation of the college in the prestigious competition ACM ICPC."
            "CSI - The Computer Society of India established in the year 1965 is today the largest IT professionals’ society in India. Keeping in mind the interest of IT professionals & computer users, CSI works towards making a profession an area of choice among all sections of society."
            "For information regarding all the councils and student organisations please visit the following link:"
            "<a href='https://kjsce.somaiya.edu/en/students-association'>student_body</a>"
            #"https://kjsce.somaiya.edu/en/students-association"
        },
        "programes":{
            "Computer engineers are involved in the design of Computer-based systems to address highly specialized and specific application needs. The Department of Computer Engineering facilitates students to apply principles of Computer technology across a wide variety of fields for adapting to ever changing multidisciplinary converging technologies."
            "For detailed information about the programmes offered please visit the following link:"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering'>programes</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-computer-engineering"
        },
        "admissions" :{
            "For admission , intrested student are required to give Somaiya Entrance Test-Engineering(SET-E)."
            "We are delighted that you are considering an B.Tech programme at K J Somaiya College of Engineering. Please read all instructions and requirements carefully to ensure a complete and correct application on the provided link:"
            "<a href='https://kjsce.somaiya.edu/en/admission/btech'>admission</a>"
            #"https://kjsce.somaiya.edu/en/admission/btech"
            "Feel free to contact us on <EMAIL> in case of any queries."
        }
    }
    }
    return comps
def add_etrxdata():
    """Return the hard-coded chatbot response data for the Electronics
    (etrx) department.

    NOTE(review): as in add_compsdata, each category value is a set of
    ADJACENT string literals which Python implicitly concatenates into a
    single merged string (with no separating spaces) -- each category is
    therefore a one-element #set. Confirm this is what consumers expect.
    """
    ETRX = { "etrx" :{
        "Faculty" :{
            "Dr.<NAME> is the Head of Department."
            "The Electronics Department has a total of 28 faculty members consisting of highly skilled professors and assistant professors who possess proficiency in both hardware and software areas. "
            "For more information you can visit the given link :- "
            "<a href='https://kjsce.somaiya.edu/en/contact-us/faculty-directory'>Faculty</a>"
        },
        "infrastructure" :{
            "The electronics department has multiple fully equipped laborataries with state of art instruments for the students to have hands on experience in domains like Automation, Robotics,Machine Learning, Embedded systems, IOT with no hardware limitations."
            "For detailed information please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering'>infrastructure</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering"
        },
        "syllabus" : {
            "The syllabus of Electronics department has a focused curriculum with a rigor towards hands-on experience and industry exposure. The syllabus is designed in consultation with Industrial experts and academia, to address the current industrial and social needs."
            "Facility Centre with Festo for Industrial Automation and with National Instruments LabView are the key elements."
            "Curriculum mapped to address current challenges with industry, designed through academia, industry and students participation"
            "Focuses on a wide variety of core courses, electives including cutting edge technologies like Artificial Intelligence, Big Data Analytics, Machine Learning, IoT and more."
            "For information about the detailed structure of syllabus and the subjects included, visit the link provided below!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering'>syllabus</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering"
        },
        "placement" : {
            "The Placement Process at KJSCE acts as a link between the expectations of the Recruiters with the dreams of the students. "
            "In year 2019-2020, we had 84 Companies visiting for recruitment."
            "The highest package received was Rs.18.75 lpa and overall average of Rs.5.55 lpa with over 600+ successfull placements."
            "For more information please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/placement/overview'>placement</a>"
            #"https://kjsce.somaiya.edu/en/placement/overview"
        },
        "student" : {
            "We at the K J Somaiya College of Engineering continuously strive to excel in academic along with various Co-curricular and extra-curricular activities. Extracurricular activities are not the components of the academic curriculum but an integral part of the educational environment for the intellectual development of an engineer. It will explore hidden talent inside you, it will make you more adaptive and flexible and help you to become a smart engineer not only just an engineering graduate."
            "EESA - The Electronics Engineers Students’ Association was established in the year 1988. The association promotes activities such as paper presentations, quizzes, seminars, etc.It arranges for preparatory guidance lectures and also conducts mock tests and group discussions for CAT, GRE etc."
            "ISTE - The ISTE Student Chapter (MH 60) was established in 2000-2001 and it was inaugurated on Wednesday, 24th January 2001 by the Chief guest <NAME>, Principal, MGM’s JNCOE, Aurangabad."
            "For information regarding all the councils and student organisations please visit the following link:"
            "<a href='https://kjsce.somaiya.edu/en/students-association'>student body</a>"
            #"https://kjsce.somaiya.edu/en/students-association"
        },
        "programes" :{
            "The curriculum offers a variety of audit courses, multi-disciplinary and core Electives. Faculty Members are trained in ICT Tools, pedagogy features, and innovative assessment methods along with sound technical base."
            "Students gather a variety of learning experiences, where concepts are taught to the students through simulations, models, peer learning, personalized adaptive learning, etc. transforming students into industry-ready, competent Professionals."
            "For more details please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering'>programes</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronics-engineering"
        },
        "admissions" :{
            "For admission , intrested student are required to give Somaiya Entrance Test-Engineering(SET-E)."
            "We are delighted that you are considering an B.Tech programme at K J Somaiya College of Engineering. Please read all instructions and requirements carefully to ensure a complete and correct application on the provided link:"
            #"https://kjsce.somaiya.edu/en/admission/btech"
            "<a href='https://kjsce.somaiya.edu/en/admission/btech'>admission</a>"
            "Feel free to contact us on <EMAIL> in case of any queries."
        }
    }
    }
    return ETRX
def add_extcdata():
EXTC = {"extc" :{
'Faculty' : {
"Dr. <NAME> is the Head of Department."
"The EXTC departement has a total of 29 faculty members consisting of highly skilled professors and assistant professors who possess proficiency in both hardware and software areas."
"For more information you can visit the given link!"
"<a href='https://kjsce.somaiya.edu/en/contact-us/faculty-directory'>faculty</a>"
#"https://kjsce.somaiya.edu/en/contact-us/faculty-directory"
},
"infrastructure": {
"The department of electronics and telecommunications has multiple fully equipped laborataries with state of art instruments"
"Hands on learning through laboratory experiments to develop knowledge, skills, and values from direct experiences beyond traditional academic course."
"For more information please visit the following link!"
"<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication'>infrastructure</a>"
#"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication"
},
"syllabus" : {
"The curriculum is designed to develop technical and interpersonal skills offering wide choice for every learner."
"Curriculum mapped to | |
from .Address import Address
from .Authorization import Authorization
from .Avatar import Avatar
from .BankAccount import BankAccount
from .Contract import Contract
from .DigitalAssetAddress import DigitalAssetAddress
from .EmailAddress import EmailAddress
from .Error import Error
from .JoinOrganizationInvitation import JoinOrganizationInvitation
from .KeyStoreKey import KeyStoreKey
from .Notification import Notification
from .PhoneNumberValidation import PhoneNumberValidation
from .Phonenumber import Phonenumber
from .PublicKey import PublicKey
from .RegistryEntry import RegistryEntry
from .See import See
from .SeeView import SeeView
from .TOTPSecret import TOTPSecret
from .TwoFAMethods import TwoFAMethods
from .User import User
from .UserAPIKey import UserAPIKey
from .UserOrganizations import UserOrganizations
from .api_response import APIResponse
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
from .userview import userview
from Jumpscale import j
class UsersService:
def __init__(self, client):
pass
self.client = client
def GetAvatarImage(self, hash, headers=None, query_params=None, content_type="application/json"):
"""
Get the avatar file associated with this id
It is method for GET /users/avatar/img/{hash}
"""
uri = self.client.base_url + "/users/avatar/img/" + hash
return self.client.get(uri, None, headers, query_params, content_type)
def DeleteUserAddress(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes an address
It is method for DELETE /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserAddressByLabel(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the details of an address.
It is method for GET /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Address(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateUserAddress(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the label and/or value of an existing address.
It is method for PUT /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def GetUserAddresses(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List of all of the user his addresses.
It is method for GET /users/{username}/addresses
"""
uri = self.client.base_url + "/users/" + username + "/addresses"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Address(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RegisterNewUserAddress(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Register a new address
It is method for POST /users/{username}/addresses
"""
uri = self.client.base_url + "/users/" + username + "/addresses"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Address(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAPIkey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes an API key
It is method for DELETE /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetAPIkey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get an API key by label
It is method for GET /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=UserAPIKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAPIkey(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Updates the label for the API key
It is method for PUT /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def ListAPIKeys(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists the API keys
It is method for GET /users/{username}/apikeys
"""
uri = self.client.base_url + "/users/" + username + "/apikeys"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(UserAPIKey(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddApiKey(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Adds an APIKey to the user
It is method for POST /users/{username}/apikeys
"""
uri = self.client.base_url + "/users/" + username + "/apikeys"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=UserAPIKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAuthorization(
self, grantedTo, username, headers=None, query_params=None, content_type="application/json"
):
"""
Remove the authorization for an organization, the granted organization will no longer have access the user's information.
It is method for DELETE /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
return self.client.delete(uri, None, headers, query_params, content_type)
def GetAuthorization(self, grantedTo, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the authorization for a specific organization.
It is method for GET /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Authorization(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAuthorization(
self, data, grantedTo, username, headers=None, query_params=None, content_type="application/json"
):
"""
Modify which information an organization is able to see.
It is method for PUT /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
return self.client.put(uri, data, headers, query_params, content_type)
def GetAllAuthorizations(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the list of authorizations.
It is method for GET /users/{username}/authorizations
"""
uri = self.client.base_url + "/users/" + username + "/authorizations"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Authorization(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateAvatarFromImage(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Create a new avatar with the specified label from a provided image file
It is method for POST /users/{username}/avatar/img/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/img/" + label
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAvatarFile(
self, data, newlabel, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the avatar and possibly the avatar file stored on itsyou.online
It is method for PUT /users/{username}/avatar/{label}/to/{newlabel}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label + "/to/" + newlabel
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAvatar(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Delete the avatar with the specified label
It is method for DELETE /users/{username}/avatar/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def UpdateAvatarLink(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Update the avatar and possibly the link to the avatar
It is method for PUT /users/{username}/avatar/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
| |
self.assertRaises(ValueError):
bond.l_monomer = 'a'
with self.assertRaises(ValueError):
bond.l_monomer = -2
bond.r_monomer = 2
with self.assertRaises(ValueError):
bond.r_monomer = 'a'
with self.assertRaises(ValueError):
bond.r_monomer = -2
def test_is_equal(self):
bond_1 = core.OntoBond()
bond_2 = core.OntoBond()
bond_3 = core.OntoBond(type=core.Bond())
bond_4 = core.OntoBond(l_monomer=2)
bond_5 = core.OntoBond(r_monomer=2)
self.assertTrue(bond_1.is_equal(bond_1))
self.assertTrue(bond_1.is_equal(bond_2))
self.assertFalse(bond_1.is_equal('bond'))
self.assertFalse(bond_1.is_equal(bond_3))
self.assertFalse(bond_1.is_equal(bond_4))
self.assertFalse(bond_1.is_equal(bond_5))
class BondSetTestCase(unittest.TestCase):
    """Tests for the collection semantics of core.BondSet."""

    @staticmethod
    def _make_bonds():
        """Return three distinct fixture bonds (H-O, N-P, S-Na)."""
        return (
            core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'H')],
                      r_bond_atoms=[core.Atom(core.Monomer, 'O')]),
            core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'N')],
                      r_bond_atoms=[core.Atom(core.Monomer, 'P')]),
            core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'S')],
                      r_bond_atoms=[core.Atom(core.Monomer, 'Na')]),
        )

    def test_add(self):
        """add() stores BondBase instances and rejects anything else."""
        bond_a, bond_b, bond_c = self._make_bonds()
        bonds = core.BondSet()
        bonds.add(bond_a)
        bonds.add(bond_b)
        self.assertEqual(len(bonds), 2)
        self.assertIn(bond_a, bonds)
        self.assertIn(bond_b, bonds)
        self.assertNotIn(bond_c, bonds)
        with self.assertRaisesRegex(ValueError, '`bond` must be an instance of `BondBase`'):
            bonds.add(None)

    def test_update(self):
        """update() adds every bond from an iterable."""
        bond_a, bond_b, bond_c = self._make_bonds()
        bonds = core.BondSet()
        bonds.update(set([bond_a, bond_b]))
        self.assertEqual(len(bonds), 2)
        self.assertIn(bond_a, bonds)
        self.assertIn(bond_b, bonds)
        self.assertNotIn(bond_c, bonds)

    def test_symmetric_difference_update(self):
        """symmetric_difference_update() keeps bonds in exactly one set."""
        bond_a, bond_b, bond_c = self._make_bonds()
        bonds_1 = core.BondSet()
        bonds_2 = core.BondSet()
        bonds_1.update(set([bond_a, bond_b]))
        bonds_2.update(set([bond_a, bond_c]))
        bonds_1.symmetric_difference_update(bonds_2)
        self.assertEqual(bonds_1, core.BondSet([bond_b, bond_c]))

    def test_is_equal(self):
        """is_equal(): same members -> True (symmetrically); different
        members or a plain set -> False."""
        bond_a, bond_b, bond_c = self._make_bonds()
        bonds_1 = core.BondSet()
        bonds_2 = core.BondSet()
        bonds_3 = core.BondSet()
        bonds_1.update(set([bond_a, bond_b]))
        bonds_2.update(set([bond_a, bond_b]))
        bonds_3.update(set([bond_a, bond_c]))
        self.assertTrue(bonds_1.is_equal(bonds_1))
        self.assertTrue(bonds_1.is_equal(bonds_2))
        self.assertTrue(bonds_2.is_equal(bonds_1))
        self.assertFalse(bonds_1.is_equal(bonds_3))
        self.assertFalse(bonds_3.is_equal(bonds_1))
        self.assertFalse(bonds_1.is_equal(set()))
class NickTestCase(unittest.TestCase):
    """Tests for core.Nick."""

    def test_init(self):
        """The constructor stores the given position."""
        self.assertEqual(core.Nick(position=3).position, 3)

    def test_get_set_position(self):
        """position accepts integers and rejects non-integer values."""
        nick = core.Nick()
        nick.position = 4
        self.assertEqual(nick.position, 4)
        with self.assertRaises(ValueError):
            nick.position = 4.1

    def test_is_equal(self):
        """is_equal() compares nicks by position."""
        default_a = core.Nick()
        default_b = core.Nick()
        pos3_a = core.Nick(position=3)
        pos3_b = core.Nick(position=3)
        pos4 = core.Nick(position=4)
        self.assertTrue(default_a.is_equal(default_a))
        self.assertTrue(default_a.is_equal(default_b))
        self.assertFalse(default_a.is_equal(pos3_a))
        self.assertTrue(pos3_a.is_equal(pos3_b))
        self.assertFalse(pos3_a.is_equal(pos4))
class NickSetTestCase(unittest.TestCase):
    """Tests for the collection semantics of core.NickSet."""

    def test_add(self):
        """add() stores Nick instances and rejects anything else."""
        nick_set = core.NickSet()
        nick = core.Nick()
        nick_set.add(nick)
        self.assertIn(nick, nick_set)
        with self.assertRaisesRegex(ValueError, 'must be an instance of `Nick`'):
            nick_set.add(9)

    def test_update(self):
        """update() adds every nick from an iterable."""
        first, second, third = core.Nick(position=3), core.Nick(position=5), core.Nick(position=7)
        nick_set = core.NickSet()
        nick_set.update([first, second])
        self.assertIn(first, nick_set)
        self.assertIn(second, nick_set)
        self.assertNotIn(third, nick_set)

    def test_symmetric_difference_update(self):
        """symmetric_difference_update() keeps nicks in exactly one set."""
        nick_a = core.Nick(position=3)
        nick_b = core.Nick(position=5)
        nick_c = core.Nick(position=7)
        set_1 = core.NickSet([nick_a, nick_b])
        set_2 = core.NickSet([nick_a, nick_c])
        set_1.symmetric_difference_update(set_2)
        self.assertEqual(set_1, core.NickSet([nick_b, nick_c]))

    def test_is_equal(self):
        """is_equal(): same members -> True; different members or a plain
        set -> False."""
        nick_a = core.Nick(position=3)
        nick_b = core.Nick(position=5)
        nick_c = core.Nick(position=7)
        set_1 = core.NickSet([nick_a, nick_b])
        set_2 = core.NickSet([nick_a, nick_b])
        set_3 = core.NickSet([nick_a, nick_c])
        self.assertTrue(set_1.is_equal(set_1))
        self.assertTrue(set_1.is_equal(set_2))
        self.assertFalse(set_1.is_equal(set_3))
        self.assertFalse(set_1.is_equal(set()))
class BpFormTestCase(unittest.TestCase):
def test_init(self):
bp_form = core.BpForm()
self.assertEqual(bp_form.seq, core.MonomerSequence())
self.assertEqual(bp_form.alphabet.monomers, {})
self.assertEqual(bp_form.backbone.get_formula(), EmpiricalFormula())
self.assertEqual(bp_form.backbone.get_charge(), 0)
self.assertEqual(bp_form.bond.get_formula(), EmpiricalFormula())
self.assertEqual(bp_form.bond.get_charge(), 0)
def test_set_monomer_seq(self):
bp_form = core.BpForm()
bp_form.seq = core.MonomerSequence()
self.assertEqual(len(bp_form.seq), 0)
bp_form.seq = [core.Monomer(), core.Monomer()]
self.assertIsInstance(bp_form.seq, core.MonomerSequence)
self.assertEqual(len(bp_form.seq), 2)
with self.assertRaises(ValueError):
bp_form.seq = None
with self.assertRaises(ValueError):
bp_form.seq = 'A'
def test_set_alphabet(self):
bp_form = core.BpForm()
bp_form.alphabet = dna.canonical_dna_alphabet
self.assertEqual(len(bp_form.alphabet.monomers), 6)
with self.assertRaises(ValueError):
bp_form.alphabet = None
with self.assertRaises(ValueError):
bp_form.alphabet = 'A'
def test_set_backbone(self):
bp_form = core.BpForm()
bp_form.backbone = core.Backbone()
with self.assertRaises(ValueError):
bp_form.backbone = None
with self.assertRaises(ValueError):
bp_form.backbone = '123'
def test_set_bond(self):
bp_form = core.BpForm()
bp_form.bond = core.Bond()
with self.assertRaises(ValueError):
bp_form.bond = None
with self.assertRaises(ValueError):
bp_form.bond = '123'
def test_set_circular(self):
bp_form = core.BpForm()
bp_form.circular = True
self.assertEqual(bp_form.circular, True)
bp_form.circular = False
self.assertEqual(bp_form.circular, False)
with self.assertRaises(ValueError):
bp_form.circular = None
def test_set_crosslinks(self):
bp_form = core.BpForm()
bp_form.crosslinks = core.BondSet()
with self.assertRaises(ValueError):
bp_form.crosslinks = None
def test_get_set_nicks(self):
bp_form = core.BpForm()
nicks = core.NickSet()
bp_form.nicks = nicks
self.assertEqual(bp_form.nicks, nicks)
with self.assertRaises(ValueError):
bp_form.nicks = None
    def test_is_equal(self):
        """BpForm.is_equal compares sequence, alphabet, backbone, bond,
        circularity, and nicks; any single difference makes forms unequal."""
        # Two forms with identical A-B sequences and default everything else.
        bp_form_1 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]))
        bp_form_2 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]))
        bp_form_3 = None
        # Forms 4-7 each differ from form 1 in exactly one attribute.
        bp_form_4 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), alphabet=dna.canonical_dna_alphabet)
        bp_form_5 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), backbone=core.Backbone(structure='O'))
        bp_form_6 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), bond=core.Bond(l_bond_atoms=[core.Atom(core.Monomer, 'C')]))
        bp_form_7 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]), circular=True)
        # Forms 8-10 exercise nick comparison: 8 and 9 share the same nick,
        # 10 has a nick at a different position.
        bp_form_8 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=1)]))
        bp_form_9 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=1)]))
        bp_form_10 = core.BpForm(seq=core.MonomerSequence(
            [core.Monomer(id='A'), core.Monomer(id='B')]),
            nicks=core.NickSet([core.Nick(position=2)]))
        self.assertTrue(bp_form_1.is_equal(bp_form_1))
        self.assertTrue(bp_form_1.is_equal(bp_form_2))
        self.assertFalse(bp_form_1.is_equal(bp_form_3))
        self.assertFalse(bp_form_1.is_equal(bp_form_4))
        self.assertFalse(bp_form_1.is_equal(bp_form_5))
        self.assertFalse(bp_form_1.is_equal(bp_form_6))
        self.assertFalse(bp_form_1.is_equal(bp_form_7))
        self.assertFalse(bp_form_1.is_equal(bp_form_8))
        self.assertTrue(bp_form_8.is_equal(bp_form_9))
        self.assertFalse(bp_form_8.is_equal(bp_form_10))
    def test_diff(self):
        """BpForm.diff returns None for equivalent forms and a human-readable
        description of the first differences otherwise (class, alphabet,
        backbone, bond, length, monomers, crosslinks, nicks, circularity)."""
        # Identical forms (including self-comparison) diff to None.
        form_1 = dna.DnaForm()
        form_2 = dna.DnaForm()
        self.assertEqual(form_1.diff(form_1), None)
        self.assertEqual(form_1.diff(form_2), None)
        # Different concrete classes are reported by name.
        form_2 = rna.RnaForm()
        self.assertIn('DnaForm != RnaForm', form_1.diff(form_2))
        # Same class but differing alphabet/backbone/bond attributes.
        form_2 = dna.DnaForm()
        form_2.alphabet = rna.rna_alphabet
        form_2.backbone = core.Backbone(monomer_bond_atoms=[core.Atom(core.Monomer, 'C')])
        form_2.bond = core.Bond()
        self.assertIn('Forms have different alphabets', form_1.diff(form_2))
        self.assertIn('Forms have different backbones', form_1.diff(form_2))
        self.assertIn('Forms have different inter-monomer bonds', form_1.diff(form_2))
        # Sequence length and per-position monomer differences.
        form_2 = dna.DnaForm().from_str('A')
        self.assertIn('Length 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A')
        form_2 = dna.DnaForm().from_str('C')
        self.assertIn('Monomeric form 1', form_1.diff(form_2))
        # crosslinks
        form_2 = dna.DnaForm().from_str('A|x-link:[]')
        self.assertIn('Number of crosslinks 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        form_2 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 2O2]')
        self.assertIn('not in self', form_1.diff(form_2))
        self.assertIn('not in other', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        form_2 = dna.DnaForm().from_str('A|x-link:[l-bond-atom: 1C1]')
        self.assertEqual(form_1.diff(form_2), None)
        # nicks
        form_1 = dna.DnaForm().from_str('AA')
        form_2 = dna.DnaForm().from_str('AA')
        form_2.nicks.add(core.Nick(position=1))
        self.assertIn('Number of nicks 0 != 1', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('AAA')
        form_1.nicks.add(core.Nick(position=1))
        form_2 = dna.DnaForm().from_str('AAA')
        form_2.nicks.add(core.Nick(position=2))
        self.assertIn('not in self', form_1.diff(form_2))
        self.assertIn('not in other', form_1.diff(form_2))
        form_1 = dna.DnaForm().from_str('AAA')
        form_2 = dna.DnaForm().from_str('AAA')
        form_1.nicks.add(core.Nick(position=1))
        form_2.nicks.add(core.Nick(position=1))
        self.assertEqual(form_1.diff(form_2), None)
        # circularity
        form_1 = dna.DnaForm(circular=False)
        form_2 = dna.DnaForm(circular=True)
        self.assertIn('Circularity False != True', form_1.diff(form_2))
def test_getitem(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
self.assertEqual(bp_form[0], monomer_1)
self.assertEqual(bp_form[1], monomer_2)
self.assertEqual(bp_form[0:1], [monomer_1])
def test_setitem(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
self.assertEqual(bp_form[0], monomer_1)
bp_form[0] = monomer_2
self.assertEqual(bp_form[0], monomer_2)
bp_form[0:1] = [monomer_3]
self.assertEqual(bp_form[0], monomer_3)
def test_delitem(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
del(bp_form[1])
self.assertTrue(bp_form.is_equal(core.BpForm([monomer_1, monomer_3])))
def test_iter(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
for i_monomer, monomer in enumerate(bp_form):
if i_monomer == 0:
self.assertEqual(monomer, monomer_1)
if i_monomer == 1:
self.assertEqual(monomer, monomer_2)
if i_monomer == 2:
self.assertEqual(monomer, monomer_3)
def test_reversed(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_3])
for i_monomer, monomer in enumerate(reversed(bp_form)):
if i_monomer == 2:
self.assertEqual(monomer, monomer_1)
if i_monomer == 1:
self.assertEqual(monomer, monomer_2)
if i_monomer == 0:
self.assertEqual(monomer, monomer_3)
def test_contains(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2])
self.assertIn(monomer_1, bp_form)
self.assertIn(monomer_2, bp_form)
self.assertNotIn(monomer_3, bp_form)
def test_len(self):
bp_form = core.BpForm()
self.assertEqual(len(bp_form), 0)
bp_form = core.BpForm(seq=[core.Monomer(), core.Monomer()])
self.assertEqual(len(bp_form), 2)
def test_get_monomer_counts(self):
monomer_1 = core.Monomer(id='A')
monomer_2 = core.Monomer(id='B')
monomer_3 = core.Monomer(id='C')
bp_form = core.BpForm([monomer_1, monomer_2, monomer_1, monomer_1, monomer_1, monomer_2, monomer_2, monomer_3])
self.assertEqual(bp_form.get_monomer_counts(), {
monomer_1: 4,
monomer_2: 3,
monomer_3: 1,
})
    def test_get_formula_mol_wt_charge(self):
        """Formula, molecular weight, and charge of a linear form are the
        sums over the monomers, minus/plus the atoms displaced by each
        inter-monomer bond (one bond per adjacent pair)."""
        monomer_A = core.Monomer(id='A', structure=dAMP_smiles)
        monomer_C = core.Monomer(id='C', structure=dCMP_smiles)
        # Single-monomer forms: totals equal the monomer's own values.
        bp_form = core.BpForm([monomer_A])
        self.assertEqual(bp_form.get_formula(), monomer_A.get_formula())
        self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt())
        self.assertEqual(bp_form.get_charge(), monomer_A.get_charge())
        bp_form = core.BpForm([monomer_C])
        self.assertEqual(bp_form.get_formula(), monomer_C.get_formula())
        self.assertEqual(bp_form.get_mol_wt(), monomer_C.get_mol_wt())
        self.assertEqual(bp_form.get_charge(), monomer_C.get_charge())
        # Two monomers, default bond (no displaced atoms): plain sums.
        bp_form = core.BpForm([monomer_A, monomer_C])
        self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() + monomer_C.get_formula())
        self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() + monomer_C.get_mol_wt())
        self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() + monomer_C.get_charge())
        # A bond displacing one H(-1) per junction: 1 junction for a dimer.
        bp_form = core.BpForm([monomer_A, monomer_C],
                              bond=core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'H', charge=-1, position=1)]))
        self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() + monomer_C.get_formula() - EmpiricalFormula('H'))
        self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() + monomer_C.get_mol_wt() -
                         EmpiricalFormula('H').get_molecular_weight())
        self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() + monomer_C.get_charge() + 1)
        # Five monomers -> 4 junctions, so 4x the displaced-atom correction.
        bp_form = core.BpForm([monomer_A, monomer_A, monomer_C, monomer_C, monomer_C],
                              bond=core.Bond(r_displaced_atoms=[core.Atom(core.Monomer, 'H', charge=-1, position=1)]))
        self.assertEqual(bp_form.get_formula(), monomer_A.get_formula() * 2 + monomer_C.get_formula() * 3 - EmpiricalFormula('H') * 4)
        self.assertEqual(bp_form.get_mol_wt(), monomer_A.get_mol_wt() * 2 + monomer_C.get_mol_wt()
                         * 3 - EmpiricalFormula('H').get_molecular_weight() * 4)
        self.assertEqual(bp_form.get_charge(), monomer_A.get_charge() * 2 + monomer_C.get_charge() * 3 + 1 * 4)
    def test_get_formula_charge_circular(self):
        """Circularizing a canonical DNA dimer adds one more inter-monomer
        bond, removing one additional HO and shifting the charge by one."""
        monomer_A = dna.canonical_dna_alphabet.monomers.A
        monomer_C = dna.canonical_dna_alphabet.monomers.C
        # Linear dimer: one junction -> one HO removed, charge +1.
        dimer = dna.CanonicalDnaForm([monomer_A, monomer_C])
        self.assertEqual(clean_smiles(dimer.export('smiles')),
                         clean_smiles('Nc1c2ncn(c2ncn1)C1CC(OP(=O)(OCC2C(O)CC(n3c(=O)nc(N)cc3)O2)[O-])C(O1)COP(=O)([O-])[O-]'))
        self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                         + monomer_C.get_formula()
                         + dimer.backbone.get_formula() * 2
                         - EmpiricalFormula('HO') * 1)
        self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                         + monomer_C.get_charge()
                         + dimer.backbone.get_charge() * 2
                         + 1 * 1)
        # Circular dimer: two junctions -> two HO removed, charge +2.
        dimer.circular = True
        self.assertEqual(clean_smiles(dimer.export('smiles')),
                         clean_smiles('Nc1c2ncn(c2ncn1)C1CC2OP(=O)(OCC3C(OP(=O)(OCC2O1)[O-])CC(n1c(=O)nc(N)cc1)O3)[O-]'))
        self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                         + monomer_C.get_formula()
                         + dimer.backbone.get_formula() * 2
                         - EmpiricalFormula('HO') * 2)
        self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                         + monomer_C.get_charge()
                         + dimer.backbone.get_charge() * 2
                         + 1 * 2)
    def test_get_formula_charge_crosslinks(self):
        """A crosslink between the two monomers of a DNA dimer displaces its
        declared atoms (H and O-) and adjusts formula and charge like an
        extra inter-monomer bond."""
        monomer_A = dna.canonical_dna_alphabet.monomers.A
        monomer_C = dna.canonical_dna_alphabet.monomers.C
        # Baseline linear dimer, no crosslinks.
        dimer = dna.CanonicalDnaForm([monomer_A, monomer_C])
        self.assertEqual(clean_smiles(dimer.export('smiles')),
                         clean_smiles('Nc1c2ncn(c2ncn1)C1CC(OP(=O)(OCC2C(O)CC(n3c(=O)nc(N)cc3)O2)[O-])C(O1)COP(=O)([O-])[O-]'))
        self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                         + monomer_C.get_formula()
                         + dimer.backbone.get_formula() * 2
                         - EmpiricalFormula('HO') * 1)
        self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                         + monomer_C.get_charge()
                         + dimer.backbone.get_charge() * 2
                         + 1 * 1)
        # Crosslink from P of monomer 1 to O of monomer 2, displacing an H
        # from monomer 2 and an O(-1) from monomer 1.
        crosslink = core.Bond(
            r_bond_atoms=[core.Atom(core.Monomer, monomer=2, element='O', position=1)],
            l_bond_atoms=[core.Atom(core.Monomer, monomer=1, element='P', position=9)],
            r_displaced_atoms=[core.Atom(core.Monomer, monomer=2, element='H', position=1)],
            l_displaced_atoms=[core.Atom(core.Monomer, monomer=1, element='O', position=12, charge=-1)]
        )
        dimer.crosslinks = core.BondSet([crosslink])
        self.assertEqual(clean_smiles(dimer.export('smiles')),
                         clean_smiles('Nc1ccn(c(=O)n1)C1OC2C(C1)OP(=O)([O-])OCC1C(OP(=O)(OC2)[O-])CC(O1)n1cnc2c1ncnc2N'))
        self.assertEqual(dimer.get_formula(), monomer_A.get_formula()
                         + monomer_C.get_formula()
                         + dimer.backbone.get_formula() * 2
                         - EmpiricalFormula('HO') * 2)
        self.assertEqual(dimer.get_charge(), monomer_A.get_charge()
                         + monomer_C.get_charge()
                         + dimer.backbone.get_charge() * 2
                         + 1 * 2)
    def test_get_major_micro_species(self):
        """get_major_micro_species returns the protonation state at the given
        pH (as a structure object) for a non-empty form, and None for an
        empty form."""
        bp_form = dna.CanonicalDnaForm([
            dna.canonical_dna_alphabet.monomers.A,
            dna.canonical_dna_alphabet.monomers.C,
        ])
        structure = bp_form.get_major_micro_species(7.4, major_tautomer=True)
        self.assertEqual(clean_smiles(OpenBabelUtils.export(structure, 'smiles')),
                         clean_smiles('Nc1nc(=O)n(cc1)C1CC(O)C(COP(=O)([O-])OC2CC(OC2COP(=O)([O-])[O-])n2cnc3c(N)ncnc23)O1'))
        # An empty form has no structure to protonate.
        bp_form = dna.DnaForm()
        self.assertEqual(bp_form.get_major_micro_species(7.), None)
    def test_str(self):
        """__str__ renders each monomer via its alphabet code when the
        alphabet contains it, otherwise inline as [id | structure]."""
        monomer_A = core.Monomer(id='A', structure=dAMP_smiles)
        monomer_C = core.Monomer(id='C', structure=dCMP_smiles)
        monomer_G = core.Monomer(id='G', structure=dGMP_smiles)
        # No alphabet: every monomer is rendered through str(monomer).
        bp_form = core.BpForm([monomer_A, monomer_C, monomer_G, monomer_A])
        self.assertEqual(str(bp_form), '{}{}{}{}'.format(str(monomer_A), str(monomer_C), str(monomer_G), str(monomer_A)))
        # Alphabet with only A and C: G falls back to str(monomer_G).
        bp_form = core.BpForm([monomer_A, monomer_C, monomer_G, monomer_A], alphabet=core.Alphabet(monomers={
            'A': monomer_A,
            'C': monomer_C,
        }))
        self.assertEqual(str(bp_form), '{}{}{}{}'.format('A', 'C', str(monomer_G), 'A'))
        # NOTE(review): dGMP_smiles_2 is presumably the canonicalized SMILES
        # that Monomer.__str__ emits for dGMP_smiles — confirm against the
        # Monomer string-rendering implementation.
        dGMP_smiles_2 = 'OC1CC(OC1COP(=O)([O-])[O-])n1cnc2c1nc(N)[nH]c2=O'
        self.assertEqual(str(bp_form), '{}{}{}{}'.format('A', 'C', '[id: "{}" | structure: "{}"]'.format('G', dGMP_smiles_2), 'A'))
def test_from_str(self):
self.assertTrue(dna.DnaForm().from_str('AAA').is_equal(dna.DnaForm([
dna.dna_alphabet.monomers.A,
dna.dna_alphabet.monomers.A,
dna.dna_alphabet.monomers.A,
])))
self.assertTrue(dna.DnaForm().from_str('ACTG').is_equal(dna.DnaForm([
dna.dna_alphabet.monomers.A, dna.dna_alphabet.monomers.C,
dna.dna_alphabet.monomers.T, dna.dna_alphabet.monomers.G,
| |
95*m.x3*m.x17 + 100*m.x4*m.x14 + 100*m.x5*m.x14
+ 85*m.x6*m.x11 + 100*m.x7*m.x14 + 80*m.x8*m.x14 + 60*m.x9*m.x17 + 70*m.x10*m.x17) - 100*m.x20
- 100*m.x23 - 100*m.x26 - 100*m.x29 == 0)
# NOTE(review): machine-generated model code (reads like a GAMS translation).
# The constraints below repeat one bilinear-balance pattern three times with
# shifted variable indices (the x32, x33 and x34 groups) — presumably one
# group per period or unit; confirm against the model's origin.
m.c41 = Constraint(expr=m.x40*m.x32 - (7.5*m.x5*m.x14 + 3.2*m.x6*m.x11 + 10*m.x7*m.x14 + 35*m.x8*m.x14 + 65*m.x9*m.x17
                                       + 60*m.x10*m.x17) == 0)
m.c42 = Constraint(expr=m.x41*m.x32 - (2*m.x5*m.x14 + m.x7*m.x14 + 3*m.x8*m.x14 + 4*m.x9*m.x17 + 5*m.x10*m.x17) == 0)
m.c43 = Constraint(expr=m.x42*m.x32 - (37*m.x5*m.x14 + 12*m.x6*m.x11 + 60*m.x7*m.x14 + 20*m.x8*m.x14 + 15*m.x9*m.x17 + 3
                                       *m.x10*m.x17) == 0)
# c44-c46: simple proportionality between x43..x45 (scaled by x32) and x20/x23/x29.
m.c44 = Constraint(expr=m.x43*m.x32 - 18.15*m.x20 == 0)
m.c45 = Constraint(expr=m.x44*m.x32 - 15.66*m.x23 == 0)
m.c46 = Constraint(expr=m.x45*m.x32 - 34.73*m.x29 == 0)
# Second (x33) group: same pattern with indices shifted by one.
m.c47 = Constraint(expr=m.x46*m.x33 - 18.15*m.x21 - 15.66*m.x24 - 15.66*m.x27 - 34.73*m.x30 == 0)
m.c48 = Constraint(expr=m.x47*m.x33 - (50*m.x4*m.x15 + 100*m.x5*m.x15 + 15*m.x6*m.x12 + 200*m.x7*m.x15 + 400*m.x8*m.x15
                                       + 700*m.x9*m.x18 + 10*m.x10*m.x18) == 0)
m.c49 = Constraint(expr=m.x49*m.x33 - (100*m.x1*m.x12 + 100*m.x2*m.x12 + 50*m.x3*m.x18 + 100*m.x4*m.x15 + 70*m.x5*m.x15
                                       + 60*m.x6*m.x12 + 85*m.x7*m.x15 + 45*m.x8*m.x15 + 15*m.x9*m.x18 + 30*m.x10*m.x18) - 100*m.x21
                                       - 95*m.x24 - 70*m.x27 - 100*m.x30 == 0)
m.c50 = Constraint(expr=m.x50*m.x33 - (100*m.x1*m.x12 + 100*m.x2*m.x12 + 95*m.x3*m.x18 + 100*m.x4*m.x15 + 100*m.x5*m.x15
                                       + 85*m.x6*m.x12 + 100*m.x7*m.x15 + 80*m.x8*m.x15 + 60*m.x9*m.x18 + 70*m.x10*m.x18) - 100*m.x21
                                       - 100*m.x24 - 100*m.x27 - 100*m.x30 == 0)
m.c51 = Constraint(expr=m.x51*m.x33 - (7.5*m.x5*m.x15 + 3.2*m.x6*m.x12 + 10*m.x7*m.x15 + 35*m.x8*m.x15 + 65*m.x9*m.x18
                                       + 60*m.x10*m.x18) == 0)
m.c52 = Constraint(expr=m.x52*m.x33 - (2*m.x5*m.x15 + m.x7*m.x15 + 3*m.x8*m.x15 + 4*m.x9*m.x18 + 5*m.x10*m.x18) == 0)
m.c53 = Constraint(expr=m.x53*m.x33 - (37*m.x5*m.x15 + 12*m.x6*m.x12 + 60*m.x7*m.x15 + 20*m.x8*m.x15 + 15*m.x9*m.x18 + 3
                                       *m.x10*m.x18) == 0)
m.c54 = Constraint(expr=m.x54*m.x33 - 18.15*m.x21 == 0)
m.c55 = Constraint(expr=m.x55*m.x33 - 15.66*m.x24 == 0)
m.c56 = Constraint(expr=m.x56*m.x33 - 34.73*m.x30 == 0)
# Third (x34) group: same pattern again with the next index shift.
m.c57 = Constraint(expr=m.x57*m.x34 - 18.15*m.x22 - 15.66*m.x25 - 15.66*m.x28 - 34.73*m.x31 == 0)
m.c58 = Constraint(expr=m.x58*m.x34 - (50*m.x4*m.x16 + 100*m.x5*m.x16 + 15*m.x6*m.x13 + 200*m.x7*m.x16 + 400*m.x8*m.x16
                                       + 700*m.x9*m.x19 + 10*m.x10*m.x19) == 0)
m.c59 = Constraint(expr=m.x60*m.x34 - (100*m.x1*m.x13 + 100*m.x2*m.x13 + 50*m.x3*m.x19 + 100*m.x4*m.x16 + 70*m.x5*m.x16
                                       + 60*m.x6*m.x13 + 85*m.x7*m.x16 + 45*m.x8*m.x16 + 15*m.x9*m.x19 + 30*m.x10*m.x19) - 100*m.x22
                                       - 95*m.x25 - 70*m.x28 - 100*m.x31 == 0)
m.c60 = Constraint(expr=m.x61*m.x34 - (100*m.x1*m.x13 + 100*m.x2*m.x13 + 95*m.x3*m.x19 + 100*m.x4*m.x16 + 100*m.x5*m.x16
                                       + 85*m.x6*m.x13 + 100*m.x7*m.x16 + 80*m.x8*m.x16 + 60*m.x9*m.x19 + 70*m.x10*m.x19) - 100*m.x22
                                       - 100*m.x25 - 100*m.x28 - 100*m.x31 == 0)
m.c61 = Constraint(expr=m.x62*m.x34 - (7.5*m.x5*m.x16 + 3.2*m.x6*m.x13 + 10*m.x7*m.x16 + 35*m.x8*m.x16 + 65*m.x9*m.x19
                                       + 60*m.x10*m.x19) == 0)
m.c62 = Constraint(expr=m.x63*m.x34 - (2*m.x5*m.x16 + m.x7*m.x16 + 3*m.x8*m.x16 + 4*m.x9*m.x19 + 5*m.x10*m.x19) == 0)
m.c63 = Constraint(expr=m.x64*m.x34 - (37*m.x5*m.x16 + 12*m.x6*m.x13 + 60*m.x7*m.x16 + 20*m.x8*m.x16 + 15*m.x9*m.x19 + 3
                                       *m.x10*m.x19) == 0)
m.c64 = Constraint(expr=m.x65*m.x34 - 18.15*m.x22 == 0)
m.c65 = Constraint(expr=m.x66*m.x34 - 15.66*m.x25 == 0)
m.c66 = Constraint(expr=m.x67*m.x34 - 34.73*m.x31 == 0)
# c67-c69: nonlinear (x**1.25) balances, one per group (x37/x48/x59 against
# x32/x33/x34), with long generated coefficient lists.
m.c67 = Constraint(expr=m.x37**1.25*m.x32 - (166.989461022824*m.x1*m.x11 + 44.9545980014895*m.x2*m.x11 +
                                             12.2050524378911*m.x3*m.x17 + 17.7827941003892*m.x4*m.x14 + 15.5884572681199*m.x5*m.x14 +
                                             4.61688063363795*m.x6*m.x11 + 18.2284698685036*m.x7*m.x14 + 13.8760966290575*m.x8*m.x14 +
                                             2.5279828213557*m.x9*m.x17 + 12.2050524378911*m.x10*m.x17) - 15.1566541273553*m.x20
                                             - 8.80731581347371*m.x23 - 3.4610247518095*m.x26 - 50.3685901711814*m.x29 == 0)
m.c68 = Constraint(expr=m.x48**1.25*m.x33 - (166.989461022824*m.x1*m.x12 + 44.9545980014895*m.x2*m.x12 +
                                             12.2050524378911*m.x3*m.x18 + 17.7827941003892*m.x4*m.x15 + 15.5884572681199*m.x5*m.x15 +
                                             4.61688063363795*m.x6*m.x12 + 18.2284698685036*m.x7*m.x15 + 13.8760966290575*m.x8*m.x15 +
                                             2.5279828213557*m.x9*m.x18 + 12.2050524378911*m.x10*m.x18) - 15.1566541273553*m.x21
                                             - 8.80731581347371*m.x24 - 3.4610247518095*m.x27 - 50.3685901711814*m.x30 == 0)
m.c69 = Constraint(expr=m.x59**1.25*m.x34 - (166.989461022824*m.x1*m.x13 + 44.9545980014895*m.x2*m.x13 +
                                             12.2050524378911*m.x3*m.x19 + 17.7827941003892*m.x4*m.x16 + 15.5884572681199*m.x5*m.x16 +
                                             4.61688063363795*m.x6*m.x13 + 18.2284698685036*m.x7*m.x16 + 13.8760966290575*m.x8*m.x16 +
                                             2.5279828213557*m.x9*m.x19 + 12.2050524378911*m.x10*m.x19) - 15.1566541273553*m.x22
                                             - 8.80731581347371*m.x25 - 3.4610247518095*m.x28 - 50.3685901711814*m.x31 == 0)
# c70-c81: exponential two-term correlations (weights 0.444/0.556) that define
# x318..x329 per group; presumably fitted property models — confirm at origin.
m.c70 = Constraint(expr=-53.54*(0.444*exp((-1.26152) + 0.0006197*m.x36 + 0.22239*m.x41 + 0.02655*m.x285 - 0.003376*
                                m.x288) + 0.556*exp((-1.76845) - 0.096047*m.x35 + 0.000337*m.x36 + 0.222318*m.x41 + 0.011882*
                                m.x285 + 0.011251*m.x288)) + m.x318 == 0)
m.c71 = Constraint(expr=-53.54*(0.444*exp((-1.26152) + 0.0006197*m.x47 + 0.22239*m.x52 + 0.02655*m.x286 - 0.003376*
                                m.x289) + 0.556*exp((-1.76845) - 0.096047*m.x46 + 0.000337*m.x47 + 0.222318*m.x52 + 0.011882*
                                m.x286 + 0.011251*m.x289)) + m.x319 == 0)
m.c72 = Constraint(expr=-53.54*(0.444*exp((-1.26152) + 0.0006197*m.x58 + 0.22239*m.x63 + 0.02655*m.x287 - 0.003376*
                                m.x290) + 0.556*exp((-1.76845) - 0.096047*m.x57 + 0.000337*m.x58 + 0.222318*m.x63 + 0.011882*
                                m.x287 + 0.011251*m.x290)) + m.x320 == 0)
m.c73 = Constraint(expr=-9.7*(0.444*exp(1.07807 + 0.0462131*m.x43 - 0.007166*m.x285 - 0.010226*m.x288) + 0.556*exp(
                              1.36651 - 0.031352*m.x42 + 0.0462131*m.x43 - 0.007166*m.x285 - 0.010226*m.x288)) + m.x321 == 0)
m.c74 = Constraint(expr=-9.7*(0.444*exp(1.07807 + 0.0462131*m.x54 - 0.007166*m.x286 - 0.010226*m.x289) + 0.556*exp(
                              1.36651 - 0.031352*m.x53 + 0.0462131*m.x54 - 0.007166*m.x286 - 0.010226*m.x289)) + m.x322 == 0)
m.c75 = Constraint(expr=-9.7*(0.444*exp(1.07807 + 0.0462131*m.x65 - 0.007166*m.x287 - 0.010226*m.x290) + 0.556*exp(
                              1.36651 - 0.031352*m.x64 + 0.0462131*m.x65 - 0.007166*m.x287 - 0.010226*m.x290)) + m.x323 == 0)
m.c76 = Constraint(expr=-4.44*(0.444*exp(0.751747 + 0.0002631*m.x36 + 0.039786*m.x37 - 0.009594*m.x43 + 0.31658*m.x44 +
                               0.24925*m.x45 - 0.005525*m.x285 - 0.012172*m.x288) + 0.556*exp(1.09751 + 0.0002627*m.x36 -
                               0.05598*m.x43 + 0.3164665*m.x44 + 0.2493259*m.x45 - 0.005548*m.x285 - 0.012157*m.x288)) + m.x324
                               == 0)
m.c77 = Constraint(expr=-4.44*(0.444*exp(0.751747 + 0.0002631*m.x47 + 0.039786*m.x48 - 0.009594*m.x54 + 0.31658*m.x55 +
                               0.24925*m.x56 - 0.005525*m.x286 - 0.012172*m.x289) + 0.556*exp(1.09751 + 0.0002627*m.x47 -
                               0.05598*m.x54 + 0.3164665*m.x55 + 0.2493259*m.x56 - 0.005548*m.x286 - 0.012157*m.x289)) + m.x325
                               == 0)
m.c78 = Constraint(expr=-4.44*(0.444*exp(0.751747 + 0.0002631*m.x58 + 0.039786*m.x59 - 0.009594*m.x65 + 0.31658*m.x66 +
                               0.24925*m.x67 - 0.005525*m.x287 - 0.012172*m.x290) + 0.556*exp(1.09751 + 0.0002627*m.x58 -
                               0.05598*m.x65 + 0.3164665*m.x66 + 0.2493259*m.x67 - 0.005548*m.x287 - 0.012157*m.x290)) + m.x326
                               == 0)
m.c79 = Constraint(expr=-9.38*(0.444*exp(1.34704 + 0.0001552*m.x36 - 0.007253*m.x38 + 0.028235*m.x42 - 0.004005*m.x285
                               - 0.014866*m.x288) + 0.556*exp(0.694224 - 0.060771*m.x35 - 0.007311*m.x38 + 0.043696*m.x42 -
                               0.004005*m.x285 - 0.008052*m.x288)) + m.x327 == 0)
m.c80 = Constraint(expr=-9.38*(0.444*exp(1.34704 + 0.0001552*m.x47 - 0.007253*m.x49 + 0.028235*m.x53 - 0.004005*m.x286
                               - 0.014866*m.x289) + 0.556*exp(0.694224 - 0.060771*m.x46 - 0.007311*m.x49 + 0.043696*m.x53 -
                               0.004005*m.x286 - 0.008052*m.x289)) + m.x328 == 0)
m.c81 = Constraint(expr=-9.38*(0.444*exp(1.34704 + 0.0001552*m.x58 - 0.007253*m.x60 + 0.028235*m.x64 - 0.004005*m.x287
                               - 0.014866*m.x290) + 0.556*exp(0.694224 - 0.060771*m.x57 - 0.007311*m.x60 + 0.043696*m.x64 -
                               0.004005*m.x287 - 0.008052*m.x290)) + m.x329 == 0)
# c82-c84: cubic polynomial correlations defining x330..x332 per group.
m.c82 = Constraint(expr=-10*(1.75021*m.x41 - 0.603184*m.x37*m.x41 - 0.0402619*m.x43*m.x41 + 0.0738116*m.x37*m.x37*m.x41
                             + 0.0116427*m.x37*m.x43*m.x41 - 0.00255327*m.x37*m.x37*m.x37*m.x41 - 0.0010494*m.x37*m.x37*
                             m.x43*m.x41) + m.x330 == 0)
m.c83 = Constraint(expr=-10*(1.75021*m.x52 - 0.603184*m.x48*m.x52 - 0.0402619*m.x54*m.x52 + 0.0738116*m.x48*m.x48*m.x52
                             + 0.0116427*m.x48*m.x54*m.x52 - 0.00255327*m.x48*m.x48*m.x48*m.x52 - 0.0010494*m.x48*m.x48*
                             m.x54*m.x52) + m.x331 == 0)
m.c84 = Constraint(expr=-10*(1.75021*m.x63 - 0.603184*m.x59*m.x63 - 0.0402619*m.x65*m.x63 + 0.0738116*m.x59*m.x59*m.x63
                             + 0.0116427*m.x59*m.x65*m.x63 - 0.00255327*m.x59*m.x59*m.x59*m.x63 - 0.0010494*m.x59*m.x59*
                             m.x65*m.x63) + m.x332 == 0)
# c85-c87: per-group caps (<= 95) on the sum of the correlation outputs.
m.c85 = Constraint(expr= 0.003355*m.x297 + m.x318 + m.x321 + m.x324 + m.x327 + m.x330 <= 95)
m.c86 = Constraint(expr= 0.003355*m.x298 + m.x319 + m.x322 + m.x325 + m.x328 + m.x331 <= 95)
m.c87 = Constraint(expr= 0.003355*m.x299 + m.x320 + m.x323 + m.x326 + m.x329 + m.x332 <= 95)
# c88-c90: equate x288..x290 with x276..x278.
m.c88 = Constraint(expr= - m.x276 + m.x288 == 0)
m.c89 = Constraint(expr= - m.x277 + m.x289 == 0)
m.c90 = Constraint(expr= - m.x278 + m.x290 == 0)
# c91-c102: big-M-style switching on binaries b72..b74 linking x285..x287
# with x40/x51/x62 (M = 50, bounds 10..50 when the binary is active).
m.c91 = Constraint(expr= 40*m.b72 + m.x285 <= 50)
m.c92 = Constraint(expr= 40*m.b73 + m.x286 <= 50)
m.c93 = Constraint(expr= 40*m.b74 + m.x287 <= 50)
m.c94 = Constraint(expr= 10*m.b72 - m.x285 <= 0)
m.c95 = Constraint(expr= 10*m.b73 - m.x286 <= 0)
m.c96 = Constraint(expr= 10*m.b74 - m.x287 <= 0)
m.c97 = Constraint(expr= - m.x40 - 50*m.b72 + m.x285 <= 0)
m.c98 = Constraint(expr= - m.x51 - 50*m.b73 + m.x286 <= 0)
m.c99 = Constraint(expr= - m.x62 - 50*m.b74 + m.x287 <= 0)
m.c100 = Constraint(expr= m.x40 - 50*m.b72 - m.x285 <= 0)
m.c101 = Constraint(expr= m.x51 - 50*m.b73 - m.x286 <= 0)
m.c102 = Constraint(expr= m.x62 - 50*m.b74 - m.x287 <= 0)
# c103-c105: two-term exponential models (weights 0.738/0.262, scale 1340)
# defining x309..x311, which c106-c108 cap at 1400.
m.c103 = Constraint(expr=-1340*(0.738*exp((-0.497032) + 0.0006921*m.x36 - 6.63e-7*m.x36**2 - 0.000119*m.x279**2 +
                                0.0083632*m.x279 + 0.0003665*m.x282**2 - 0.002774*m.x282 + 0.0018571*m.x35 + 0.0090744*m.x37 +
                                0.000931*m.x38 + 0.000846*m.x276)*m.x312 + 0.262*exp(0.179906 + 0.007097*m.x279 - 7.995e-5*
                                m.x279**2 + 0.0003665*m.x282**2 - 0.00276*m.x282 - 0.00913*m.x35 + 0.000252*m.x36 - 0.01397*
                                m.x37 + 0.000931*m.x38 - 0.00401*m.x276)*m.x313) + m.x309 == 0)
m.c104 = Constraint(expr=-1340*(0.738*exp((-0.497032) + 0.0006921*m.x47 - 6.63e-7*m.x47**2 - 0.000119*m.x280**2 +
                                0.0083632*m.x280 + 0.0003665*m.x283**2 - 0.002774*m.x283 + 0.0018571*m.x46 + 0.0090744*m.x48 +
                                0.000931*m.x49 + 0.000846*m.x277)*m.x314 + 0.262*exp(0.179906 + 0.007097*m.x280 - 7.995e-5*
                                m.x280**2 + 0.0003665*m.x283**2 - 0.00276*m.x283 - 0.00913*m.x46 + 0.000252*m.x47 - 0.01397*
                                m.x48 + 0.000931*m.x49 - 0.00401*m.x277)*m.x315) + m.x310 == 0)
m.c105 = Constraint(expr=-1340*(0.738*exp((-0.497032) + 0.0006921*m.x58 - 6.63e-7*m.x58**2 - 0.000119*m.x281**2 +
                                0.0083632*m.x281 + 0.0003665*m.x284**2 - 0.002774*m.x284 + 0.0018571*m.x57 + 0.0090744*m.x59 +
                                0.000931*m.x60 + 0.000846*m.x278)*m.x316 + 0.262*exp(0.179906 + 0.007097*m.x281 - 7.995e-5*
                                m.x281**2 + 0.0003665*m.x284**2 - 0.00276*m.x284 - 0.00913*m.x57 + 0.000252*m.x58 - 0.01397*
                                m.x59 + 0.000931*m.x60 - 0.00401*m.x278)*m.x317) + m.x311 == 0)
m.c106 = Constraint(expr= m.x309 <= 1400)
m.c107 = Constraint(expr= m.x310 <= 1400)
m.c108 = Constraint(expr= m.x311 <= 1400)
# c109-c110: x312/x313 tied to sums of x234..x247 (== 1 offsets).
m.c109 = Constraint(expr= - m.x234 - m.x240 - m.x246 + m.x312 == 1)
m.c110 = Constraint(expr= - m.x235 - m.x241 - m.x247 + m.x313 == 1)
m.c111 = Constraint(expr= - m.x236 - | |
decreasing=None,
dx=None,
dy=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
increasing=None,
insidetextanchor=None,
insidetextfont=None,
legendgroup=None,
measure=None,
measuresrc=None,
meta=None,
metasrc=None,
name=None,
offset=None,
offsetgroup=None,
offsetsrc=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
totals=None,
uid=None,
uirevision=None,
visible=None,
width=None,
widthsrc=None,
x=None,
x0=None,
xaxis=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
row=None,
col=None,
secondary_y=None,
**kwargs
):
"""
Add a new Waterfall trace
Draws waterfall trace which is useful graph to displays the
contribution of various elements (either positive or negative)
in a bar chart. The data visualized by the span of the bars is
set in `y` if `orientation` is set th "v" (the default) and the
labels are set in `x`. By setting `orientation` to "h", the
roles are interchanged.
Parameters
----------
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
base
Sets where the bar base is drawn (in position axis
units).
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
connector
:class:`plotly.graph_objects.waterfall.Connector`
instance or dict with compatible properties
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
decreasing
:class:`plotly.graph_objects.waterfall.Decreasing`
instance or dict with compatible properties
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.waterfall.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `initial`, `delta` and
`final`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to the this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
increasing
:class:`plotly.graph_objects.waterfall.Increasing`
instance or dict with compatible properties
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
measure
An array containing types of values. By default the
values are considered as 'relative'. However; it is
possible to use 'total' to compute the sums. Also
'absolute' could be applied to reset the computed total
or to declare an initial value where needed.
measuresrc
Sets the source reference on Chart Studio Cloud for
measure .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
offsetsrc
Sets the source reference on Chart Studio Cloud for
offset .
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of the each bar spans along the vertical
(horizontal).
outsidetextfont
Sets the font used for `text` lying outside the bar.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.waterfall.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of string, the items are
mapped in order to the this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on the graph.
In the case of having multiple waterfalls, totals are
computed separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are | |
= [abs_root]
rel_path = self.__ParseMacros(rel_path)
if not rel_path or rel_path[0] != '/':
path_parts.append(self.__ParseMacros(self.RELEASE_MOD_FOLDER))
path_parts.append(rel_path)
abs_path = os.path.normpath(os.sep.join(path_parts))
if '=>NULL' in abs_path and not allow_unresolved:
print 'ERROR: Cannot construct release path due to unresolved component(s):', abs_path
exit(-1)
elif os.path.relpath(abs_path, abs_root).startswith('..'):
print 'ERROR: Destination path is outside of GameData release folder:', abs_path, abs_root
exit(-1)
return abs_path
# Checks if JSON settings file can be handled by this version of the script.
def __CheckSchemaVersion(self, schema_version):
if not schema_version:
print 'ERROR: JSON schema is not defined'
exit(-1)
major, minor = [int(x) for x in schema_version.split('.')]
supported_major, supported_minor = [int(x) for x in SUPPORTED_JSON_SCHEMA_VERSION.split('.')]
if major != supported_major or minor > supported_minor:
print 'ERROR: Unsupported schema version %s' % schema_version
exit(-1)
  # Resolves absolute path to the project root and adjusts {PROJECT_ROOT} if needed.
  def __ResolveProjectRoot(self):
    """Determines the absolute project root and syncs PROJECT_ROOT to it.

    When PROJECT_ROOT is the special value '#github', the root is discovered
    by walking up from the script folder until a '.git' entry is found; the
    script aborts if the filesystem root is reached first. Otherwise
    PROJECT_ROOT is treated as a path relative to the script folder.
    """
    if self.PROJECT_ROOT == '#github':
      repository_path = self.__ABS_SCRIPT_ROOT
      i = 50  # Reasonably high nested level value to prevent endless loops.
      while not os.path.exists(os.path.join(repository_path, '.git')):
        i -= 1
        repository_path = os.path.abspath(os.path.join(repository_path, '..'))
        # Stop at the filesystem root or when the nesting budget runs out.
        if os.path.relpath(repository_path, '/') == '.' or not i:
          print 'ERROR: Cannot find GitHub repository for:', self.__ABS_SCRIPT_ROOT
          exit(-1)
      self.__ABS_PROJECT_ROOT = repository_path
      # Keep the relative setting consistent with the discovered absolute root.
      self.PROJECT_ROOT = os.path.relpath(self.__ABS_PROJECT_ROOT, self.__ABS_SCRIPT_ROOT)
      print 'Found GitHub repository at: %s (PROJECT_ROOT=%s)' % (
          repository_path, self.PROJECT_ROOT)
    else:
      self.__ABS_PROJECT_ROOT = os.path.join(self.__ABS_SCRIPT_ROOT, self.PROJECT_ROOT)
      print 'Set repository path from settings: %s (PROJECT_ROOT=%s)' % (
          self.__ABS_PROJECT_ROOT, self.PROJECT_ROOT)
# Makes the binary.
def __Step_CompileBinary(self):
os.chdir(self.__ABS_SCRIPT_ROOT)
binary_path = self.__MakeSrcPath(self.COMPILED_BINARY)
print 'Compiling sources in PROD mode...'
code = subprocess.call(
[os.path.join(self.__ABS_SCRIPT_ROOT, self.SHELL_COMPILE_BINARY_SCRIPT)])
if code != 0 or not os.path.exists(binary_path):
print 'ERROR: Compilation failed. Cannot find target DLL:', binary_path
exit(code)
# Purges any existed files in the release folder.
def __Step_CleanupReleaseFolder(self):
path = self.__GetAbsReleaseFolder()
print 'Cleanup release folder...'
self.__OsSafeDeleteFromDest(path)
# Creates whole release structure and copies the required files.
def __Step_MakeFoldersStructure(self):
def target_cmp_fn(x, y):
if x and x[0] == '/' and (not y or y[0] != '/'):
return -1
if y and y[0] == '/' and (not x or x[0] != '/'):
return 1
return cmp(x, y)
print '=== START: Building release structure:'
sorted_targets = sorted(
self.STRUCTURE.iteritems(), key=lambda x: x[0], cmp=target_cmp_fn)
for (dest_folder, src_patterns) in sorted_targets:
dest_path = self.__MakeDestPath(dest_folder)
print 'Release folder:', dest_path
copy_sources = None
drop_patterns = []
for src_pattern in src_patterns:
allow_no_matches = False
is_drop_pattern = False
if src_pattern[0] == '?':
allow_no_matches = True
pattern = self.__MakeSrcPath(src_pattern[1:], allow_unresolved=True)
if '=>NULL' in pattern:
print '=> skip unresolved copy pattern:', pattern
continue
elif src_pattern[0] == '-':
is_drop_pattern = True
drop_patterns.append(src_pattern[1:])
continue
else:
pattern = self.__MakeSrcPath(src_pattern)
entry_sources = glob.glob(pattern)
if not entry_sources:
if allow_no_matches:
print '=> skip copy pattern "%s" since no matches found' % pattern
else:
print 'ERROR: Nothing is found for pattern:', pattern
print 'HINT: If this pattern is allowed to return nothing then add prefix "?"'
exit(-1)
if copy_sources is None:
copy_sources = []
copy_sources.extend(entry_sources)
# Copy files.
if copy_sources is not None:
for source in copy_sources:
self.__OsSafeCopyToRelease(source, dest_path)
if not copy_sources:
print '=> skip empty folder:', source
# Drop files.
for pattern in drop_patterns:
cleanup_path = self.__MakeDestPath(os.path.join(dest_folder, pattern))
relpath = os.path.relpath(cleanup_path, dest_path)
head, _ = os.path.split(relpath)
if relpath.startswith('..') or head:
print ('ERROR: Cleanup pattern must designate an entity in the target folder:'
' pattern=%s, target=%s' % (cleanup_path, dest_path))
exit(-1)
targets = glob.glob(cleanup_path)
if targets:
for target in targets:
self.__OsSafeDeleteFromDest(target)
else:
print '=> skip cleanup pattern "%s" since no matches found' % cleanup_path
print '=== END: Building release structure'
# Extracts version number of the release from the sources.
def __Step_ExtractVersion(self):
file_path = self.__MakeSrcPath(self.ASSEMBLY_INFO_FILE)
print 'Extract release version...'
if self.VERSION:
print '=> already set:', self.VERSION
return
print '=> AssemblyInfo:', file_path
with open(file_path) as f:
content = f.readlines()
for line in content:
if line.lstrip().startswith('//'):
continue
# Expect: [assembly: AssemblyVersion("X.Y.Z")]
matches = re.match(
r'\[assembly: AssemblyVersion.*\("(\d+)\.(\d+)\.(\*|\d+)(.(\*|\d+))?"\)\]', line)
if matches:
self.__MakeVersion(
matches.group(1), matches.group(2), matches.group(3), matches.group(5) or 0)
break
if self.VERSION is None:
print 'ERROR: Cannot extract version from: %s' % file_path
exit(-1)
print '=> found version: v%d.%d, patch %d, build %d' % self.VERSION
# Updates the source files with the version info.
def __Step_UpdateVersionInSources(self):
print 'Update MiniAVC info...'
if not self.MINIAVC_VERSION_FILE:
print '=> no version file defined, skipping'
return
version_file = self.__MakeSrcPath(self.MINIAVC_VERSION_FILE)
print '=> version file:', version_file
with open(version_file) as fp:
content = json.load(fp);
if not 'VERSION' in content:
print 'ERROR: Cannot find VERSION in:', version_file
exit(-1)
content['VERSION']['MAJOR'] = self.VERSION[0]
content['VERSION']['MINOR'] = self.VERSION[1]
content['VERSION']['PATCH'] = self.VERSION[2]
content['VERSION']['BUILD'] = self.VERSION[3]
with open(version_file, 'w') as fp:
json.dump(content, fp, indent=4, sort_keys=True)
# Creates a package for re-destribution.
def __Step_MakePackage(self, overwrite_existing):
print 'Making %s package...' % (self.PACKAGE_NAME or '<NONE>')
if self.RELEASE_NAME_FREE_FORMAT:
release_name = self.__ParseMacros(self.RELEASE_NAME_FREE_FORMAT).format(*self.VERSION)
else:
release_name = (self.VERSION[3]
and self.__ParseMacros(self.RELEASE_NAME_WITH_BUILD_FMT % self.VERSION)
or self.__ParseMacros(self.RELEASE_NAME_FMT % self.VERSION[:3]))
package_file_name = self.__MakeSrcPath(os.path.join('/', self.ARCHIVE_DEST, release_name))
archive_name = package_file_name + '.zip'
if os.path.exists(archive_name):
if not overwrite_existing:
print 'ERROR: Package for this version already exists: %s' % archive_name
exit(-1)
print '=> package already exists. DELETING.'
os.remove(archive_name)
shutil.make_archive(package_file_name, 'zip', self.__GetAbsReleaseFolder(), 'GameData')
print '=> stored in:', package_file_name
# Fills VERSION given the string or int compinents. The patch and build could be "*".
def __MakeVersion(self, major, minor, patch, build):
# Get build/rev from the binary if it's auto generated.
if build == '*' or patch == '*':
filename = self.__MakeSrcPath(self.COMPILED_BINARY)
version = self.__GetFileInfo(filename) or ''
parts = version.split('.')
if patch == '*' and len(parts) >= 3:
patch = parts[2]
if len(parts) >= 4:
build = parts[3]
# Handle fallbacks in case of the version wasn't extracted.
if patch == '*':
print 'WARNING: Couldn\'t resolve version PATCH, fallback to 0'
patch = 0
if build == '*':
print 'WARNING: Couldn\'t resolve version BUILD, fallback to 0'
build = 0
# Fill the version
self.VERSION = (int(major), int(minor), int(patch), int(build))
# Checks if path doesn't try to address file above the root. All path arguments can contain
# macros.
#
# @param test_path Path to check.
# @param chroot Optional path to use as root. When not specified root is {PROJECT_ROOT}.
# @param action Name of the action that needs the check. It will be reported in case of negative
# result.
# @return Absolute path for {@code test_path}.
def __CheckChroot(self, test_path, chroot=None, action=None):
abs_test_path = os.path.abspath(self.__ParseMacros(test_path))
abs_chroot = os.path.abspath(os.path.join(self.__ParseMacros(chroot or self.PROJECT_ROOT)))
rel_path = os.path.relpath(abs_test_path, abs_chroot)
if rel_path.startswith('..'):
print 'ERROR: Action %s is not permitted above the root: %s (root=%s)' % (
action, abs_test_path, abs_chroot)
raise RuntimeError('Path is not secure!')
return abs_test_path
# Creates all elements in the path if they don't exist. Ensures the folders are created within
# {PROJECT_ROOT}.
# Folder name supports macros.
def __OsSafeMakedirs(self, folder):
abs_path = self.__CheckChroot(folder, action='MAKE PATH')
if not os.path.isdir(abs_path):
print '=> create folder:', abs_path
os.makedirs(abs_path)
# Copies file or folder. Ensures that source is defined within {PROJECT_ROOT} and target is in
# {RELEASE}.
# Both {@code src} and {@code dest} must be absolute or relative OS paths. No macros supported.
def __OsSafeCopyToRelease(self, src, dest, source_must_exist=True):
abs_src = self.__CheckChroot(src, action='COPY-FROM')
abs_dest = self.__CheckChroot(dest, chroot=self.__GetAbsReleaseFolder(), action='COPY-TO')
if os.path.isfile(abs_src):
self.__OsSafeMakedirs(abs_dest)
print '=> copy file:', abs_src
shutil.copy(abs_src, abs_dest)
elif os.path.isdir(abs_src):
print '=> copy folder:', abs_src
shutil.copytree(abs_src, os.path.join(abs_dest, os.path.basename(abs_src)))
else:
if source_must_exist:
print 'ERROR: Source path not found"', abs_src
exit(-1)
print "=> skipping:", abs_src
# Copies file or folder. Ensures that path is defined within {RELEASE}.
def __OsSafeDeleteFromDest(self, path):
abs_path = self.__CheckChroot(path, chroot=self.__GetAbsReleaseFolder(), action='DELETE')
if os.path.isfile(abs_path):
print '=> drop file:', abs_path
os.unlink(abs_path)
else:
print '=> drop folder:', abs_path
shutil.rmtree(abs_path, True)
  # Extracts information from a DLL file.
  def __GetFileInfo(self, filename):
    """Returns the FileVersion string from a PE file's version resource.

    Returns None when the file has no version resource or any of the Win32
    calls fail. Windows-only: relies on ctypes.windll (version.dll).
    """
    filename = u'' + filename  # Ensure it's wide-string encoding.
    size = ctypes.windll.version.GetFileVersionInfoSizeW(filename, None)
    if not size:
      return None
    res = ctypes.create_string_buffer(size)
    if not ctypes.windll.version.GetFileVersionInfoW(filename, None, size, res):
      return None
    l = ctypes.c_uint()
    r = ctypes.c_void_p()
    # Read the translation table to pick a language/codepage pair.
    # NOTE(review): mixes wide (W) read APIs with the ANSI (A) query API;
    # presumably fine for the ASCII-only query strings used here — confirm.
    if not ctypes.windll.version.VerQueryValueA(
        res, '\\VarFileInfo\\Translation', ctypes.byref(r), ctypes.byref(l)):
      return None
    if not l.value:
      return None
    # The first two 16-bit values are the (language, codepage) of interest.
    codepages = array.array('H', ctypes.string_at(r.value, l.value))
    codepage = tuple(codepages[:2].tolist())
    r = ctypes.c_char_p()
    # Query the FileVersion string for the selected translation.
    if not ctypes.windll.version.VerQueryValueA(
        res, ('\\StringFileInfo\\%04x%04x\\FileVersion') % codepage,
        ctypes.byref(r), ctypes.byref(l)):
      return None
    return ctypes.string_at(r)
# Default JSON settings file to search in the current | |
import importlib
import string
import re
from .vocab import english
from .grammar import Command, GrammarObject
from .verb import (
ScoreVerb,
FullScoreVerb,
HelpVerbVerb,
GetVerb,
StandUpVerb,
DropVerb,
RemoveFromVerb,
)
from .thing_base import Thing
from .things import Container, Surface, UnderSpace, Liquid
from .room import Room
from .travel import directionDict
from .tokenizer import cleanInput, tokenize, removeArticles
from .exceptions import (
NoMatchingSuggestion,
VerbDefinitionError,
VerbMatchError,
ObjectMatchError,
ParserError,
OutOfRange,
AbortTurn,
)
##############################################################
# PARSER.PY - the parser for IntFicPy
# Contains the Parser class
##############################################################
class Parser:
def __init__(self, game):
self.game = game
self.command = Command()
self.previous_command = Command()
self.previous_command.dobj = GrammarObject()
self.previous_command.iobj = GrammarObject()
self.turns = 0
def recordInput(self, input_string):
self.game.turn_list.append(input_string)
if self.game.recfile:
with open(self.game.recfile, "a") as recfile:
recfile.write(input_string + "\n")
return input_string
def clearCommand(self):
if self.previous_command:
del self.previous_command
self.previous_command = self.command
self.command = None
def replace_string_vars(self, text):
"""
Perform string replacements for text in the format
<<main_module.module.attribute ... >>
This should be called by the Game instance when text is
added to an event
"""
if not ("<<" in text and ">>" in text):
return text
tokens = re.split(r"(<<[a-zA-Z0-9\.\(\)_]+>>)", text)
text = ""
for tok in tokens:
if tok.startswith("<<") and tok.endswith(">>"):
if "(" in tok:
raise NotImplementedError(
f"IntFicPy cannot perform the replacement `{tok}`. "
"<<syntax>>> string replacement does not support inserting "
"return values from functions or callables"
)
nested_attrs = tok[2:-2]
nested_attrs = nested_attrs.split(".")
obj = self.game.main
for attr in nested_attrs:
obj = getattr(obj, attr)
tok = str(obj)
text += tok
return text
def getDirection(self):
"""
Check for direction statement as in "west" or "ne"
Called every turn by self.parseInput
Raises AbortTurn on discovering & executing a travel command
"""
d = self.command.tokens[0]
if d in directionDict and len(self.command.tokens) == 1:
if self.previous_command.ambiguous:
candidates = []
for obj in self.previous_command.things:
if d in obj.adjectives:
return False
directionDict[d]["func"](self.game)
raise AbortTurn("Executed direction statement")
return
    def getCurVerb(self):
        """
        Identify the verb for the current command.
        Called every turn by self.parseInput.

        On success, leaves the matched verb in self.command.verb and
        returns None.  Otherwise it falls back, in order, to conversation
        suggestions, disambiguation of the previous command, and bare
        direction statements; if none of those apply it raises
        VerbMatchError (the fallbacks raise ParserError/AbortTurn
        themselves).
        """
        # look up first word in verb dictionary
        if self.command.primary_verb_token in self.game.verbs:
            # Copy the candidate list so later filtering cannot mutate the
            # game's verb table.
            self.command.verb_matches = list(
                self.game.verbs[self.command.primary_verb_token]
            )
            self.matchPrepKeywords()
            self.verbByObjects()
            if self.command.verb:
                return
        # No verb matched: maybe the input answers an active conversation
        # suggestion or menu sequence instead.
        self.checkForConvCommand()
        self.command.err = True
        if self.previous_command.specialTopics or (
            self.previous_command.sequence and self.previous_command.sequence.options
        ):
            raise ParserError(
                f"{' '.join(self.command.tokens).capitalize()} is not enough information "
                "to match a suggestion. "
            )
        if self.previous_command.ambiguous or self.previous_command.specialTopics:
            self.disambig()
            raise AbortTurn("Disambiguation complete.")
        # Last resort: a bare direction word ("west", "ne") travels.
        self.getDirection()
        raise VerbMatchError(
            f"I don't understand the verb: {self.command.primary_verb_token}"
        )
def checkForConvCommand(self):
self.sendTokensToCurrentSequence()
if self.previous_command.specialTopics and self.getConvCommand():
raise AbortTurn("Accepted conversation suggestion")
    def verbByObjects(self):
        """
        Disambiguates verbs based on syntax used
        Iterates through verb list, comparing syntax in input to the entries in the
        .syntax attribute of the verb

        On a unique match, fills in self.command.verb / verb_form /
        dobj / iobj and returns; otherwise raises ParserError.
        """
        self.checkForConvCommand()
        match_pairs = []
        # Pass 1: keep [verb, form] pairs whose every literal word (tokens
        # not wrapped in <>) appears somewhere in the player's input.
        for cur_verb in self.command.verb_matches:
            for verb_form in cur_verb.syntax:
                i = len(verb_form)
                for word in verb_form:
                    if word[0] != "<":
                        if word not in self.command.tokens:
                            break
                        else:
                            i = i - 1
                    else:
                        i = i - 1
                # i reaches 0 only when no literal word was missing.
                if i == 0:
                    match_pairs.append([cur_verb, verb_form])
        removeMatch = []
        # Pass 2: extract candidate object token lists for each surviving
        # pair; discard pairs with unexplained input words or a missing
        # required object.
        for pair in match_pairs:
            verb = pair[0]
            verb_form = pair[1]
            # Are <dobj> and <iobj> adjacent in this form?  If so, a plain
            # positional split cannot separate them.
            adjacent = False
            if verb.hasDobj:
                d_ix = verb_form.index("<dobj>")
                if not "<dobj>" == verb_form[-1]:
                    if verb_form[d_ix + 1] == "<iobj>":
                        adjacent = True
                if verb_form[d_ix - 1] == "<iobj>":
                    adjacent = True
            if adjacent and verb.dscope in ["text", "direction"]:
                (dobj, iobj) = self._adjacentStrObj(verb_form, "<dobj>") or (None, None)
            elif adjacent and verb.iscope in ["text", "direction"]:
                (dobj, iobj) = self._adjacentStrObj(verb_form, "<iobj>") or (None, None)
            else:
                dobj = self._analyzeSyntax(verb_form, "<dobj>")
                iobj = self._analyzeSyntax(verb_form, "<iobj>")
            pair += [dobj, iobj]
            extra = self.checkExtra(verb, verb_form, dobj, iobj)
            if len(extra) > 0:
                removeMatch.append(pair)
            elif verb.hasDobj and not verb.impDobj and not dobj:
                removeMatch.append(pair)
            elif verb.hasIobj and not verb.impIobj and not iobj:
                removeMatch.append(pair)
            elif (
                verb.dscope == "direction" and not self.directionRangeCheck(dobj)
            ) or (verb.iscope == "direction" and not self.directionRangeCheck(iobj)):
                removeMatch.append(pair)
        for x in removeMatch:
            match_pairs.remove(x)
        # Accept only an unambiguous single match.
        if len(match_pairs) == 1:
            self.command.verb = match_pairs[0][0]
            self.command.verb_form = match_pairs[0][1]
            self.command.dobj = GrammarObject(match_pairs[0][2])
            self.command.iobj = GrammarObject(match_pairs[0][3])
            return
        # NOTE(review): zero matches and multiple matches both land here,
        # but the message only really fits the zero-match case.
        raise ParserError(
            'I understood as far as "'
            + self.command.primary_verb_token
            + '".<br>(Type VERB HELP '
            + self.command.primary_verb_token.upper()
            + " for help with phrasing.) ",
        )
    def checkExtra(self, verb, verb_form, dobj, iobj):
        """
        Checks for words unaccounted for by verb form
        Returns a list, empty or containing one word strings (extra words)
        """
        accounted = []
        extra = list(self.command.tokens)
        for word in extra:
            # Prepositions/keywords are accounted for when the verb form
            # itself uses them, or when they serve as an adjective of a
            # noun mentioned in either object phrase.
            if word in english.prepositions or word in english.keywords:
                if word in verb_form:
                    accounted.append(word)
                    continue
                for obj in [dobj, iobj]:
                    if not obj:
                        # NOTE(review): `break` abandons the iobj check
                        # whenever dobj is empty — `continue` may have been
                        # intended; confirm before changing.
                        break
                    noun = obj[-1]
                    if noun in self.game.nouns:
                        for item in self.game.nouns[noun]:
                            if word in item.adjectives:
                                accounted.append(word)
                                break
                    if word in accounted:
                        break
            # Bare direction particles count when the verb takes a
            # direction-scoped object and the particle is the whole object.
            if (
                word in ("up", "down", "in", "out")
                and verb.iscope == "direction"
                and (
                    (iobj and len(iobj) == 1 and word in iobj)
                    or (dobj and len(dobj) == 1 and word in dobj)
                )
            ):
                accounted.append(word)
            elif word in verb_form:
                accounted.append(word)
            elif dobj and word in dobj:
                accounted.append(word)
            elif iobj and word in iobj:
                accounted.append(word)
        # Whatever was never accounted for is unexplained input.
        for word in accounted:
            if word in extra:
                extra.remove(word)
        return extra
def matchPrepKeywords(self):
"""
Check for prepositions in the self.tokenized player command, and remove any candidate
verbs whose preposition does not match
Returns a list of Verb objects or an empty list
"""
remove_verb = []
for p in english.prepositions:
if p in self.command.tokens and len(self.command.tokens) > 1:
exempt = False
for verb in self.command.verb_matches:
ix = self.command.tokens.index(p) + 1
if ix < len(self.command.tokens):
noun = self.command.tokens[ix]
while not noun in self.game.nouns:
ix = ix + 1
if ix >= len(self.command.tokens):
break
noun = self.command.tokens[ix]
if noun in self.game.nouns:
for item in self.game.nouns[noun]:
if p in item.adjectives:
exempt = True
if p in ["up", "down", "in", "out"]:
if verb.iscope == "direction" or verb.dscope == "direction":
exempt = True
if (
not (verb.preposition or not p in verb.preposition)
and not exempt
):
remove_verb.append(verb)
for p in english.keywords:
if p in self.command.tokens and len(self.command.tokens) > 1:
exempt = False
for verb in self.command.verb_matches:
ix = self.command.tokens.index(p) + 1
if ix < len(self.command.tokens):
noun = self.command.tokens[ix]
while not noun in self.game.nouns:
ix = ix + 1
if ix >= len(self.command.tokens):
break
noun = self.command.tokens[ix]
if noun in self.game.nouns:
for item in self.game.nouns[noun]:
if p in item.adjectives:
exempt = True
if not (verb.keywords or not p in verb.keywords) and not exempt:
remove_verb.append(verb)
for verb in remove_verb:
if verb in self.command.verb_matches:
self.command.verb_matches.remove(verb)
def getGrammarObj(self):
"""
Analyze input using the chosen verb_form to find any objects
"""
# first, choose the correct syntax
if not self.command.verb_form:
return None
# check if dobj and iobj are adjacent
adjacent = False
if self.command.verb.hasDobj:
d_ix = self.command.verb_form.index("<dobj>")
if not "<dobj>" == self.command.verb_form[-1]:
if self.command.verb_form[d_ix + 1] == "<iobj>":
adjacent = True
if self.command.verb_form[d_ix - 1] == "<iobj>":
adjacent = True
if self.command.verb.hasDobj and not self.command.dobj:
self.command.dobj = GrammarObject(
self._analyzeSyntax(self.command.verb_form, "<dobj>")
)
if not self.command.dobj.tokens and not self.command.verb.impDobj:
raise VerbDefinitionError(
f"<dobj> tag was not found in verb form {verb_form}, "
f"but associated verb {self.command.verb} has dobj=True"
)
if self.command.verb.hasIobj and not self.command.iobj:
self.command.iobj = GrammarObject(
self._analyzeSyntax(self.command.verb_form, "<iobj>")
)
if not self.command.iobj.tokens and not self.command.verb.imp_dobj:
raise VerbDefinitionError(
f"<iobj> tag was not found in verb form {verb_form}, "
f"but associated verb {self.command.verb} has iobj=True"
)
self.checkForImplicitObjects()
def _adjacentStrObj(self, verb_form, strobj_tag):
dobj_ix = verb_form.index("<dobj>")
iobj_ix = verb_form.index("<iobj>")
if verb_form[-1] == "<dobj>":
before = verb_form[iobj_ix - 1]
after = None
elif verb_form[-1] == "<iobj>":
before = verb_form[dobj_ix - 1]
after = None
elif iobj_ix < dobj_ix:
before = verb_form[iobj_ix - 1]
after = verb_form[dobj_ix + 1]
else:
before = verb_form[dobj_ix - 1]
after = verb_form[iobj_ix + 1]
b_ix = self.command.tokens.index(before) + 1
if not after:
a_ix = None
objs = self.command.tokens[b_ix:]
else:
a_ix = self.command.tokens.index(after)
objs = self.command.tokens[b_ix:a_ix]
thing_follows_string = True
if (strobj_tag == "<dobj>" and dobj_ix > iobj_ix) or (
strobj_tag == "<iobj>" and iobj_ix > dobj_ix
):
thing_follows_string = False
if thing_follows_string:
if not objs[-1] in self.game.nouns or len(objs) < 2:
return None
things = self.game.nouns[objs[-1]]
end_str | |
# coding: utf-8
"""
Copyright (c) 2021 Aspose.BarCode for Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from aspose_barcode_cloud.api_client import ApiClient
class FolderApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def copy_folder(
self, src_path, dest_path, src_storage_name=None, dest_storage_name=None, async_req=False, **kwargs
):
"""Copy folder
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FolderApi().copy_folder(src_path, dest_path, async_req=True)
>>> result = thread.get()
:param str src_path: Source folder path e.g. '/src' # noqa: E501
:param str dest_path: Destination folder path e.g. '/dst' # noqa: E501
:param str src_storage_name: Source storage name # noqa: E501
:param str dest_storage_name: Destination storage name # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.copy_folder_with_http_info(
src_path, dest_path, src_storage_name=src_storage_name, dest_storage_name=dest_storage_name, **kwargs
)
else:
(data) = self.copy_folder_with_http_info(
src_path, dest_path, src_storage_name=src_storage_name, dest_storage_name=dest_storage_name, **kwargs
)
return data
    def copy_folder_with_http_info(self, src_path, dest_path, **kwargs):
        """Copy folder
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = FolderApi().copy_folder_with_http_info(src_path, dest_path, async_req=True)
        >>> result = thread.get()
        :param str src_path: Source folder path e.g. '/src'  # noqa: E501
        :param str dest_path: Destination folder path e.g. '/dst'  # noqa: E501
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Names this endpoint accepts, including client-level controls.
        all_params = {"src_path", "dest_path", "src_storage_name", "dest_storage_name"}
        all_params.add("async_req")
        all_params.add("_return_http_data_only")
        all_params.add("_preload_content")
        all_params.add("_request_timeout")
        # locals() captures src_path/dest_path/kwargs at this point; kwargs
        # entries are validated against all_params and merged in, with None
        # values dropped.  Do not add locals above this line.
        params = locals()
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'" " to method copy_folder" % key)
            if val is None:
                continue
            params[key] = val
        del params["kwargs"]
        # verify the required parameter "src_path" is set
        if "src_path" not in params or params["src_path"] is None:
            raise ValueError("Missing the required parameter 'src_path' when calling 'copy_folder'")
        # verify the required parameter "dest_path" is set
        if "dest_path" not in params or params["dest_path"] is None:
            raise ValueError("Missing the required parameter 'dest_path' when calling 'copy_folder'")
        collection_formats = {}
        # srcPath is a path-template parameter; the rest travel as query params.
        path_params = {}
        if "src_path" in params:
            path_params["srcPath"] = params["src_path"]
        query_params = []
        if "dest_path" in params:
            query_params.append(("destPath", params["dest_path"]))
        if "src_storage_name" in params:
            query_params.append(("srcStorageName", params["src_storage_name"]))
        if "dest_storage_name" in params:
            query_params.append(("destStorageName", params["dest_storage_name"]))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header "Accept"
        header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
        # HTTP header "Content-Type"
        header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
        # Authentication setting
        auth_settings = ["JWT"]
        return self.api_client.call_api(
            "/barcode/storage/folder/copy/{srcPath}",
            "PUT",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get("async_req"),
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
def create_folder(self, path, storage_name=None, async_req=False, **kwargs):
"""Create the folder
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FolderApi().create_folder(path, async_req=True)
>>> result = thread.get()
:param str path: Folder path to create e.g. 'folder_1/folder_2/' # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.create_folder_with_http_info(path, storage_name=storage_name, **kwargs)
else:
(data) = self.create_folder_with_http_info(path, storage_name=storage_name, **kwargs)
return data
    def create_folder_with_http_info(self, path, **kwargs):
        """Create the folder
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = FolderApi().create_folder_with_http_info(path, async_req=True)
        >>> result = thread.get()
        :param str path: Folder path to create e.g. 'folder_1/folder_2/'  # noqa: E501
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Names this endpoint accepts, including client-level controls.
        all_params = {"path", "storage_name"}
        all_params.add("async_req")
        all_params.add("_return_http_data_only")
        all_params.add("_preload_content")
        all_params.add("_request_timeout")
        # locals() captures path/kwargs at this point; kwargs entries are
        # validated and merged in, with None values dropped.  Do not add
        # locals above this line.
        params = locals()
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'" " to method create_folder" % key)
            if val is None:
                continue
            params[key] = val
        del params["kwargs"]
        # verify the required parameter "path" is set
        if "path" not in params or params["path"] is None:
            raise ValueError("Missing the required parameter 'path' when calling 'create_folder'")
        collection_formats = {}
        # path is a path-template parameter; storageName travels as a query param.
        path_params = {}
        if "path" in params:
            path_params["path"] = params["path"]
        query_params = []
        if "storage_name" in params:
            query_params.append(("storageName", params["storage_name"]))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header "Accept"
        header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
        # HTTP header "Content-Type"
        header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
        # Authentication setting
        auth_settings = ["JWT"]
        return self.api_client.call_api(
            "/barcode/storage/folder/{path}",
            "PUT",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get("async_req"),
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
def delete_folder(self, path, storage_name=None, recursive=None, async_req=False, **kwargs):
"""Delete folder
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FolderApi().delete_folder(path, async_req=True)
>>> result = thread.get()
:param str path: Folder path e.g. '/folder' # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param bool recursive: Enable to delete folders, subfolders and files # noqa: E501
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.delete_folder_with_http_info(path, storage_name=storage_name, recursive=recursive, **kwargs)
else:
(data) = self.delete_folder_with_http_info(path, storage_name=storage_name, recursive=recursive, **kwargs)
return data
    def delete_folder_with_http_info(self, path, **kwargs):
        """Delete folder
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = FolderApi().delete_folder_with_http_info(path, async_req=True)
        >>> result = thread.get()
        :param str path: Folder path e.g. '/folder'  # noqa: E501
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Names this endpoint accepts, including client-level controls.
        all_params = {"path", "storage_name", "recursive"}
        all_params.add("async_req")
        all_params.add("_return_http_data_only")
        all_params.add("_preload_content")
        all_params.add("_request_timeout")
        # locals() captures path/kwargs at this point; kwargs entries are
        # validated and merged in, with None values dropped.  Do not add
        # locals above this line.
        params = locals()
        for key, val in six.iteritems(params["kwargs"]):
            if key not in all_params:
                raise TypeError("Got an unexpected keyword argument '%s'" " to method delete_folder" % key)
            if val is None:
                continue
            params[key] = val
        del params["kwargs"]
        # verify the required parameter "path" is set
        if "path" not in params or params["path"] is None:
            raise ValueError("Missing the required parameter 'path' when calling 'delete_folder'")
        collection_formats = {}
        # path is a path-template parameter; the rest travel as query params.
        path_params = {}
        if "path" in params:
            path_params["path"] = params["path"]
        query_params = []
        if "storage_name" in params:
            query_params.append(("storageName", params["storage_name"]))
        if "recursive" in params:
            query_params.append(("recursive", params["recursive"]))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header "Accept"
        header_params["Accept"] = self.api_client.select_header_accept(["application/json"])
        # HTTP header "Content-Type"
        header_params["Content-Type"] = self.api_client.select_header_content_type(["application/json"])
        # Authentication setting
        auth_settings = ["JWT"]
        return self.api_client.call_api(
            "/barcode/storage/folder/{path}",
            "DELETE",
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get("async_req"),
            _return_http_data_only=params.get("_return_http_data_only"),
            _preload_content=params.get("_preload_content", True),
            _request_timeout=params.get("_request_timeout"),
            collection_formats=collection_formats,
        )
def get_files_list(self, path, storage_name=None, async_req=False, **kwargs):
"""Get all files and folders within a folder
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = FolderApi().get_files_list(path, async_req=True)
>>> result = thread.get()
:param str path: Folder path e.g. '/folder' # noqa: E501
:param str storage_name: Storage name # noqa: E501
:param async_req bool
:return: FilesList
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if async_req:
return self.get_files_list_with_http_info(path, | |
ppstm):
"""
Clone(self: IStream) -> IStream
Creates a new stream object with its own seek pointer that references the same
bytes as the original stream.
"""
pass
    def Commit(self, grfCommitFlags):
        """
        Commit(self: IStream, grfCommitFlags: int)
        Ensures that any changes made to a stream object that is open in transacted
        mode are reflected in the parent storage.
        grfCommitFlags: A value that controls how the changes for the stream object are committed.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def CopyTo(self, pstm, cb, pcbRead, pcbWritten):
        """
        CopyTo(self: IStream, pstm: IStream, cb: Int64, pcbRead: IntPtr, pcbWritten: IntPtr)
        Copies a specified number of bytes from the current seek pointer in the stream
        to the current seek pointer in another stream.
        pstm: A reference to the destination stream.
        cb: The number of bytes to copy from the source stream.
        pcbRead: On successful return, contains the actual number of bytes read from the source.
        pcbWritten: On successful return, contains the actual number of bytes written to the
        destination.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def LockRegion(self, libOffset, cb, dwLockType):
        """
        LockRegion(self: IStream, libOffset: Int64, cb: Int64, dwLockType: int)
        Restricts access to a specified range of bytes in the stream.
        libOffset: The byte offset for the beginning of the range.
        cb: The length of the range, in bytes, to restrict.
        dwLockType: The requested restrictions on accessing the range.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def Read(self, pv, cb, pcbRead):
        """
        Read(self: IStream, cb: int, pcbRead: IntPtr) -> Array[Byte]
        Reads a specified number of bytes from the stream object into memory starting
        at the current seek pointer.
        cb: The number of bytes to read from the stream object.
        pcbRead: A pointer to a ULONG variable that receives the actual number of bytes read
        from the stream object.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def Revert(self):
        """
        Revert(self: IStream)
        Discards all changes that have been made to a transacted stream since the last
        System.Runtime.InteropServices.ComTypes.IStream.Commit(System.Int32) call.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def Seek(self, dlibMove, dwOrigin, plibNewPosition):
        """
        Seek(self: IStream, dlibMove: Int64, dwOrigin: int, plibNewPosition: IntPtr)
        Changes the seek pointer to a new location relative to the beginning of the
        stream, to the end of the stream, or to the current seek pointer.
        dlibMove: The displacement to add to dwOrigin.
        dwOrigin: The origin of the seek. The origin can be the beginning of the file, the
        current seek pointer, or the end of the file.
        plibNewPosition: On successful return, contains the offset of the seek pointer from the
        beginning of the stream.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def SetSize(self, libNewSize):
        """
        SetSize(self: IStream, libNewSize: Int64)
        Changes the size of the stream object.
        libNewSize: The new size of the stream as a number of bytes.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def Stat(self, pstatstg, grfStatFlag):
        """
        Stat(self: IStream, grfStatFlag: int) -> STATSTG
        Retrieves the System.Runtime.InteropServices.STATSTG structure for this stream.
        grfStatFlag: Members in the STATSTG structure that this method does not return, thus saving
        some memory allocation operations.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def UnlockRegion(self, libOffset, cb, dwLockType):
        """
        UnlockRegion(self: IStream, libOffset: Int64, cb: Int64, dwLockType: int)
        Removes the access restriction on a range of bytes previously restricted with
        the
        System.Runtime.InteropServices.ComTypes.IStream.LockRegion(System.Int64,System.I
        nt64,System.Int32) method.
        libOffset: The byte offset for the beginning of the range.
        cb: The length, in bytes, of the range to restrict.
        dwLockType: The access restrictions previously placed on the range.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def Write(self, pv, cb, pcbWritten):
        """
        Write(self: IStream, pv: Array[Byte], cb: int, pcbWritten: IntPtr)
        Writes a specified number of bytes into the stream object starting at the
        current seek pointer.
        pv: The buffer to write this stream to.
        cb: The number of bytes to write to the stream.
        pcbWritten: On successful return, contains the actual number of bytes written to the stream
        object. If the caller sets this pointer to System.IntPtr.Zero, this method does
        not provide the actual number of bytes written.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        # Interop stub: construction is handled by the CLR runtime.
        pass
class ITypeComp:
    """ Provides the managed definition of the ITypeComp interface. """
    # Interop stub class: method bodies are supplied by the CLR at runtime;
    # the docstrings below mirror the .NET signatures.
    def Bind(self, szName, lHashVal, wFlags, ppTInfo, pDescKind, pBindPtr):
        """
        Bind(self: ITypeComp, szName: str, lHashVal: int, wFlags: Int16) -> (ITypeInfo, DESCKIND, BINDPTR)
        Maps a name to a member of a type, or binds global variables and functions
        contained in a type library.
        szName: The name to bind.
        lHashVal: A hash value for szName computed by LHashValOfNameSys.
        wFlags: A flags word containing one or more of the invoke flags defined in the
        INVOKEKIND enumeration.
        """
        pass
    def BindType(self, szName, lHashVal, ppTInfo, ppTComp):
        """
        BindType(self: ITypeComp, szName: str, lHashVal: int) -> (ITypeInfo, ITypeComp)
        Binds to the type descriptions contained within a type library.
        szName: The name to bind.
        lHashVal: A hash value for szName determined by LHashValOfNameSys.
        """
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class ITypeInfo:
""" Provides the managed definition of the Component Automation ITypeInfo interface. """
    def AddressOfMember(self, memid, invKind, ppv):
        """
        AddressOfMember(self: ITypeInfo, memid: int, invKind: INVOKEKIND) -> IntPtr
        Retrieves the addresses of static functions or variables, such as those defined
        in a DLL.
        memid: The member ID of the static member's address to retrieve.
        invKind: One of the System.Runtime.InteropServices.ComTypes.INVOKEKIND values that
        specifies whether the member is a property, and if so, what kind.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def CreateInstance(self, pUnkOuter, riid, ppvObj):
        """
        CreateInstance(self: ITypeInfo, pUnkOuter: object, riid: Guid) -> (Guid, object)
        Creates a new instance of a type that describes a component class (coclass).
        pUnkOuter: The object that acts as the controlling IUnknown.
        riid: The IID of the interface that the caller uses to communicate with the resulting
        object.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def GetContainingTypeLib(self, ppTLB, pIndex):
        """
        GetContainingTypeLib(self: ITypeInfo) -> (ITypeLib, int)
        Retrieves the type library that contains this type description and its index
        within that type library.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def GetDllEntry(self, memid, invKind, pBstrDllName, pBstrName, pwOrdinal):
        """
        GetDllEntry(self: ITypeInfo, memid: int, invKind: INVOKEKIND, pBstrDllName: IntPtr, pBstrName: IntPtr, pwOrdinal: IntPtr)
        Retrieves a description or specification of an entry point for a function in a
        DLL.
        memid: The ID of the member function whose DLL entry description is to be returned.
        invKind: One of the System.Runtime.InteropServices.ComTypes.INVOKEKIND values that
        specifies the kind of member identified by memid.
        pBstrDllName: If not null, the function sets pBstrDllName to a BSTR that contains the name of
        the DLL.
        pBstrName: If not null, the function sets lpbstrName to a BSTR that contains the name of
        the entry point.
        pwOrdinal: If not null, and the function is defined by an ordinal, then lpwOrdinal is set
        to point to the ordinal.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def GetDocumentation(self, index, strName, strDocString, dwHelpContext, strHelpFile):
        """
        GetDocumentation(self: ITypeInfo, index: int) -> (str, str, int, str)
        Retrieves the documentation string, the complete Help file name and path, and
        the context ID for the Help topic for a specified type description.
        index: The ID of the member whose documentation is to be returned.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def GetFuncDesc(self, index, ppFuncDesc):
        """
        GetFuncDesc(self: ITypeInfo, index: int) -> IntPtr
        Retrieves the System.Runtime.InteropServices.FUNCDESC structure that contains
        information about a specified function.
        index: The index of the function description to return.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
    def GetIDsOfNames(self, rgszNames, cNames, pMemId):
        """
        GetIDsOfNames(self: ITypeInfo, rgszNames: Array[str], cNames: int) -> Array[int]
        Maps between member names and member IDs, and parameter names and parameter IDs.
        rgszNames: An array of names to map.
        cNames: The count of names to map.
        """
        # Interop stub: the real implementation is supplied by the CLR.
        pass
def GetImplTypeFlags(self, index, pImplTypeFlags):
"""
GetImplTypeFlags(self: ITypeInfo, index: int) -> IMPLTYPEFLAGS
Retrieves the System.Runtime.InteropServices.IMPLTYPEFLAGS value for | |
import torch
from torch import nn
from torch import optim
from model.model_preprocessing import Preprocess
from model.model_encoder import pulsar_encoder
from model.model_tcn_multi import TemporalConvNet_multi
from model.model_output import OutputLayer
from model.model_multiclass import MultiClass
try:
from model.model_classifier_ffa import classifier_ffa
except ImportError:
print("FFA classifier not imported properly")
pass
from model.model_classifier_stft import classifier_stft
from model.model_candidate_creator import candidate_creator
from torch.utils.checkpoint import checkpoint
import numpy as np
import sys
import torch.nn.functional as F
import scipy.signal
import matplotlib.pyplot as plt
import argparse
import json
from data_loader import dataset
class pulsar_net(nn.Module):
# Whole pulsar net. By default it contains an encoder as well as a classifier.
def __init__(self, model_para, input_shape, lr, no_pad=False,
mode='full', no_reg=0, clamp=[0, -1000, 1000],
gauss=(27, 15 / 4, 1, 1),
cmask=False, rfimask=False,
dm0_class=False, class_configs=[''], data_resolution=1, crop=0,
edge=[0, 0], class_weight=[1, 1], added_cands=0, psr_cands=False, added_channel_cands=0,
cands_threshold=0, channel_classification=False):
super().__init__()
print('Creating neural net.')
self.model_para = model_para
self.set_mode(mode)
self.input_shape = input_shape
self.stride = model_para.encoder_stride
self.pool = model_para.encoder_pooling
self.no_pad = no_pad
self.crop = crop
self.edge = edge
self.down_fac = (
self.stride * self.pool) ** len(model_para.encoder_channels)
self.output_chan = model_para.output_channels
self.out_length = self.input_shape[1] // self.down_fac - self.crop * 2
self.data_resolution = data_resolution
self.output_resolution = data_resolution * self.down_fac
self.added_cands = added_cands
self.added_channel_cands = added_channel_cands
self.psr_cands = psr_cands
self.cands_threshold = 0
# self.channel_classification = channel_classification
self.candidate_creator = candidate_creator(added_cands=self.added_cands, added_channel_cands=self.added_channel_cands,
psr_cands=self.psr_cands,
candidate_threshold=self.cands_threshold, output_chan=self.output_chan)
if self.added_cands or self.psr_cands or self.added_channel_cands:
self.cand_based = True
else:
self.cand_based = False
self.set_preprocess(self.input_shape, model_para.initial_norm,
bias=clamp[0], clamp=clamp[1:], dm0_subtract=model_para.subtract_dm0,
groups=model_para.initial_norm_groups, cmask=cmask, rfimask=rfimask)
if not model_para.concat_dm0:
input_encoder = self.input_shape
else:
input_encoder = (self.input_shape[0] + 1, self.input_shape[1])
self.encoder = pulsar_encoder(input_encoder, model_para,
no_pad=no_pad)
if model_para.tcn_2_layers:
self.use_tcn = 1
self.tcn = TemporalConvNet_multi(model_para.tcn_2_channels, model_para.tcn_2_channels_increase,
model_para.tcn_2_layers, norm_groups=model_para.tcn_2_norm_groups,
conv_groups=model_para.tcn_2_conv_groups,
kernel_size=model_para.tcn_2_kernel,
dilation=model_para.tcn_2_dilation,
levels=model_para.tcn_2_downsample_levels,
downsample_factor=model_para.tcn_2_downsample_factor)
dec_input = self.tcn.output_chan
else:
self.use_tcn = 0
dec_input = model_para.tcn_2_channels
self.dec_input = dec_input
self.use_output_layer = 1
self.output_layer = OutputLayer(
dec_input, model_para.output_intermediate_channels, model_para.output_final_nonlin,
dropout=model_para.output_dropout, kernel=model_para.output_kernel,
output_channels=self.output_chan)
self.create_classifier_levels(
class_configs, no_reg, dm0_class=dm0_class, channel_classification=channel_classification)
self.create_loss_func(class_weight)
self.freeze = 0
# (int(111 / self.down_fac), int(15 / self.down_fac))
self.gauss_para = gauss
if self.gauss_para[0] % 2 == 0:
self.gauss_para[0] += 1
self.gaussian_kernel = torch.Tensor(scipy.signal.gaussian(
*self.gauss_para[:2])).unsqueeze(0).unsqueeze(0).unsqueeze(0) * self.gauss_para[2]
self.gaussian_kernel = torch.clamp(
self.gaussian_kernel, 0, self.gauss_para[3])
# self.crop = self.tcn.biggest_pad
def create_classifier_levels(self, class_configs, no_reg=True, overwrite=True, dm0_class=False, channel_classification=False):
self.dm0_class = dm0_class
# if hasattr(self, 'classifiers'):
# for classifier in self.classifiers:
# print(classifier)
# del classifier
self.channel_classification = channel_classification
if self.channel_classification:
self.class_channel_output = self.output_chan
else:
self.class_channel_output = 1
if overwrite:
if hasattr(self, 'classifiers'):
del self.classifiers
del_list = []
for (child_name, child) in self.named_modules():
if child_name.startswith('classifier'):
del_list.append(child_name)
for del_element in del_list:
try:
self.__delattr__(del_element)
except AttributeError:
pass
self.classifiers = []
self.classifier_names = []
added = ''
else:
added = '_1'
for single_classifier in self.classifiers:
single_classifier.dm0_class = dm0_class
self.no_reg = no_reg
# if not hasattr(self, 'out_length'):
self.out_length = self.input_shape[1] // self.down_fac - self.crop * 2
if self.no_reg:
self.final_output = 3
else:
self.final_output = 3
if not hasattr(self, 'output_chan'):
self.output_chan = 1
for config in class_configs:
with open(f"./model_configs/{config}") as json_data_file:
class_para_dict = json.load(json_data_file)
class_para = argparse.Namespace(**class_para_dict)
# if 'ffa' in self.class_mode:
if class_para.class_type == 'ffa':
class_name = f"classifier_ffa{added}"
while hasattr(self, class_name):
class_name += '_'
setattr(self, class_name, classifier_ffa(self.output_resolution, no_reg=True, dm0_class=False,
pooling=class_para.pooling, nn_layers=class_para.nn_layers, channels=class_para.channels,
kernel=class_para.kernel, norm=class_para.norm, use_ampl=class_para.only_use_amplitude,
min_period=class_para.min_period, max_period=class_para.max_period, bins_min=class_para.bins_min,
bins_max=class_para.bins_max,
remove_threshold=class_para.remove_dynamic_threshold,
name=f"classifier_ffa{added}",
))
self.classifiers.append(
getattr(self, class_name))
# if 'stft_comb' in self.class_mode:
if class_para.class_type == 'stft':
class_name = f"classifier_{class_para.name}"
while hasattr(self, class_name):
class_name += '_'
setattr(self, class_name, classifier_stft(self.out_length, self.output_resolution, class_para,
dm0_class=dm0_class,
name=class_name, channel_classification=channel_classification))
self.classifiers.append(
getattr(self, class_name))
self.classifier_names.append(class_name)
# else:
# self.classifier = None
self.used_classifiers = len(self.classifiers)
if self.used_classifiers > 1:
self.use_multi_class = 1
self.multi_class = MultiClass(self.used_classifiers, self.no_reg)
else:
self.use_multi_class = 0
for clas in self.classifiers:
clas.channel_classification = channel_classification
# turn of channel classification when ffa classifier is used
if any("ffa" in clas_name for clas_name in self.classifier_names):
print('No channel classification is used due to inclusion of FFA classifier.')
for clas in self.classifiers:
clas.channel_classification = False
self.channel_classification = False
self.class_channel_output = 1
def forward(self, x, target=None):
# y = x - tile(self.pool(x)[:,:,:], 2, 1000)
# x.requires_grad=False
# return checkpoint(self.apply_net, x)
return self.apply_net(x, target)
    def save_epoch(self, epoch):
        """Remember the current training epoch on the model (for checkpoints)."""
        self.epoch = epoch
    def save_noise(self, noise):
        """Remember the noise settings on the model (for checkpoints)."""
        self.noise = noise
    def save_mean_vals(self, mean_period, mean_dm, mean_freq):
        """Store the dataset mean period/DM/frequency used for normalisation."""
        self.mean_vals = (mean_period, mean_dm, mean_freq)
def set_mode(self, mode):
self.mode = mode
if mode != 'dedisperse' and mode != 'full' and mode != 'classifier' and mode != 'short':
print('Unkown mode!')
sys.exit()
    def calc_tcn_out(self, x):
        """Run encoder (+TCN) over `x` in overlapping chunks with gradient
        checkpointing to bound peak memory.

        Each chunk is extended by an overlap on both sides so convolution
        edge effects fall outside the part that is written to the output.
        Assumes `self.net_chunks = (n_chunks, overlap)` is set externally
        before the forward pass — TODO confirm against the training script.
        """
        chunks = self.net_chunks[0]
        overlap = self.net_chunks[1]
        split_val = np.linspace(0, x.shape[2], chunks + 1, dtype=int)
        output_tensor = torch.zeros(
            x.shape[0], self.dec_input, x.shape[2] // self.down_fac).to(x.device)
        for chunk in range(chunks):
            ini_start_val = split_val[chunk]
            ini_end_val = split_val[chunk + 1]
            # Extend the chunk by the overlap, clipped to the valid range.
            start_val = np.max((ini_start_val - overlap, 0))
            end_val = np.min((ini_end_val + overlap, x.shape[2]))
            # Position of the chunk core inside the (downsampled) chunk output.
            actual_start_overlap = (ini_start_val - start_val) // self.down_fac
            actual_end_value = (actual_start_overlap +
                                ini_end_val - ini_start_val) // self.down_fac
            out_start = split_val[chunk] // self.down_fac
            out_end = split_val[chunk + 1] // self.down_fac
            # output_tensor[:,:,out_start:out_end] = self.calc_tcn_chunk(
            #     x[:,:,start_val:end_val])[:,:,actual_start_overlap:actual_end_value]
            # checkpoint() trades compute for memory: chunk activations are
            # recomputed during the backward pass.
            output_tensor[:, :, out_start:out_end] = checkpoint(self.calc_tcn_chunk,
                                                                x[:, :, start_val:end_val])[:, :, actual_start_overlap:actual_end_value]
        return output_tensor
def calc_tcn_chunk(self, x):
if hasattr(self, 'encoder'):
y = self.encoder(x)
else:
y = x
if self.use_tcn:
out = self.tcn(y)
else:
out = y
return out
    def apply_net(self, input, target=None):
        """Full forward pass: preprocess, encode in chunks, crop, regress,
        then run every classifier head; optionally build candidates.

        Returns a 4-tuple (dedispersed output, classification, per-classifier
        tensor, (candidates, candidate targets)); unused slots are empty
        tensors so callers can unpack unconditionally.
        """
        if target is not None:
            target = target.to(input.device)
            # Ensure the target has a batch dimension.
            if len(target.shape) == 1:
                target = target.unsqueeze(0)
        input = self.preprocess(input)
        encoded = self.calc_tcn_out(input)
        if self.crop:
            # Drop the padding-affected edges of the encoded series.
            encoded = encoded[:, :, self.crop:-self.crop].contiguous()
        class_tensor = torch.zeros(
            (input.shape[0], self.used_classifiers, self.final_output, self.class_channel_output)).to(input.device)
        # switch = 0
        j = 0
        encoded = self.output_layer(encoded)
        if self.mode == 'dedisperse':
            # Regression-only mode: skip every classifier head.
            return encoded, torch.empty(0, requires_grad=True), torch.empty(0, requires_grad=True), (torch.empty(0, requires_grad=True), torch.empty(0, requires_grad=True))
        if hasattr(self, 'break_grad'):
            if self.break_grad:
                # if self.train:
                # Stop classifier gradients from flowing into the encoder.
                encoded_ = encoded.detach()
            else:
                encoded_ = encoded
        else:
            encoded_ = encoded
        if hasattr(self, 'dm0_class'):
            if self.dm0_class:
                encoded_ = self.append_dm0(input, encoded_)
        # print(class_tensor.shape)
        for classifier in self.classifiers:
            class_tensor[:, j, :,:], class_data = classifier(encoded_)
            if self.cand_based:
                # Prefer the candidate-specific head when the classifier has one.
                if hasattr(classifier, 'final_cands'):
                    final_layer = classifier.final_cands
                else:
                    final_layer = classifier.final
                class_candidates, class_targets = self.candidate_creator(
                    class_data, final_layer, classifier.channel_correction, target)
                # Concatenate candidates from all classifier heads.
                if j == 0:
                    candidates = class_candidates
                    cand_targets = class_targets
                else:
                    candidates = torch.cat((candidates, class_candidates), 0)
                    cand_targets = torch.cat((cand_targets, class_targets), 0)
            else:
                candidates = torch.empty(0, requires_grad=True)
                cand_targets = torch.empty(0, requires_grad=True)
            j += 1
        if self.use_multi_class:
            # Combine the per-classifier scores into a single prediction.
            classifier_output_multi = self.multi_class(
                class_tensor)
            return encoded, classifier_output_multi, class_tensor, (candidates, cand_targets)
        return encoded, class_tensor[:, 0, :,:], torch.empty(0, requires_grad=True), (candidates, cand_targets)
# def apply_classifier(self, input):
# class_tensor = torch.zeros(
# (input.shape[0], self.used_classifiers, self.final_output)).to(input.device)
# j = 0
# for classifier in self.classifiers:
# class_tensor[:, j, :] = classifier(input, target)
# j += 1
# if self.cand_based:
# pass
# if self.use_multi_class:
# classifier_output_multi = self.multi_class(
# class_tensor)
# return input, classifier_output_multi, class_tensor
# return input, class_tensor[:, 0, :], torch.empty(0, requires_grad=True)
    def reset_optimizer(self, lr, decay=0, freeze=0, init=0):
        """Create a fresh Adam optimizer and a ReduceLROnPlateau scheduler.

        :param lr: triple (initial_lr, later_lr, encoder_lr_factor).
        :param decay: weight decay passed to Adam.
        :param freeze: > 0 trains only the classifier heads (plus the
            multi-class combiner); <= 0 trains the whole network with the
            encoder part at a scaled learning rate.
        :param init: use lr[0] (initial) instead of lr[1].
        """
        # self.freeze = freeze
        if init:
            learn_rate_1 = lr[0]
        else:
            learn_rate_1 = lr[1]
        # Encoder/backbone learning rate is scaled by lr[2].
        learn_rate_2 = learn_rate_1 * lr[2]
        # print(learn_rate_2)
        # encoder_params = list(self.encoder.network[freeze:].parameters())
        # if self.use_tcn:
        #     encoder_params += list(self.tcn.parameters())
        # # print(encoder_params)
        # if freeze == 0:
        #     second_params = self.decoder.parameters()
        # else:
        #     second_params = self.decoder.network[:-freeze].parameters()
        if freeze <= 0:
            # parameters = self.parameters()
            # Split parameters: classifiers train at lr_1, the rest at lr_2.
            class_params = list()
            encoder_params = list()
            if self.use_multi_class:
                class_params += list(self.multi_class.parameters())
            for classifier in self.classifiers:
                class_params += list(classifier.parameters())
            encoder_params += list(self.preprocess.parameters())
            encoder_params += list(self.encoder.parameters())
            if hasattr(self, 'tcn'):
                encoder_params += list(self.tcn.parameters())
            encoder_params += list(self.output_layer.parameters())
            self.frozen = 0
            # for para in self.parameters():
            #     print(para)
            #     if (para not in class_params):
            #         encoder_params += list(para)
        else:
            # Freeze the backbone: only classifier heads keep training.
            print('using freeze')
            parameters = list()
            if self.use_multi_class:
                parameters += list(self.multi_class.parameters())
            for classifier in self.classifiers:
                parameters += list(classifier.parameters())
            self.frozen = 1
        if freeze <= 0:
            self.optimizer = optim.Adam([{'params': encoder_params, 'lr': learn_rate_2},
                                         {'params': class_params, 'lr': learn_rate_1}], lr=learn_rate_1, weight_decay=decay)
        else:
            self.optimizer = optim.Adam(
                parameters, lr=learn_rate_1, weight_decay=decay)
        # NOTE(review): min_lr is computed but never used below.
        min_lr = learn_rate_1 / 100
        # print(parameters)
        # print(self.optimizer)
        # if lr[2] == 0:
        #     min_lr = learn_rate_1 / 100
        # else:
        #     min_lr = min(learn_rate_1, learn_rate_2) / 100
        # else:
        #     self.optimizer = optim.Adam(self.parameters(), lr=lr[0])
        # self.scheduler = CosineAnnealingLRW(
        #     self.optimizer, T_max=5, cycle_mult=4, eta_min=min_lr, last_epoch=-1)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, patience=3, factor=0.5)
def create_loss_func(self, class_weight=[1, 1]):
# if not self.binary:
# self.loss_autoenc = nn.MSELoss()
# else:
# self.loss_autoenc = nn.BCEWithLogitsLoss(pos_weight=torch.full((3200,1), 20))
# self.loss_autoenc = nn.BCEWithLogitsLoss()
# self.loss_autoenc = nn.L1Loss()
# self.loss_autoenc = nn.BCEWithLogitsLoss()
# out_length = int(self.input_shape[1]/4 -self.crop*2)
# # self.loss_autoenc = nn.BCEWithLogitsLoss(pos_weight=torch.full((1, out_length), self.bce_weight))
self.loss_autoenc = nn.MSELoss().to(next(self.parameters()).device)
# self.loss_autoenc = nn.BCELoss()
self.loss_1 = nn.MSELoss(reduction='sum').to(
next(self.parameters()).device)
self.loss_2 = nn.CrossEntropyLoss(weight=torch.Tensor(
class_weight)).to(next(self.parameters()).device)
def gauss_smooth(self, tensor):
self.gaussian_kernel = self.gaussian_kernel.to(tensor.device)
| |
#########################
# GLOBAL VARIABLES USED #
#########################
# Assistant identity and the phrases that terminate the main loop.
ai_name = 'F.R.I.D.Y.'.lower()
EXIT_COMMANDS = ['bye','exit','quit','shut down', 'shutdown']
# Scratch values filled in by the WhatsApp / e-mail popup dialog.
rec_email, rec_phoneno = "", ""
WAEMEntry = None
# Avatar selection state for the settings screen.
avatarChoosen = 0
choosedAvtrImage = None
# Chat colour scheme — dark-theme defaults; may be overridden by
# ChangeSettings() from userData/settings.pck.
botChatTextBg = "#007cc7"
botChatText = "white"
userChatTextBg = "#4da8da"
chatBgColor = '#12232e'
background = '#203647'
textColor = 'white'
AITaskStatusLblBG = '#203647'
KCS_IMG = 1 #0 for light, 1 for dark
voice_id = 0 #0 for female, 1 for male
ass_volume = 1 #max volume
ass_voiceRate = 200 #normal voice rate
""" User Created Modules """
try:
import normalChat
import math_function
import appControl
import webScrapping
import game
from userHandler import UserData
import timer
from FACE_UNLOCKER import clickPhoto, viewPhoto
import dictionary
import ToDo
import fileHandler
except Exception as e:
raise e
""" System Modules """
try:
import os
import speech_recognition as sr
import pyttsx3
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import colorchooser
from PIL import Image, ImageTk
from time import sleep
from threading import Thread
except Exception as e:
print(e)
########################################## LOGIN CHECK ##############################################
# Load the registered user's profile; abort when face registration is missing.
try:
    user = UserData()
    user.extractData()
    # Only the first name is used for greetings.
    ownerName = user.getName().split()[0]
    ownerDesignation = "Sir"
    if user.getGender()=="Female": ownerDesignation = "Ma'am"
    ownerPhoto = user.getUserPhoto()
except Exception as e:
    print("You're not Registered Yet !\nRun SECURITY.py file to register your face.")
    raise SystemExit
########################################## BOOT UP WINDOW ###########################################
def ChangeSettings(write=False):
    """Persist (write=True) or restore (write=False) the GUI/voice settings
    via the pickle file userData/settings.pck."""
    import pickle
    global background, textColor, chatBgColor, voice_id, ass_volume, ass_voiceRate, AITaskStatusLblBG, KCS_IMG, botChatTextBg, botChatText, userChatTextBg
    keys = ('background', 'textColor', 'chatBgColor', 'AITaskStatusLblBG',
            'KCS_IMG', 'botChatText', 'botChatTextBg', 'userChatTextBg',
            'voice_id', 'ass_volume', 'ass_voiceRate')
    if write:
        # Snapshot the current globals and dump them to disk.
        snapshot = {key: globals()[key] for key in keys}
        with open('userData/settings.pck', 'wb') as file:
            pickle.dump(snapshot, file)
        return
    try:
        with open('userData/settings.pck', 'rb') as file:
            stored = pickle.load(file)
        for key in keys:
            globals()[key] = stored[key]
    except Exception:
        # Missing or corrupt settings file: keep the built-in defaults.
        pass
# Create the settings file on first launch so subsequent reads succeed.
# (Idiom fix: `not ...` instead of comparing against False.)
if not os.path.exists('userData/settings.pck'):
    ChangeSettings(True)
def getChatColor():
    """Ask the user for a chat background colour and apply it live.

    BUG FIX: `myColor` was read but never assigned, so this callback
    always raised NameError; it must come from the colour-chooser dialog.
    """
    global chatBgColor
    myColor = colorchooser.askcolor(title="Choose Chat Color")
    if myColor[1] is None:
        # Dialog was cancelled — keep the current colour.
        return
    chatBgColor = myColor[1]
    colorbar['bg'] = chatBgColor
    chat_frame['bg'] = chatBgColor
    root1['bg'] = chatBgColor
def changeTheme():
    """Switch between the dark (themeValue==1) and light UI themes,
    re-skin every palette-dependent widget and persist the choice."""
    global background, textColor, AITaskStatusLblBG, KCS_IMG, botChatText, botChatTextBg, userChatTextBg, chatBgColor
    if themeValue.get()==1:
        # Dark theme palette.
        background, textColor, AITaskStatusLblBG, KCS_IMG = "#203647", "white", "#203647",1
        cbl['image'] = cblDarkImg
        kbBtn['image'] = kbphDark
        settingBtn['image'] = sphDark
        AITaskStatusLbl['bg'] = AITaskStatusLblBG
        botChatText, botChatTextBg, userChatTextBg = "white", "#007cc7", "#4da8da"
        chatBgColor = "#12232e"
        colorbar['bg'] = chatBgColor
    else:
        # Light theme palette.
        background, textColor, AITaskStatusLblBG, KCS_IMG = "#F6FAFB", "#303E54", "#14A769", 0
        cbl['image'] = cblLightImg
        kbBtn['image'] = kbphLight
        settingBtn['image'] = sphLight
        AITaskStatusLbl['bg'] = AITaskStatusLblBG
        botChatText, botChatTextBg, userChatTextBg = "#494949", "#EAEAEA", "#23AE79"
        chatBgColor = "#F6FAFB"
        colorbar['bg'] = '#E8EBEF'
    # Apply the new palette to all remaining widgets.
    root['bg'], root2['bg'] = background, background
    settingsFrame['bg'] = background
    settingsLbl['fg'], userPhoto['fg'], userName['fg'], assLbl['fg'], voiceRateLbl['fg'], volumeLbl['fg'], themeLbl['fg'], chooseChatLbl['fg'] = textColor, textColor, textColor, textColor, textColor, textColor, textColor, textColor
    settingsLbl['bg'], userPhoto['bg'], userName['bg'], assLbl['bg'], voiceRateLbl['bg'], volumeLbl['bg'], themeLbl['bg'], chooseChatLbl['bg'] = background, background, background, background, background, background, background, background
    s.configure('Wild.TRadiobutton', background=background, foreground=textColor)
    volumeBar['bg'], volumeBar['fg'], volumeBar['highlightbackground'] = background, textColor, background
    chat_frame['bg'], root1['bg'] = chatBgColor, chatBgColor
    userPhoto['activebackground'] = background
    ChangeSettings(True)
def changeVoice(e):
    """Combo-box callback: switch the TTS voice and persist the choice."""
    global voice_id
    voice_id = 1 if assVoiceOption.get() == 'Male' else 0
    engine.setProperty('voice', voices[voice_id].id)
    ChangeSettings(True)
def changeVolume(e):
    """Slider callback: map the 0-100 scale onto pyttsx3's 0.0-1.0 volume."""
    global ass_volume
    level = volumeBar.get()
    ass_volume = level / 100
    engine.setProperty('volume', ass_volume)
    ChangeSettings(True)
def changeVoiceRate(e):
    """Combo-box callback: translate the speed label into words per minute."""
    global ass_voiceRate
    rates = {'Very Low': 100, 'Low': 150, 'Fast': 250, 'Very Fast': 300}
    ass_voiceRate = rates.get(voiceOption.get(), 200)
    print(ass_voiceRate)
    engine.setProperty('rate', ass_voiceRate)
    ChangeSettings(True)
# Restore any previously saved settings before the voice engine is configured.
ChangeSettings()
############################################ SET UP VOICE ###########################################
# Initialise the text-to-speech engine with the persisted voice settings.
try:
    engine = pyttsx3.init()
    voices = engine.getProperty('voices')
    # voice_id: 0 = female, 1 = male (depends on the installed system voices).
    engine.setProperty('voice', voices[voice_id].id)
    engine.setProperty('volume', ass_volume)
except Exception as e:
    print(e)
####################################### SET UP TEXT TO SPEECH #######################################
def speak(text, display=False, icon=False):
    """Voice `text` through the TTS engine and optionally echo it on screen.

    :param display: also attach the text as a bot chat bubble.
    :param icon: prepend the bot avatar icon to the chat frame.
    """
    AITaskStatusLbl['text'] = 'Speaking...'
    if icon: Label(chat_frame, image=botIcon, bg=chatBgColor).pack(anchor='w',pady=0)
    if display: attachTOframe(text, True)
    print('\n'+ai_name.upper()+': '+text)
    try:
        engine.say(text)
        engine.runAndWait()
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; pyttsx3 raises when its loop is already running.
    except Exception:
        print("Try not to type more...")
####################################### SET UP SPEECH TO TEXT #######################################
def record(clearChat=True, iconDisplay=True):
    """Listen once on the microphone and return the recognised text lower-cased.

    Returns the string 'None' (a sentinel, not the None object) when
    recognition fails; callers compare against that string.

    clearChat: wipe the chat frame before attaching the new utterance.
    iconDisplay: prepend the user avatar to the chat frame.
    """
    print('\nListening...')
    AITaskStatusLbl['text'] = 'Listening...'
    r = sr.Recognizer()
    # Fixed threshold instead of auto-adaptation, to ignore quiet background noise.
    r.dynamic_energy_threshold = False
    r.energy_threshold = 4000
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    said = ""
    try:
        AITaskStatusLbl['text'] = 'Processing...'
        # Online recognition via Google's free speech API.
        said = r.recognize_google(audio)
        print(f"\nUser said: {said}")
        if clearChat:
            clearChatScreen()
        if iconDisplay: Label(chat_frame, image=userIcon, bg=chatBgColor).pack(anchor='e',pady=0)
        attachTOframe(said)
    except Exception as e:
        print(e)
        # speak("I didn't get it, Say that again please...")
        if "connection failed" in str(e):
            speak("Your System is Offline...", True, True)
        return 'None'
    return said.lower()
def voiceMedium():
    """Continuously listen on the microphone and dispatch each command
    until an exit phrase is heard, then close the window."""
    while True:
        heard = record()
        if heard == 'None':
            continue
        if isContain(heard, EXIT_COMMANDS):
            speak("Shutting down the System. Good Bye "+ownerDesignation+"!", True, True)
            break
        main(heard.lower())
    appControl.Win_Opt('close')
def keyboardInput(e):
    """Handle <Return> in the text entry: run the typed command."""
    typed = UserField.get().lower()
    if not typed:
        return
    clearChatScreen()
    if isContain(typed, EXIT_COMMANDS):
        speak("Shutting down the System. Good Bye "+ownerDesignation+"!", True, True)
    else:
        Label(chat_frame, image=userIcon, bg=chatBgColor).pack(anchor='e',pady=0)
        attachTOframe(typed.capitalize())
        # Run the command off the UI thread so the window stays responsive.
        Thread(target=main, args=(typed,)).start()
    UserField.delete(0, END)
###################################### TASK/COMMAND HANDLER #########################################
def isContain(txt, lst):
    """Return True when any entry of `lst` occurs as a substring of `txt`."""
    return any(word in txt for word in lst)
def main(text):
    """Route one user command to the matching feature handler.

    Each `if` block services a single intent; handlers speak/attach their
    own responses and `return` once the command is consumed. Unmatched
    input falls through to small talk and finally a Google search.
    """
    # --- file / project creation ---
    if "project" in text:
        if isContain(text, ['make', 'create']):
            speak("What do you want to give the project name ?", True, True)
            projectName = record(False, False)
            speak(fileHandler.CreateHTMLProject(projectName.capitalize()), True)
            return
    if "create" in text and "file" in text:
        speak(fileHandler.createFile(text), True, True)
        return
    # --- translation ---
    if "translate" in text:
        speak("What do you want to translate?", True, True)
        sentence = record(False, False)
        speak("Which langauage to translate ?", True)
        langauage = record(False, False)
        result = normalChat.lang_translate(sentence, langauage)
        if result=="None": speak("This langauage doesn't exists")
        else:
            speak(f"In {langauage.capitalize()} you would say:", True)
            # Hindi results come with a pronunciation to speak aloud.
            if langauage=="hindi":
                attachTOframe(result.text, True)
                speak(result.pronunciation)
            else: speak(result.text, True)
        return
    # --- to-do list ---
    if 'list' in text:
        if isContain(text, ['add', 'create', 'make']):
            speak("What do you want to add?", True, True)
            item = record(False, False)
            ToDo.toDoList(item)
            speak("Alright, I added to your list", True)
            return
        if isContain(text, ['show', 'my list']):
            items = ToDo.showtoDoList()
            if len(items)==1:
                speak(items[0], True, True)
                return
            attachTOframe('\n'.join(items), True)
            speak(items[0])
            return
    # --- system information ---
    if isContain(text, ['battery', 'system info']):
        result = appControl.OSHandler(text)
        if len(result)==2:
            speak(result[0], True, True)
            attachTOframe(result[1], True)
        else:
            speak(result, True, True)
        return
    if isContain(text, ['meaning', 'dictionary', 'definition', 'define']):
        result = dictionary.translate(text)
        speak(result[0], True, True)
        if result[1]=='': return
        speak(result[1], True)
        return
    # --- camera ---
    if 'selfie' in text or ('click' in text and 'photo' in text):
        speak("Sure "+ownerDesignation+"...", True, True)
        clickPhoto()
        speak('Do you want to view your clicked photo?', True)
        query = record(False)
        if isContain(query, ['yes', 'sure', 'yeah', 'show me']):
            Thread(target=viewPhoto).start()
            speak("Ok, here you go...", True, True)
        else:
            speak("No Problem "+ownerDesignation, True, True)
        return
    if 'volume' in text:
        appControl.volumeControl(text)
        Label(chat_frame, image=botIcon, bg=chatBgColor).pack(anchor='w',pady=0)
        attachTOframe('Volume Settings Changed', True)
        return
    if isContain(text, ['timer', 'countdown']):
        Thread(target=timer.startTimer, args=(text,)).start()
        speak('Ok, Timer Started!', True, True)
        return
    # --- messaging ---
    if 'whatsapp' in text:
        speak("Sure "+ownerDesignation+"...", True, True)
        speak('Whom do you want to send the message?', True)
        WAEMPOPUP("WhatsApp", "Phone Number")
        attachTOframe(rec_phoneno)
        speak('What is the message?', True)
        message = record(False, False)
        Thread(target=webScrapping.sendWhatsapp, args=(rec_phoneno, message,)).start()
        speak("Message is on the way. Do not move away from the screen.")
        attachTOframe("Message Sent", True)
        return
    if 'email' in text:
        speak('Whom do you want to send the email?', True, True)
        WAEMPOPUP("Email", "E-mail Address")
        attachTOframe(rec_email)
        speak('What is the Subject?', True)
        subject = record(False, False)
        speak('What message you want to send ?', True)
        message = record(False, False)
        Thread(target=webScrapping.email, args=(rec_email,message,subject,) ).start()
        speak('Email has been Sent', True)
        return
    # --- web scraping features ---
    if isContain(text, ['covid','virus']):
        result = webScrapping.covid(text)
        # A plain string means a single-line answer; otherwise (headline, lines).
        if 'str' in str(type(result)):
            speak(result, True, True)
            return
        speak(result[0], True, True)
        result = '\n'.join(result[1])
        attachTOframe(result, True)
        return
    if isContain(text, ['youtube','video']):
        speak("Ok "+ownerDesignation+", here a video for you...", True, True)
        try:
            speak(webScrapping.youtube(text), True)
        except Exception as e:
            speak("Desired Result Not Found", True)
        return
    if isContain(text, ['search', 'image']):
        if 'image' in text and 'show' in text:
            Thread(target=showImages, args=(text,)).start()
            speak('Here are the images...', True, True)
            return
        speak(webScrapping.googleSearch(text), True, True)
        return
    if isContain(text, ['map', 'direction']):
        if "direction" in text:
            speak('What is your starting location?', True, True)
            startingPoint = record(False, False)
            speak("Ok "+ownerDesignation+", Where you want to go?", True)
            destinationPoint = record(False, False)
            speak("Ok "+ownerDesignation+", Getting Directions...", True)
            try:
                distance = webScrapping.giveDirections(startingPoint, destinationPoint)
                speak('You have to cover a distance of '+ distance, True)
            except:
                speak("I think location is not proper, Try Again!")
        else:
            webScrapping.maps(text)
            speak('Here you go...', True, True)
        return
    # --- calculator ---
    if isContain(text, ['factorial','log','value of','math',' + ',' - ',' x ','multiply','divided by','binary','hexadecimal','octal','shift','sin ','cos ','tan ']):
        try:
            speak(('Result is: ' + math_function.perform(text)), True, True)
        except Exception as e:
            return
        return
    if "joke" in text:
        speak('Here is a joke...', True, True)
        speak(webScrapping.jokes(), True)
        return
    if isContain(text, ['news']):
        speak('Getting the latest news...', True, True)
        headlines,headlineLinks = webScrapping.latestNews(2)
        for head in headlines: speak(head, True)
        speak('Do you want to read the full news?', True)
        text = record(False, False)
        if isContain(text, ["no","don't"]):
            speak("No Problem "+ownerDesignation, True)
        else:
            speak("Ok "+ownerDesignation+", Opening browser...", True)
            webScrapping.openWebsite('https://indianexpress.com/latest-news/')
            speak("You can now read the full news from this website.")
        return
    if isContain(text, ['weather']):
        data = webScrapping.weather()
        speak('', False, True)
        showSingleImage("weather", data[:-1])
        speak(data[-1])
        return
    # --- window / OS control ---
    if isContain(text, ['screenshot']):
        Thread(target=appControl.Win_Opt, args=('screenshot',)).start()
        speak("Screen Shot Taken", True, True)
        return
    if isContain(text, ['window','close that']):
        appControl.Win_Opt(text)
        return
    if isContain(text, ['tab']):
        appControl.Tab_Opt(text)
        return
    if isContain(text, ['setting']):
        raise_frame(root2)
        clearChatScreen()
        return
    if isContain(text, ['open','type','save','delete','select','press enter']):
        appControl.System_Opt(text)
        return
    # --- wikipedia ---
    if isContain(text, ['wiki', 'who is']):
        Thread(target=webScrapping.downloadImage, args=(text, 1,)).start()
        speak('Searching...', True, True)
        result = webScrapping.wikiResult(text)
        showSingleImage('wiki')
        speak(result, True)
        return
    # --- games ---
    if isContain(text, ['game']):
        speak("Which game do you want to play?", True, True)
        attachTOframe(game.showGames(), True)
        text = record(False)
        if text=="None":
            speak("Didn't understand what you say?", True, True)
            return
        if 'online' in text:
            speak("Ok "+ownerDesignation+", Let's play some online games", True, True)
            webScrapping.openWebsite('https://www.agame.com/games/mini-games/')
            return
        if isContain(text, ["don't", "no", "cancel", "back", "never"]):
            speak("No Problem "+ownerDesignation+", We'll play next time.", True, True)
        else:
            speak("Ok "+ownerDesignation+", Let's Play " + text, True, True)
            # Games run in their own interpreter so the GUI stays alive.
            os.system(f"python -c \"import game; game.play('{text}')\"")
        return
    if isContain(text, ['coin','dice','die']):
        if "toss" in text or "roll" in text or "flip" in text:
            speak("Ok "+ownerDesignation, True, True)
            result = game.play(text)
            if "Head" in result: showSingleImage('head')
            elif "Tail" in result: showSingleImage('tail')
            else: showSingleImage(result[-1])
            speak(result)
            return
    # --- small talk and fallbacks ---
    if isContain(text, ['time','date']):
        speak(normalChat.chat(text), True, True)
        return
    if 'my name' in text:
        speak('Your name is, ' + ownerName, True, True)
        return
    if isContain(text, ['voice']):
        global voice_id
        try:
            if 'female' in text: voice_id = 0
            elif 'male' in text: voice_id = 1
            else:
                # No gender mentioned: toggle the current voice.
                if voice_id==0: voice_id=1
                else: voice_id=0
            engine.setProperty('voice', voices[voice_id].id)
            ChangeSettings(True)
            speak("Hello "+ownerDesignation+", I have changed my voice. How may I help you?", True, True)
            assVoiceOption.current(voice_id)
        except Exception as e:
            print(e)
        return
    if isContain(text, ['morning','evening','noon']) and 'good' in text:
        speak(normalChat.chat("good"), True, True)
        return
    result = normalChat.reply(text)
    if result != "None": speak(result, True, True)
    else:
        # Last resort: hand the query to a web search.
        speak("Here's what I found on the web... ", True, True)
        webScrapping.googleSearch(text)
##################################### DELETE USER ACCOUNT #########################################
def deleteUserData():
    """Wipe the stored face/user data after confirmation and close the app."""
    import shutil
    answer = messagebox.askquestion('Alert', 'Are you sure you want to delete your Face Data ?')
    if answer == 'no':
        return
    messagebox.showinfo('Clear Face Data', 'Your face has been cleared\nRegister your face again to use.')
    shutil.rmtree('userData')
    root.destroy()
#####################
####### GUI #########
#####################
############ ATTACHING BOT/USER CHAT ON CHAT SCREEN ###########
def attachTOframe(text, bot=False):
    """Append one chat bubble to the chat frame (bot-styled when bot=True)."""
    if bot:
        colours = dict(bg=botChatTextBg, fg=botChatText, justify=LEFT)
        packing = dict(anchor='w', ipadx=5, ipady=5, pady=5)
    else:
        colours = dict(bg=userChatTextBg, fg='white', justify=RIGHT)
        packing = dict(anchor='e', ipadx=2, ipady=2, pady=5)
    bubble = Label(chat_frame, text=text, wraplength=250,
                   font=('Montserrat', 12, 'bold'), **colours)
    bubble.pack(**packing)
def clearChatScreen():
    """Remove every chat bubble currently shown in the chat frame."""
    for child in chat_frame.winfo_children():
        child.destroy()
### SWITCHING BETWEEN FRAMES ###
def raise_frame(frame):
    """Bring `frame` to the top of the stacking order and reset the chat."""
    frame.tkraise()
    clearChatScreen()
################# SHOWING DOWNLOADED IMAGES ###############
img0, img1, img2, img3, img4 = None, None, None, None, None
def showSingleImage(type, data=None):
global img0, img1, img2, img3, img4
try:
img0 = ImageTk.PhotoImage(Image.open('Downloads/0.jpg').resize((90,110), Image.ANTIALIAS))
except:
pass
img1 = ImageTk.PhotoImage(Image.open('extrafiles/images/heads.jpg').resize((220,200), Image.ANTIALIAS))
img2 = ImageTk.PhotoImage(Image.open('extrafiles/images/tails.jpg').resize((220,200), Image.ANTIALIAS))
img4 = ImageTk.PhotoImage(Image.open('extrafiles/images/WeatherImage.png'))
if type=="weather":
weather = Frame(chat_frame)
weather.pack(anchor='w')
Label(weather, image=img4, bg=chatBgColor).pack()
Label(weather, text=data[0], font=('Arial Bold', 45), fg='white', bg='#3F48CC').place(x=65,y=45)
Label(weather, text=data[1], font=('Montserrat', 15), fg='white', bg='#3F48CC').place(x=78,y=110)
Label(weather, text=data[2], font=('Montserrat', 10), fg='white', bg='#3F48CC').place(x=78,y=140)
Label(weather, text=data[3], font=('Arial Bold', 12), fg='white', bg='#3F48CC').place(x=60,y=160)
elif type=="wiki":
Label(chat_frame, image=img0, bg='#EAEAEA').pack(anchor='w')
elif type=="head":
Label(chat_frame, image=img1, bg='#EAEAEA').pack(anchor='w')
elif type=="tail":
Label(chat_frame, | |
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster developement
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# javascript_compressed.js: The compressed Javascript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# lua_compressed.js: The compressed Lua generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import sys
# Fail fast on Python 3: this build script depends on Python-2-only modules
# (httplib, the py2 urllib API) and the py2 `reload` builtin used below.
if sys.version_info[0] != 2:
  raise Exception("Blockly build only compatible with Python 2.x.\n"
                  "You are using: " + sys.version)
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
# (A duplicate `import subprocess` line was removed; subprocess is already
# imported just above.)
def import_path(fullpath):
  """Import a module from an arbitrary directory.

  Plain ``__import__`` only searches ``sys.path``, so the target directory
  is appended temporarily, the module is imported (and reloaded in case a
  stale copy is cached), and the path entry is dropped again.

  Args:
    fullpath: Path and filename of import.

  Returns:
    An imported module.
  """
  directory, leaf = os.path.split(fullpath)
  modname = os.path.splitext(leaf)[0]
  sys.path.append(directory)
  module = __import__(modname)
  reload(module)  # Might be out of date.
  del sys.path[-1]
  return module
# Banner prepended to every generated file: marks it as build output and
# opts the emitted JavaScript into strict mode.
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
  """Generate a JavaScript file that loads Blockly's raw files.
  Runs in a separate thread.
  """
  def __init__(self, search_paths):
    threading.Thread.__init__(self)
    self.search_paths = search_paths

  def run(self):
    """Write blockly_uncompressed.js: a browser/Node bootstrap plus a
    goog.addDependency table and goog.require lines for every provide."""
    target_filename = 'blockly_uncompressed.js'
    f = open(target_filename, 'w')
    f.write(HEADER)
    # Bootstrap JavaScript: locate Blockly's directory in the browser, or stub
    # a `window` object under Node.js.  (BUG FIX: a stray `n` after the
    # for-loop's closing brace made the emitted script throw a ReferenceError
    # whenever a script tag did not match the regexp.)
    f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
    typeof window === 'undefined');
if (isNodeJS) {
  var window = {};
  require('../closure-library/closure/goog/bootstrap/nodejs');
}
window.BLOCKLY_DIR = (function() {
  if (!isNodeJS) {
    // Find name of current directory.
    var scripts = document.getElementsByTagName('script');
    var re = new RegExp('(.+)[\/]blockly_uncompressed\.js$');
    for (var i = 0, script; script = scripts[i]; i++) {
      var match = re.exec(script.src);
      if (match) {
        return match[1];
      }
    }
    alert('Could not detect Blockly\\'s directory name.');
  }
  return '';
})();
window.BLOCKLY_BOOT = function() {
  var dir = '';
  if (isNodeJS) {
    require('../closure-library/closure/goog/bootstrap/nodejs');
    dir = 'blockly';
  } else {
    // Execute after Closure has loaded.
    if (!window.goog) {
      alert('Error: Closure not found. Read this:\\n' +
            'developers.google.com/blockly/hacking/closure');
    }
    dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
  }
""")
    # Emit one goog.addDependency line per source file.
    add_dependency = []
    base_path = calcdeps.FindClosureBasePath(self.search_paths)
    for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
      add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
    add_dependency = '\n'.join(add_dependency)
    # Find the Blockly directory name and replace it with a JS variable.
    # This allows blockly_uncompressed.js to be compiled on one computer and be
    # used on another, even if the directory name differs.
    m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
    add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
        '([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
    f.write(add_dependency + '\n')
    # Require everything that lives inside the Blockly tree (skip '../' deps).
    provides = []
    for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
      if not dep.filename.startswith(os.pardir + os.sep): # '../'
        provides.extend(dep.provides)
    provides.sort()
    f.write('\n')
    f.write('// Load Blockly.\n')
    for provide in provides:
      f.write("goog.require('%s');\n" % provide)
    # Footer: fire the boot function — immediately under Node, or after a
    # freshly loaded Closure Library in the browser.
    f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
  window.BLOCKLY_BOOT()
  module.exports = Blockly;
} else {
  // Delete any existing Closure (e.g. Soy's nogoog_shim).
  document.write('<script>var goog = undefined;</script>');
  // Load fresh Closure Library.
  document.write('<script src="' + window.BLOCKLY_DIR +
      '/../closure-library/closure/goog/base.js"></script>');
  document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
    f.close()
    print("SUCCESS: " + target_filename)
class Gen_compressed(threading.Thread):
  """Generate a JavaScript file that contains all of Blockly's core and all
  required parts of Closure, compiled together.
  Uses the Closure Compiler's online API.
  Runs in a separate thread.
  """
  def __init__(self, search_paths):
    threading.Thread.__init__(self)
    self.search_paths = search_paths

  def run(self):
    self.gen_all()

  def gen_all(self):
    """Compile core/blocks/generators into blockly_compressed.js and strip
    the repeated Apache licence banners from the output."""
    # Drop any stale output first so a failed compile cannot be mistaken for
    # a fresh one.  (Previously shelled out to `rm`, which is not portable
    # and printed an error when the file was absent.)
    if os.path.isfile("blockly_compressed.js"):
      os.remove("blockly_compressed.js")
    subprocess.call(["npx google-closure-compiler --js='../closure-library/closure/goog/**.js' --js='blocks/**.js' --js='core/**.js' --js='msg/js/**.js' --generate_exports --warning_level='QUIET' --compilation_level SIMPLE_OPTIMIZATIONS --entry_point=Blockly --js_output_file blockly_compressed.js"], shell=True)
    if not os.path.isfile("blockly_compressed.js"):
      print("FATAL ERROR: Compiler did not return compiledCode.")
      sys.exit(1)
    else:
      print("compilation finished: blockly_compressed.js created")
      print("Please only use this for the Open Roberta Lab from now on (and msg of course)")
      # Re-read the compiler output; `with` guarantees the read handle is
      # closed before the same path is reopened for writing below (the
      # original leaked the read handle by rebinding `f`).
      with open("blockly_compressed.js", "r") as f:
        compiledCode = f.read()
      code = HEADER + "\n" + compiledCode
      # Trim down Google's Apache licences.
      # The Closure Compiler used to preserve these until August 2015.
      # Delete this in a few months if the licences don't return.
      LICENSE = re.compile("""/\\*
[\w ]+
(Copyright \\d+ Google Inc.)
 https://developers.google.com/blockly/
 Licensed under the Apache License, Version 2.0 \(the "License"\);
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
   http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
\\*/""")
      code = re.sub(LICENSE, r"\n// \1 Apache License 2.0", code)
      with open("blockly_compressed.js", "w") as f:
        f.write(code)
class Gen_langfiles(threading.Thread):
  """Generate JavaScript file for each natural language supported.
  Runs in a separate thread.
  """
  def __init__(self):
    threading.Thread.__init__(self)

  def _rebuild(self, srcs, dests):
    """Return True when `dests` should be regenerated from `srcs`.

    Currently always rebuilds (regeneration is cheap); the error handling is
    retained from the original timestamp-comparison logic.
    """
    # Determine whether any of the files in srcs is newer than any in dests.
    try:
      return True
    except OSError as e:
      # Was a file not found?
      if e.errno == errno.ENOENT:
        # If it was a source file, we can't proceed.
        if e.filename in srcs:
          print("Source file missing: " + e.filename)
          sys.exit(1)
        else:
          # If a destination file was missing, rebuild.
          return True
      else:
        # BUG FIX: concatenating the OSError object itself raised TypeError;
        # convert it to str first.
        print("Error checking file creation times: " + str(e))

  def run(self):
    """Regenerate msg/json/*.json from messages.js, then msg/js/<lang>.js."""
    # The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
    if self._rebuild([os.path.join("msg", "messages.js")],
                     [os.path.join("msg", "json", f) for f in
                      ["en.json", "qqq.json", "synonyms.json"]]):
      try:
        subprocess.check_call([
            "python",
            os.path.join("i18n", "js_to_json.py"),
            "--input_file", "msg/messages.js",
            "--robInput_file", "robMsg/robMessages.js",
            "--output_dir", "msg/json/",
            "--quiet"])
      except (subprocess.CalledProcessError, OSError) as e:
        # Documentation for subprocess.check_call says that CalledProcessError
        # will be raised on failure, but I found that OSError is also possible.
        print("Error running i18n/js_to_json.py: ", e)
        sys.exit(1)
    # Checking whether it is necessary to rebuild the js files would be a lot
    # of work since we would have to compare each <lang>.json file with each
    # <lang>.js file.  Rebuilding is easy and cheap, so just go ahead and do it.
    try:
      # Use create_messages.py to create .js files from .json files.
      cmd = [
          "python",
          os.path.join("i18n", "create_messages.py"),
          "--source_lang_file", os.path.join("msg", "json", "en.json"),
          "--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
          "--key_file", os.path.join("msg", "json", "keys.json"),
          "--output_dir", os.path.join("msg", "js"),
          "--quiet"]
      json_files = glob.glob(os.path.join("msg", "json", "*.json"))
      json_files = [file for file in json_files if not
          (file.endswith(("keys.json", "synonyms.json", "qqq.json")))]
      cmd.extend(json_files)
      subprocess.check_call(cmd)
    except (subprocess.CalledProcessError, OSError) as e:
      print("Error running i18n/create_messages.py: ", e)
      sys.exit(1)
    # Output list of .js files created.
    for f in json_files:
      # This assumes the path to the current directory does not contain "json".
      f = f.replace("json", "js")
      if os.path.isfile(f):
        print("SUCCESS: " + f)
      else:
        print("FAILED to create " + f)
if __name__ == "__main__":
try:
calcdeps = import_path(os.path.join(
os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
except ImportError:
if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
# Dir got renamed when Closure moved from Google Code to GitHub in 2014.
print("Error: Closure directory needs to be renamed from"
"'closure-library-read-only' to 'closure-library'.\n"
"Please rename this directory.")
elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
# When Closure is installed by npm, it is named "google-closure-library".
#calcdeps = import_path(os.path.join(
# os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
print("Error: Closure directory needs to be renamed from"
"'google-closure-library' to 'closure-library'.\n"
"Please rename this directory.")
else:
print("""Error: Closure not found. Read this:
https://developers.google.com/blockly/hacking/closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
["core", os.path.join(os.path.pardir, "closure-library")])
# Run both tasks in parallel threads.
# Uncompressed is limited | |
<gh_stars>10-100
# coding=utf8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet Train/Eval module.
"""
import time
import sys
import os
import numpy as np
import dataloader
import json
from tqdm import tqdm
import densenet
import resnet
from PIL import Image
import torchvision
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tools import parse
from glob import glob
from skimage import measure
import sys
# Python-2-only trick: reload(sys) restores the setdefaultencoding hook that
# site.py deletes at startup, so the process-wide default string encoding can
# be forced to UTF-8 (needed for the Chinese character labels below).
reload(sys)
sys.setdefaultencoding('utf8')
import traceback
from moxing.framework import file
args = parse.args
# Anchor sizes (in pixels) for the detector.
args.anchors = [8, 12, 18, 27, 40, 60]
# Output stride of the backbone feature map.
args.stride = 8
# Network input size — presumably [width, height] for wide text-line crops;
# TODO(review): confirm the axis order against the dataloader.
args.image_size = [512,64]
datadir=parse.datadir
# Mirror the training data from OBS object storage to the local data dir.
file.copy_parallel(args.data_dir_obs, parse.datadir)
class DenseNet121(nn.Module):
    """Model modified.
    The architecture of our model is the same as standard DenseNet121
    except the classifier layer which has an additional sigmoid function.
    """
    def __init__(self, out_size):
        # out_size: number of character classes predicted per spatial column.
        super(DenseNet121, self).__init__()
        self.inplanes = 1024
        self.densenet121 = densenet.densenet121(pretrained=False, small=args.small)
        num_ftrs = self.densenet121.classifier.in_features
        self.classifier_font = nn.Sequential(
            # A fully-connected layer could classify here:
            # nn.Linear(num_ftrs, out_size)
            # ...but a 1x1 convolution is used instead, so each spatial
            # position keeps its own class prediction.
            nn.Conv2d(num_ftrs, out_size, kernel_size=1, bias=False)
        )
        self.train_params = []
        self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
    def _make_layer(self, block, planes, blocks, stride=1):
        # ResNet-style stage builder: a 1x1-conv downsample shortcut is added
        # whenever stride or channel width changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x, phase='train'):
        """Return per-class scores: (N, C) after global max-pool in training,
        or ((N, H, W, C), backbone feats) for test-time decoding.
        Shape comments below assume the sample sizes noted by the author."""
        feats = self.densenet121(x) # (32, 1024, 2, 16)
        if not args.small:
            feats = F.max_pool2d(feats, kernel_size=2, stride=2) # (32, 1024, 1, 8)
        out = self.classifier_font(feats) # (32, 1824, 1, 8)
        out_size = out.size()
        # print out.size()
        out = out.view(out.size(0),out.size(1),-1) # (32, 1824, 8)
        # print out.size()
        if phase == 'train':
            # Global max over spatial positions -> one score per class.
            out = F.adaptive_max_pool1d(out, output_size=(1)).view(out.size(0),-1) # (32, 1824)
            return out
        else:
            out = out.transpose(1,2).contiguous()
            out = out.view(out_size[0],out_size[2], out_size[3], out_size[1]) # (32, 1, 8, 1824)
            return out, feats
class Loss(nn.Module):
    """Detection loss: sigmoid + binary cross-entropy over per-character
    outputs, with optional online hard-example mining; `_forward` is an
    alternate variant that also scores bbox regression and segmentation."""
    def __init__(self):
        super(Loss, self).__init__()
        self.classify_loss = nn.BCELoss()
        self.sigmoid = nn.Sigmoid()
        self.regress_loss = nn.SmoothL1Loss()
    def forward(self, font_output, font_target, weight=None, use_hard_mining=False):
        """Return [font_loss, pos_loss, neg_loss]; the latter two equal
        font_loss when hard mining is disabled."""
        font_output = self.sigmoid(font_output)
        font_loss = F.binary_cross_entropy(font_output, font_target, weight)
        # hard_mining
        if use_hard_mining:
            font_output = font_output.view(-1)
            font_target = font_target.view(-1)
            pos_index = font_target > 0.5
            neg_index = font_target == 0
            # Positives: mine the lowest-scoring (hardest) quarter, min 5.
            pos_output = font_output[pos_index]
            pos_target = font_target[pos_index]
            # NOTE(review): `/4` is integer division under Python 2 — confirm
            # the truncation is intended.
            num_hard_pos = max(len(pos_output)/4, min(5, len(pos_output)))
            if len(pos_output) > 5:
                pos_output, pos_target = hard_mining(pos_output, pos_target, num_hard_pos, largest=False)
            pos_loss = self.classify_loss(pos_output, pos_target) * 0.5
            # Negatives: mine the highest-scoring (hardest), 2x the positives.
            num_hard_neg = len(pos_output) * 2
            neg_output = font_output[neg_index]
            neg_target = font_target[neg_index]
            neg_output, neg_target = hard_mining(neg_output, neg_target, num_hard_neg, largest=True)
            neg_loss = self.classify_loss(neg_output, neg_target) * 0.5
            font_loss += pos_loss + neg_loss
        else:
            pos_loss, neg_loss = font_loss, font_loss
        return [font_loss, pos_loss, neg_loss]
    def _forward(self, font_output, font_target, weight, bbox_output=None, bbox_label=None, seg_output=None, seg_labels=None):
        """Variant combining font BCE with bbox (x, y, d) SmoothL1 regression,
        bbox confidence BCE, and segmentation BCE; returns the plain font loss
        when no bbox output is supplied."""
        font_output = self.sigmoid(font_output)
        font_loss = F.binary_cross_entropy(font_output, font_target, weight)
        acc = []
        if bbox_output is not None:
            # bbox_loss = 0
            bbox_output = bbox_output.view((-1, 4))
            bbox_label = bbox_label.view((-1, 4))
            # Last column of the label encodes the sample sign: >=0.5 positive,
            # <=-0.5 negative; masks are expanded to cover all 4 columns.
            pos_index = bbox_label[:,-1] >= 0.5
            pos_index = pos_index.unsqueeze(1).expand(pos_index.size(0), 4)
            neg_index = bbox_label[:,-1] <= -0.5
            neg_index = neg_index.unsqueeze(1).expand(neg_index.size(0), 4)
            # Positive examples: regression + confidence loss.
            pos_label = bbox_label[pos_index].view((-1,4))
            pos_output = bbox_output[pos_index].view((-1,4))
            lx,ly,ld,lc = pos_label[:,0],pos_label[:,1],pos_label[:,2],pos_label[:,3]
            ox,oy,od,oc = pos_output[:,0],pos_output[:,1],pos_output[:,2],pos_output[:,3]
            regress_loss = [
                self.regress_loss(ox, lx),
                self.regress_loss(oy, ly),
                self.regress_loss(od, ld),
            ]
            pc = self.sigmoid(oc)
            # acc accumulates [correct, total] pairs for pos then neg samples.
            acc.append((pc>=0.5).data.cpu().numpy().astype(np.float32).sum())
            acc.append(len(pc))
            # print pc.size(), lc.size()
            classify_loss = self.classify_loss(pc, lc) * 0.5
            # Negative examples: confidence loss only (label -1 shifted to 0).
            neg_label = bbox_label[neg_index].view((-1,4))
            neg_output = bbox_output[neg_index].view((-1,4))
            lc = neg_label[:, 3]
            oc = neg_output[:, 3]
            pc = self.sigmoid(oc)
            acc.append((pc<=0.5).data.cpu().numpy().astype(np.float32).sum())
            acc.append(len(pc))
            # print pc.size(), lc.size()
            classify_loss += self.classify_loss(pc, lc+1) * 0.5
            # seg_loss: balanced BCE over positive/negative segmentation pixels.
            seg_output = seg_output.view(-1)
            seg_labels = seg_labels.view(-1)
            pos_index = seg_labels > 0.5
            neg_index = seg_labels < 0.5
            seg_loss = 0.5 * self.classify_loss(seg_output[pos_index], seg_labels[pos_index]) + \
                0.5 * self.classify_loss(seg_output[neg_index], seg_labels[neg_index])
            seg_tpr = (seg_output[pos_index] > 0.5).data.cpu().numpy().astype(np.float32).sum() / len(seg_labels[pos_index])
            seg_tnr = (seg_output[neg_index] < 0.5).data.cpu().numpy().astype(np.float32).sum() / len(seg_labels[neg_index])
            # print seg_output[neg_index]
            # print seg_labels[neg_index]
        else:
            return font_loss
        # NOTE(review): both branches currently build the same base sum; only
        # the non-resnet branch adds the regression terms.
        if args.model == 'resnet':
            loss = font_loss + classify_loss + seg_loss
        else:
            loss = font_loss + classify_loss + seg_loss
            for reg in regress_loss:
                loss += reg
        # if args.model == 'resnet':
        #     loss = seg_loss
        return [loss, font_loss, seg_loss, classify_loss] + regress_loss + acc + [seg_tpr, seg_tnr]
        # NOTE(review): everything below is unreachable — both paths above
        # return first. It looks like an older per-class balanced font loss;
        # consider deleting or re-enabling deliberately.
        font_num = font_target.sum(0).data.cpu().numpy()
        font_loss = 0
        for di in range(font_num.shape[0]):
            if font_num[di] > 0:
                font_output_i = font_output[:,di]
                font_target_i = font_target[:,di]
                pos_font_index = font_target_i > 0.5
                font_loss += 0.5 * self.classify_loss(font_output_i[pos_font_index], font_target_i[pos_font_index])
                neg_font_index = font_target_i < 0.5
                if len(font_target_i[neg_font_index]) > 0:
                    font_loss += 0.5 * self.classify_loss(font_output_i[neg_font_index], font_target_i[neg_font_index])
        font_loss = font_loss / (font_num>0).sum()
        return font_loss
# '''
def hard_mining(neg_output, neg_labels, num_hard, largest=True):
    """Select the hardest examples by score.

    The requested count is clamped to at least 10 and at most the number of
    available examples, then ``topk`` picks the highest-scoring entries (or
    lowest, when ``largest`` is False).

    Returns the (outputs, labels) pair reordered by descending hardness.
    """
    k = min(max(num_hard, 10), len(neg_output))
    _, hard_idx = torch.topk(neg_output, min(k, len(neg_output)), largest=largest)
    picked_output = torch.index_select(neg_output, 0, hard_idx)
    picked_labels = torch.index_select(neg_labels, 0, hard_idx)
    return picked_output, picked_labels
def save_model(save_dir, phase, name, epoch, f1score, model):
    """Write a checkpoint to save_dir/<args.model>/<phase>/ and mirror it to OBS.

    The checkpoint bundles CPU-resident weights with the epoch number and
    f1score. When `name` contains 'best' and the score beats 0.3, an extra
    epoch-stamped copy is written so earlier bests are not overwritten.
    """
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # Descend into <model>/<phase>, creating each level on demand
    # (os.mkdir is single-level).
    for part in (args.model, phase):
        save_dir = os.path.join(save_dir, part)
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    # Move every tensor to CPU so the checkpoint loads on CPU-only hosts.
    state_dict = model.state_dict()
    for key in state_dict.keys():
        state_dict[key] = state_dict[key].cpu()
    payload = {
        'state_dict': state_dict,
        'epoch': epoch,
        'f1score': f1score,
    }
    ckpt = '{:s}.ckpt'.format(name)
    torch.save(payload, os.path.join(save_dir, ckpt))
    file.copy(os.path.join(save_dir, ckpt), os.path.join(args.save_dir_obs, ckpt))
    if 'best' in name and f1score > 0.3:
        stamped = '{:s}_{:s}.ckpt'.format(name, str(epoch))
        torch.save(payload, os.path.join(save_dir, stamped))
        file.copy(os.path.join(save_dir, stamped),
                  os.path.join(args.save_dir_obs, stamped))
def mkdir(path):
    """Create *path* as a directory unless something already exists there."""
    if os.path.exists(path):
        return
    os.mkdir(path)
def test(epoch, model, train_loader, phase='test'):
    """Run inference over `train_loader`, write a per-image text CSV, and
    (after the loop) merge the predictions into the submission file.

    Decoding: per-class probabilities are thresholded at 0.5 and characters
    are appended in reading order, suppressing repeats seen in the previous
    column group.  Optionally dumps backbone feature maps to disk.
    """
    print '\ntest {:s}_files, epoch: {:d}'.format(phase, epoch)
    mkdir(datadir+'/data/result')
    model.eval()
    f1score_list = []
    recall_list = []
    precision_list = []
    # word_index_dict: presumably maps character -> class index; inverted
    # below for decoding. TODO(review): confirm against the JSON producer.
    word_index_dict = json.load(file.File(args.word_index_json,'r'))
    index_word_dict = { v:k for k,v in word_index_dict.items() }
    result_file = file.File(datadir+'/data/result/{:d}_{:s}_result.csv'.format(epoch, phase), 'w')
    result_file.write('name,content\n')
    name_f1score_dict = dict()
    # Directory for the feature maps produced by the DenseNet backbone.
    feat_dir = args.data_dir.replace('dataset', 'feats')
    mkdir(feat_dir)
    feat_dir = os.path.join(feat_dir, phase)
    print feat_dir
    mkdir(feat_dir)
    names = []
    probs_all = []
    for i,data in enumerate(train_loader):
        if i % 50 == 0:
            print('step[{:d}] OK...'.format(i))
        name = data[0][0].split('/')[-1].split('.seg')[0]
        names.append(name)
        images, labels = [Variable(x.cuda(async=True)) for x in data[1:3]]
        if len(images.size()) == 5:
            # A stack of patches for a single image: drop the extra axis.
            images = images[0]
        probs, feats = model(images, 'test')
        probs_all.append(probs.data.cpu().numpy().max(2).max(1).max(0))
        #pdb.set_trace()
        preds = probs.data.cpu().numpy() > 0.5 # (-1, 8, 1824)
        # result_file.write(name+',')
        result = u''
        last_set = set()
        all_set = set()
        if args.feat:
            # Save every feature map to disk.
            feats = feats.data.cpu().numpy()
            if i == 0:
                print feats.shape
            np.save(os.path.join(feat_dir, name.replace('.jpg','.npy')), feats)
            if len(feats) > 1: # feats: [-1, 1024, 1, 8]
                # Several patches: stitch them, keeping non-overlapping columns.
                new_feats = []
                # NOTE(review): this inner loop reuses `i`, clobbering the
                # outer enumerate counter used above — confirm it is intended.
                for i,feat in enumerate(feats):
                    if i == 0:
                        # First patch: keep the first 6 columns.
                        new_feats.append(feat[:,:,:6])
                    elif i == len(feats) - 1:
                        # Last patch: keep the last 6 columns.
                        new_feats.append(feat[:,:,2:])
                    else:
                        # Middle patches: keep the central 4 columns.
                        new_feats.append(feat[:,:,2:6])
                feats = np.concatenate(new_feats, 2)
        # This scheme detects the same character appearing in different
        # regions; when it repeats within one region it may be found only once.
        preds = preds.max(1) # max-pool along the vertical axis
        # if len(preds) > 1:
        #     print name
        for patch_i, patch_pred in enumerate(preds):
            for part_i, part_pred in enumerate(patch_pred):
                new_set = set()
                for idx,p in enumerate(part_pred):
                    if p:
                        # This character fired at this position.
                        w = index_word_dict[idx]
                        new_set.add(w)
                        if w not in all_set:
                            # Never seen anywhere in this image yet.
                            all_set.add(w)
                            result += w
                        elif w not in last_set:
                            # Seen before, but not in the previous part.
                            if patch_i == 0:
                                # First patch: the previous part lacked it.
                                result += w
                            elif part_i >= preds.shape[1]/2 :
                                # Second half of a follow-up patch. NOTE(review):
                                # the original comment said "don't write" here,
                                # yet the code appends — confirm intent.
                                result += w
                last_set = new_set
        result = result.replace(u'"', u'')
        if u',' in result:
            # Quote the CSV field when it contains a comma.
            result = '"' + result + '"'
        if len(result) == 0:
            # Nothing crossed the threshold: fall back to the single most
            # probable character so the row is never empty.
            global_prob = probs.data.cpu().numpy().max(0).max(0).max(0)
            max_index = global_prob.argmax()
            result = index_word_dict[max_index]
            print name
        result_file.write(name+','+result+'\n')
        # result_file.write('\n')
        if phase == 'test':
            continue
    result_file.close()
    import pandas as pd
    # NOTE(review): `re` shadows any regex module name from here on.
    re = pd.read_csv(datadir+'/data/result/{:d}_{:s}_result.csv'.format(epoch, phase))
    re.columns = ['target_file','text']
    submit = pd.read_csv(datadir+'/submission.csv')
    submit = pd.merge(submit, re, how='left', on=['target_file'])
    submit = submit.drop(['target_file'], axis=1)
    submit = submit.replace(to_replace='None',value=20)
    submit = submit.fillna('上')
    submit.to_csv(datadir+'/predict.csv', header=True, index=None, encoding='utf-8')
    file.copy(datadir+'/predict.csv', args.data_dir_obs+'/predict.csv')
def get_weight(labels):
    """Build per-element BCE weights that balance positives vs negatives
    within each class column.

    Negatives in a column share a weight of 1/#negatives; positives (when
    present) share 1/#positives. The matrix is then rescaled so the weights
    sum to the element count, and ignore-labels (< -0.5) are zeroed.
    """
    arr = labels.data.cpu().numpy()
    eps = 10e-20
    neg_w = 1.0 / ((arr < 0.5).sum(0) + eps)
    pos_counts = (arr > 0.5).sum(0)
    out = np.zeros_like(arr)
    for col in range(arr.shape[1]):
        col_labels = arr[:, col]
        col_w = np.full(arr.shape[0], neg_w[col])
        if pos_counts[col] > 0:
            col_w[col_labels > 0.5] = 1.0 / pos_counts[col]
        out[:, col] = col_w
    out *= np.ones_like(arr).sum() / (out.sum() + eps)
    out[arr < -0.5] = 0
    return out
def train_eval(epoch, model, train_loader, | |
def private_endpoint_ip_address(self) -> str:
"""
The private IP address of the created private endpoint.
"""
return pulumi.get(self, "private_endpoint_ip_address")
    # Generated accessor: resolves the "state" value stored on this output type.
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The current state of the bastion.
        """
        return pulumi.get(self, "state")
    # Generated accessor; maps the camelCase API field "staticJumpHostIpAddresses".
    @property
    @pulumi.getter(name="staticJumpHostIpAddresses")
    def static_jump_host_ip_addresses(self) -> Sequence[str]:
        """
        A list of IP addresses of the hosts that the bastion has access to. Not applicable to `standard` bastions.
        """
        return pulumi.get(self, "static_jump_host_ip_addresses")
    # Generated accessor; maps the camelCase API field "systemTags".
    @property
    @pulumi.getter(name="systemTags")
    def system_tags(self) -> Mapping[str, Any]:
        """
        Usage of system tag keys. These predefined keys are scoped to namespaces. Example: `{"orcl-cloud.free-tier-retained": "true"}`
        """
        return pulumi.get(self, "system_tags")
    # Generated accessor; maps the camelCase API field "targetSubnetId".
    @property
    @pulumi.getter(name="targetSubnetId")
    def target_subnet_id(self) -> str:
        """
        The unique identifier (OCID) of the subnet that the bastion connects to.
        """
        return pulumi.get(self, "target_subnet_id")
    # Generated accessor; maps the camelCase API field "targetVcnId".
    @property
    @pulumi.getter(name="targetVcnId")
    def target_vcn_id(self) -> str:
        """
        The unique identifier (OCID) of the virtual cloud network (VCN) that the bastion connects to.
        """
        return pulumi.get(self, "target_vcn_id")
    # Generated accessor; maps the camelCase API field "timeCreated".
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The time the bastion was created. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
        """
        return pulumi.get(self, "time_created")
    # Generated accessor; maps the camelCase API field "timeUpdated".
    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> str:
        """
        The time the bastion was updated. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
        """
        return pulumi.get(self, "time_updated")
# Generated output wrapper for a name/values filter used by the getBastions
# data source.
@pulumi.output_type
class GetBastionsFilterResult(dict):
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: A filter to return only resources that match the entire name given.
        """
        # `values` holds the candidate values; `regex` presumably switches the
        # match to regular-expression mode — undocumented upstream, confirm
        # against the provider schema.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        A filter to return only resources that match the entire name given.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        # The filter's candidate values (interpretation depends on `regex`).
        return pulumi.get(self, "values")
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        # Only present when the filter was constructed with `regex` set.
        return pulumi.get(self, "regex")
# Generated output wrapper holding the SSH public key attached to a session.
@pulumi.output_type
class GetSessionKeyDetailsResult(dict):
    def __init__(__self__, *,
                 public_key_content: str):
        """
        :param str public_key_content: The public key in OpenSSH format of the SSH key pair for the session. When you connect to the session, you must provide the private key of the same SSH key pair.
        """
        pulumi.set(__self__, "public_key_content", public_key_content)
    # Generated accessor; maps the camelCase API field "publicKeyContent".
    @property
    @pulumi.getter(name="publicKeyContent")
    def public_key_content(self) -> str:
        """
        The public key in OpenSSH format of the SSH key pair for the session. When you connect to the session, you must provide the private key of the same SSH key pair.
        """
        return pulumi.get(self, "public_key_content")
# Generated output wrapper describing the resource a bastion session targets.
@pulumi.output_type
class GetSessionTargetResourceDetailsResult(dict):
    def __init__(__self__, *,
                 session_type: str,
                 target_resource_display_name: str,
                 target_resource_id: str,
                 target_resource_operating_system_user_name: str,
                 target_resource_port: int,
                 target_resource_private_ip_address: str):
        """
        :param str session_type: The Bastion service recognizes two types of sessions, managed SSH sessions and SSH port forwarding sessions. Managed SSH sessions require that the target resource has an OpenSSH server and the Oracle Cloud Agent both running.
        :param str target_resource_display_name: The display name of the target Compute instance that the session connects to.
        :param str target_resource_id: The unique identifier (OCID) of the target resource (a Compute instance, for example) that the session connects to.
        :param str target_resource_operating_system_user_name: The name of the user on the target resource operating system that the session uses for the connection.
        :param int target_resource_port: The port number to connect to on the target resource.
        :param str target_resource_private_ip_address: The private IP address of the target resource that the session connects to.
        """
        pulumi.set(__self__, "session_type", session_type)
        pulumi.set(__self__, "target_resource_display_name", target_resource_display_name)
        pulumi.set(__self__, "target_resource_id", target_resource_id)
        pulumi.set(__self__, "target_resource_operating_system_user_name", target_resource_operating_system_user_name)
        pulumi.set(__self__, "target_resource_port", target_resource_port)
        pulumi.set(__self__, "target_resource_private_ip_address", target_resource_private_ip_address)
    @property
    @pulumi.getter(name="sessionType")
    def session_type(self) -> str:
        """
        The Bastion service recognizes two types of sessions, managed SSH sessions and SSH port forwarding sessions. Managed SSH sessions require that the target resource has an OpenSSH server and the Oracle Cloud Agent both running.
        """
        return pulumi.get(self, "session_type")
    @property
    @pulumi.getter(name="targetResourceDisplayName")
    def target_resource_display_name(self) -> str:
        """
        The display name of the target Compute instance that the session connects to.
        """
        return pulumi.get(self, "target_resource_display_name")
    @property
    @pulumi.getter(name="targetResourceId")
    def target_resource_id(self) -> str:
        """
        The unique identifier (OCID) of the target resource (a Compute instance, for example) that the session connects to.
        """
        return pulumi.get(self, "target_resource_id")
    @property
    @pulumi.getter(name="targetResourceOperatingSystemUserName")
    def target_resource_operating_system_user_name(self) -> str:
        """
        The name of the user on the target resource operating system that the session uses for the connection.
        """
        return pulumi.get(self, "target_resource_operating_system_user_name")
    @property
    @pulumi.getter(name="targetResourcePort")
    def target_resource_port(self) -> int:
        """
        The port number to connect to on the target resource.
        """
        return pulumi.get(self, "target_resource_port")
    @property
    @pulumi.getter(name="targetResourcePrivateIpAddress")
    def target_resource_private_ip_address(self) -> str:
        """
        The private IP address of the target resource that the session connects to.
        """
        return pulumi.get(self, "target_resource_private_ip_address")
# Generated output wrapper for a name/values filter used by the getSessions
# data source; structurally mirrors GetBastionsFilterResult.
@pulumi.output_type
class GetSessionsFilterResult(dict):
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        # name: the attribute to filter on; values: candidate values; regex:
        # presumably enables regular-expression matching — confirm upstream.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        # The attribute name this filter applies to.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        # The filter's candidate values (interpretation depends on `regex`).
        return pulumi.get(self, "values")
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        # Only present when the filter was constructed with `regex` set.
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetSessionsSessionResult(dict):
def __init__(__self__, *,
bastion_id: str,
bastion_name: str,
bastion_public_host_key_info: str,
bastion_user_name: str,
display_name: str,
id: str,
key_details: 'outputs.GetSessionsSessionKeyDetailsResult',
key_type: str,
lifecycle_details: str,
session_ttl_in_seconds: int,
ssh_metadata: Mapping[str, Any],
state: str,
target_resource_details: 'outputs.GetSessionsSessionTargetResourceDetailsResult',
time_created: str,
time_updated: str):
"""
:param str bastion_id: The unique identifier (OCID) of the bastion in which to list sessions.
:param str bastion_name: The name of the bastion that is hosting this session.
:param str bastion_public_host_key_info: The public key of the bastion host. You can use this to verify that you're connecting to the correct bastion.
:param str bastion_user_name: The username that the session uses to connect to the target resource.
:param str display_name: A filter to return only resources that match the entire display name given.
:param str id: The unique identifier (OCID) of the session, which can't be changed after creation.
:param 'GetSessionsSessionKeyDetailsArgs' key_details: Public key details for a bastion session.
:param str key_type: The type of the key used to connect to the session. PUB is a standard public key in OpenSSH format.
:param str lifecycle_details: A message describing the current session state in more detail.
:param int session_ttl_in_seconds: The amount of time the session can remain active.
:param Mapping[str, Any] ssh_metadata: The connection message for the session.
:param str state: The current state of the session.
:param 'GetSessionsSessionTargetResourceDetailsArgs' target_resource_details: Details about a bastion session's target resource.
:param str time_created: The time the session was created. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
:param str time_updated: The time the session was updated. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z`
"""
pulumi.set(__self__, "bastion_id", bastion_id)
pulumi.set(__self__, "bastion_name", bastion_name)
pulumi.set(__self__, "bastion_public_host_key_info", bastion_public_host_key_info)
pulumi.set(__self__, "bastion_user_name", bastion_user_name)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "key_details", key_details)
pulumi.set(__self__, "key_type", key_type)
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
pulumi.set(__self__, "session_ttl_in_seconds", session_ttl_in_seconds)
pulumi.set(__self__, "ssh_metadata", ssh_metadata)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "target_resource_details", target_resource_details)
pulumi.set(__self__, "time_created", time_created)
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="bastionId")
def bastion_id(self) -> str:
"""
The unique identifier (OCID) of the bastion in which to list sessions.
"""
return pulumi.get(self, "bastion_id")
@property
@pulumi.getter(name="bastionName")
def bastion_name(self) -> str:
"""
The name of the bastion that is hosting this session.
"""
return pulumi.get(self, "bastion_name")
@property
@pulumi.getter(name="bastionPublicHostKeyInfo")
def bastion_public_host_key_info(self) -> str:
"""
The public key of the bastion host. You can use this to verify that you're connecting to the correct bastion.
"""
return pulumi.get(self, "bastion_public_host_key_info")
@property
@pulumi.getter(name="bastionUserName")
def bastion_user_name(self) -> str:
"""
The username that the session uses to connect to the target resource.
"""
return pulumi.get(self, "bastion_user_name")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A filter to return only resources that match the entire display name given.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The unique identifier (OCID) of the session, which can't be changed after creation.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keyDetails")
def key_details(self) -> 'outputs.GetSessionsSessionKeyDetailsResult':
"""
Public key details for a bastion session.
"""
return pulumi.get(self, "key_details")
@property
@pulumi.getter(name="keyType")
def key_type(self) -> str:
"""
The type of the key used to connect to the session. PUB is a standard public key in OpenSSH format.
"""
return pulumi.get(self, "key_type")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
A message describing the current session state in more detail.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="sessionTtlInSeconds")
def session_ttl_in_seconds(self) -> int:
"""
The amount of time the session can remain active.
| |
= Constraint(expr=m.x273*m.x684 - m.x384*m.x590 == 0)
# Machine-generated (GAMS Convert) bilinear equality constraints c1302..c1494.
# Each constraint has the form  m.xA*m.xB - m.xC*m.xD == 0, i.e. it equates
# two products of decision variables.  The constraints come in groups of four
# sharing a common first factor (xA) and a common fourth factor (xD); the
# index pattern is irregular between groups (some consecutive xA values reuse
# the same xB/xD block, some skip ahead), so the explicit generated form is
# kept rather than a derived loop.
# NOTE(review): these presumably link disaggregated product/split variables
# in the originating MINLP — confirm against the source GAMS model.
m.c1302 = Constraint(expr=m.x273*m.x685 - m.x385*m.x590 == 0)
m.c1303 = Constraint(expr=m.x274*m.x690 - m.x386*m.x592 == 0)
m.c1304 = Constraint(expr=m.x274*m.x691 - m.x387*m.x592 == 0)
m.c1305 = Constraint(expr=m.x274*m.x692 - m.x388*m.x592 == 0)
m.c1306 = Constraint(expr=m.x274*m.x693 - m.x389*m.x592 == 0)
m.c1307 = Constraint(expr=m.x275*m.x694 - m.x390*m.x593 == 0)
m.c1308 = Constraint(expr=m.x275*m.x695 - m.x391*m.x593 == 0)
m.c1309 = Constraint(expr=m.x275*m.x696 - m.x392*m.x593 == 0)
m.c1310 = Constraint(expr=m.x275*m.x697 - m.x393*m.x593 == 0)
m.c1311 = Constraint(expr=m.x276*m.x698 - m.x394*m.x594 == 0)
m.c1312 = Constraint(expr=m.x276*m.x699 - m.x395*m.x594 == 0)
m.c1313 = Constraint(expr=m.x276*m.x700 - m.x396*m.x594 == 0)
m.c1314 = Constraint(expr=m.x276*m.x701 - m.x397*m.x594 == 0)
m.c1315 = Constraint(expr=m.x277*m.x698 - m.x398*m.x594 == 0)
m.c1316 = Constraint(expr=m.x277*m.x699 - m.x399*m.x594 == 0)
m.c1317 = Constraint(expr=m.x277*m.x700 - m.x400*m.x594 == 0)
m.c1318 = Constraint(expr=m.x277*m.x701 - m.x401*m.x594 == 0)
m.c1319 = Constraint(expr=m.x278*m.x702 - m.x402*m.x595 == 0)
m.c1320 = Constraint(expr=m.x278*m.x703 - m.x403*m.x595 == 0)
m.c1321 = Constraint(expr=m.x278*m.x704 - m.x404*m.x595 == 0)
m.c1322 = Constraint(expr=m.x278*m.x705 - m.x405*m.x595 == 0)
m.c1323 = Constraint(expr=m.x279*m.x702 - m.x406*m.x595 == 0)
m.c1324 = Constraint(expr=m.x279*m.x703 - m.x407*m.x595 == 0)
m.c1325 = Constraint(expr=m.x279*m.x704 - m.x408*m.x595 == 0)
m.c1326 = Constraint(expr=m.x279*m.x705 - m.x409*m.x595 == 0)
m.c1327 = Constraint(expr=m.x280*m.x706 - m.x410*m.x596 == 0)
m.c1328 = Constraint(expr=m.x280*m.x707 - m.x411*m.x596 == 0)
m.c1329 = Constraint(expr=m.x280*m.x708 - m.x412*m.x596 == 0)
m.c1330 = Constraint(expr=m.x280*m.x709 - m.x413*m.x596 == 0)
m.c1331 = Constraint(expr=m.x281*m.x710 - m.x414*m.x597 == 0)
m.c1332 = Constraint(expr=m.x281*m.x711 - m.x415*m.x597 == 0)
m.c1333 = Constraint(expr=m.x281*m.x712 - m.x416*m.x597 == 0)
m.c1334 = Constraint(expr=m.x281*m.x713 - m.x417*m.x597 == 0)
m.c1335 = Constraint(expr=m.x282*m.x718 - m.x418*m.x599 == 0)
m.c1336 = Constraint(expr=m.x282*m.x719 - m.x419*m.x599 == 0)
m.c1337 = Constraint(expr=m.x282*m.x720 - m.x420*m.x599 == 0)
m.c1338 = Constraint(expr=m.x282*m.x721 - m.x421*m.x599 == 0)
m.c1339 = Constraint(expr=m.x283*m.x722 - m.x422*m.x600 == 0)
m.c1340 = Constraint(expr=m.x283*m.x723 - m.x423*m.x600 == 0)
m.c1341 = Constraint(expr=m.x283*m.x724 - m.x424*m.x600 == 0)
m.c1342 = Constraint(expr=m.x283*m.x725 - m.x425*m.x600 == 0)
m.c1343 = Constraint(expr=m.x284*m.x726 - m.x426*m.x601 == 0)
m.c1344 = Constraint(expr=m.x284*m.x727 - m.x427*m.x601 == 0)
m.c1345 = Constraint(expr=m.x284*m.x728 - m.x428*m.x601 == 0)
m.c1346 = Constraint(expr=m.x284*m.x729 - m.x429*m.x601 == 0)
m.c1347 = Constraint(expr=m.x285*m.x726 - m.x430*m.x601 == 0)
m.c1348 = Constraint(expr=m.x285*m.x727 - m.x431*m.x601 == 0)
m.c1349 = Constraint(expr=m.x285*m.x728 - m.x432*m.x601 == 0)
m.c1350 = Constraint(expr=m.x285*m.x729 - m.x433*m.x601 == 0)
m.c1351 = Constraint(expr=m.x286*m.x730 - m.x434*m.x602 == 0)
m.c1352 = Constraint(expr=m.x286*m.x731 - m.x435*m.x602 == 0)
m.c1353 = Constraint(expr=m.x286*m.x732 - m.x436*m.x602 == 0)
m.c1354 = Constraint(expr=m.x286*m.x733 - m.x437*m.x602 == 0)
m.c1355 = Constraint(expr=m.x287*m.x730 - m.x438*m.x602 == 0)
m.c1356 = Constraint(expr=m.x287*m.x731 - m.x439*m.x602 == 0)
m.c1357 = Constraint(expr=m.x287*m.x732 - m.x440*m.x602 == 0)
m.c1358 = Constraint(expr=m.x287*m.x733 - m.x441*m.x602 == 0)
m.c1359 = Constraint(expr=m.x288*m.x734 - m.x442*m.x603 == 0)
m.c1360 = Constraint(expr=m.x288*m.x735 - m.x443*m.x603 == 0)
m.c1361 = Constraint(expr=m.x288*m.x736 - m.x444*m.x603 == 0)
m.c1362 = Constraint(expr=m.x288*m.x737 - m.x445*m.x603 == 0)
m.c1363 = Constraint(expr=m.x289*m.x738 - m.x446*m.x604 == 0)
m.c1364 = Constraint(expr=m.x289*m.x739 - m.x447*m.x604 == 0)
m.c1365 = Constraint(expr=m.x289*m.x740 - m.x448*m.x604 == 0)
m.c1366 = Constraint(expr=m.x289*m.x741 - m.x449*m.x604 == 0)
m.c1367 = Constraint(expr=m.x290*m.x746 - m.x450*m.x606 == 0)
m.c1368 = Constraint(expr=m.x290*m.x747 - m.x451*m.x606 == 0)
m.c1369 = Constraint(expr=m.x290*m.x748 - m.x452*m.x606 == 0)
m.c1370 = Constraint(expr=m.x290*m.x749 - m.x453*m.x606 == 0)
m.c1371 = Constraint(expr=m.x291*m.x750 - m.x454*m.x607 == 0)
m.c1372 = Constraint(expr=m.x291*m.x751 - m.x455*m.x607 == 0)
m.c1373 = Constraint(expr=m.x291*m.x752 - m.x456*m.x607 == 0)
m.c1374 = Constraint(expr=m.x291*m.x753 - m.x457*m.x607 == 0)
m.c1375 = Constraint(expr=m.x292*m.x754 - m.x458*m.x608 == 0)
m.c1376 = Constraint(expr=m.x292*m.x755 - m.x459*m.x608 == 0)
m.c1377 = Constraint(expr=m.x292*m.x756 - m.x460*m.x608 == 0)
m.c1378 = Constraint(expr=m.x292*m.x757 - m.x461*m.x608 == 0)
m.c1379 = Constraint(expr=m.x293*m.x754 - m.x462*m.x608 == 0)
m.c1380 = Constraint(expr=m.x293*m.x755 - m.x463*m.x608 == 0)
m.c1381 = Constraint(expr=m.x293*m.x756 - m.x464*m.x608 == 0)
m.c1382 = Constraint(expr=m.x293*m.x757 - m.x465*m.x608 == 0)
m.c1383 = Constraint(expr=m.x294*m.x758 - m.x466*m.x609 == 0)
m.c1384 = Constraint(expr=m.x294*m.x759 - m.x467*m.x609 == 0)
m.c1385 = Constraint(expr=m.x294*m.x760 - m.x468*m.x609 == 0)
m.c1386 = Constraint(expr=m.x294*m.x761 - m.x469*m.x609 == 0)
m.c1387 = Constraint(expr=m.x295*m.x758 - m.x470*m.x609 == 0)
m.c1388 = Constraint(expr=m.x295*m.x759 - m.x471*m.x609 == 0)
m.c1389 = Constraint(expr=m.x295*m.x760 - m.x472*m.x609 == 0)
m.c1390 = Constraint(expr=m.x295*m.x761 - m.x473*m.x609 == 0)
m.c1391 = Constraint(expr=m.x296*m.x762 - m.x474*m.x610 == 0)
m.c1392 = Constraint(expr=m.x296*m.x763 - m.x475*m.x610 == 0)
m.c1393 = Constraint(expr=m.x296*m.x764 - m.x476*m.x610 == 0)
m.c1394 = Constraint(expr=m.x296*m.x765 - m.x477*m.x610 == 0)
m.c1395 = Constraint(expr=m.x297*m.x766 - m.x478*m.x611 == 0)
m.c1396 = Constraint(expr=m.x297*m.x767 - m.x479*m.x611 == 0)
m.c1397 = Constraint(expr=m.x297*m.x768 - m.x480*m.x611 == 0)
m.c1398 = Constraint(expr=m.x297*m.x769 - m.x481*m.x611 == 0)
m.c1399 = Constraint(expr=m.x298*m.x774 - m.x482*m.x613 == 0)
m.c1400 = Constraint(expr=m.x298*m.x775 - m.x483*m.x613 == 0)
m.c1401 = Constraint(expr=m.x298*m.x776 - m.x484*m.x613 == 0)
m.c1402 = Constraint(expr=m.x298*m.x777 - m.x485*m.x613 == 0)
m.c1403 = Constraint(expr=m.x299*m.x778 - m.x486*m.x614 == 0)
m.c1404 = Constraint(expr=m.x299*m.x779 - m.x487*m.x614 == 0)
m.c1405 = Constraint(expr=m.x299*m.x780 - m.x488*m.x614 == 0)
m.c1406 = Constraint(expr=m.x299*m.x781 - m.x489*m.x614 == 0)
m.c1407 = Constraint(expr=m.x300*m.x782 - m.x490*m.x615 == 0)
m.c1408 = Constraint(expr=m.x300*m.x783 - m.x491*m.x615 == 0)
m.c1409 = Constraint(expr=m.x300*m.x784 - m.x492*m.x615 == 0)
m.c1410 = Constraint(expr=m.x300*m.x785 - m.x493*m.x615 == 0)
m.c1411 = Constraint(expr=m.x301*m.x782 - m.x494*m.x615 == 0)
m.c1412 = Constraint(expr=m.x301*m.x783 - m.x495*m.x615 == 0)
m.c1413 = Constraint(expr=m.x301*m.x784 - m.x496*m.x615 == 0)
m.c1414 = Constraint(expr=m.x301*m.x785 - m.x497*m.x615 == 0)
m.c1415 = Constraint(expr=m.x302*m.x786 - m.x498*m.x616 == 0)
m.c1416 = Constraint(expr=m.x302*m.x787 - m.x499*m.x616 == 0)
m.c1417 = Constraint(expr=m.x302*m.x788 - m.x500*m.x616 == 0)
m.c1418 = Constraint(expr=m.x302*m.x789 - m.x501*m.x616 == 0)
m.c1419 = Constraint(expr=m.x303*m.x786 - m.x502*m.x616 == 0)
m.c1420 = Constraint(expr=m.x303*m.x787 - m.x503*m.x616 == 0)
m.c1421 = Constraint(expr=m.x303*m.x788 - m.x504*m.x616 == 0)
m.c1422 = Constraint(expr=m.x303*m.x789 - m.x505*m.x616 == 0)
m.c1423 = Constraint(expr=m.x304*m.x790 - m.x506*m.x617 == 0)
m.c1424 = Constraint(expr=m.x304*m.x791 - m.x507*m.x617 == 0)
m.c1425 = Constraint(expr=m.x304*m.x792 - m.x508*m.x617 == 0)
m.c1426 = Constraint(expr=m.x304*m.x793 - m.x509*m.x617 == 0)
m.c1427 = Constraint(expr=m.x305*m.x794 - m.x510*m.x618 == 0)
m.c1428 = Constraint(expr=m.x305*m.x795 - m.x511*m.x618 == 0)
m.c1429 = Constraint(expr=m.x305*m.x796 - m.x512*m.x618 == 0)
m.c1430 = Constraint(expr=m.x305*m.x797 - m.x513*m.x618 == 0)
m.c1431 = Constraint(expr=m.x306*m.x802 - m.x514*m.x620 == 0)
m.c1432 = Constraint(expr=m.x306*m.x803 - m.x515*m.x620 == 0)
m.c1433 = Constraint(expr=m.x306*m.x804 - m.x516*m.x620 == 0)
m.c1434 = Constraint(expr=m.x306*m.x805 - m.x517*m.x620 == 0)
m.c1435 = Constraint(expr=m.x307*m.x806 - m.x518*m.x621 == 0)
m.c1436 = Constraint(expr=m.x307*m.x807 - m.x519*m.x621 == 0)
m.c1437 = Constraint(expr=m.x307*m.x808 - m.x520*m.x621 == 0)
m.c1438 = Constraint(expr=m.x307*m.x809 - m.x521*m.x621 == 0)
m.c1439 = Constraint(expr=m.x308*m.x810 - m.x522*m.x622 == 0)
m.c1440 = Constraint(expr=m.x308*m.x811 - m.x523*m.x622 == 0)
m.c1441 = Constraint(expr=m.x308*m.x812 - m.x524*m.x622 == 0)
m.c1442 = Constraint(expr=m.x308*m.x813 - m.x525*m.x622 == 0)
m.c1443 = Constraint(expr=m.x309*m.x810 - m.x526*m.x622 == 0)
m.c1444 = Constraint(expr=m.x309*m.x811 - m.x527*m.x622 == 0)
m.c1445 = Constraint(expr=m.x309*m.x812 - m.x528*m.x622 == 0)
m.c1446 = Constraint(expr=m.x309*m.x813 - m.x529*m.x622 == 0)
m.c1447 = Constraint(expr=m.x310*m.x814 - m.x530*m.x623 == 0)
m.c1448 = Constraint(expr=m.x310*m.x815 - m.x531*m.x623 == 0)
m.c1449 = Constraint(expr=m.x310*m.x816 - m.x532*m.x623 == 0)
m.c1450 = Constraint(expr=m.x310*m.x817 - m.x533*m.x623 == 0)
m.c1451 = Constraint(expr=m.x311*m.x814 - m.x534*m.x623 == 0)
m.c1452 = Constraint(expr=m.x311*m.x815 - m.x535*m.x623 == 0)
m.c1453 = Constraint(expr=m.x311*m.x816 - m.x536*m.x623 == 0)
m.c1454 = Constraint(expr=m.x311*m.x817 - m.x537*m.x623 == 0)
m.c1455 = Constraint(expr=m.x312*m.x818 - m.x538*m.x624 == 0)
m.c1456 = Constraint(expr=m.x312*m.x819 - m.x539*m.x624 == 0)
m.c1457 = Constraint(expr=m.x312*m.x820 - m.x540*m.x624 == 0)
m.c1458 = Constraint(expr=m.x312*m.x821 - m.x541*m.x624 == 0)
m.c1459 = Constraint(expr=m.x313*m.x822 - m.x542*m.x625 == 0)
m.c1460 = Constraint(expr=m.x313*m.x823 - m.x543*m.x625 == 0)
m.c1461 = Constraint(expr=m.x313*m.x824 - m.x544*m.x625 == 0)
m.c1462 = Constraint(expr=m.x313*m.x825 - m.x545*m.x625 == 0)
m.c1463 = Constraint(expr=m.x314*m.x830 - m.x546*m.x627 == 0)
m.c1464 = Constraint(expr=m.x314*m.x831 - m.x547*m.x627 == 0)
m.c1465 = Constraint(expr=m.x314*m.x832 - m.x548*m.x627 == 0)
m.c1466 = Constraint(expr=m.x314*m.x833 - m.x549*m.x627 == 0)
m.c1467 = Constraint(expr=m.x315*m.x834 - m.x550*m.x628 == 0)
m.c1468 = Constraint(expr=m.x315*m.x835 - m.x551*m.x628 == 0)
m.c1469 = Constraint(expr=m.x315*m.x836 - m.x552*m.x628 == 0)
m.c1470 = Constraint(expr=m.x315*m.x837 - m.x553*m.x628 == 0)
m.c1471 = Constraint(expr=m.x316*m.x838 - m.x554*m.x629 == 0)
m.c1472 = Constraint(expr=m.x316*m.x839 - m.x555*m.x629 == 0)
m.c1473 = Constraint(expr=m.x316*m.x840 - m.x556*m.x629 == 0)
m.c1474 = Constraint(expr=m.x316*m.x841 - m.x557*m.x629 == 0)
m.c1475 = Constraint(expr=m.x317*m.x838 - m.x558*m.x629 == 0)
m.c1476 = Constraint(expr=m.x317*m.x839 - m.x559*m.x629 == 0)
m.c1477 = Constraint(expr=m.x317*m.x840 - m.x560*m.x629 == 0)
m.c1478 = Constraint(expr=m.x317*m.x841 - m.x561*m.x629 == 0)
m.c1479 = Constraint(expr=m.x318*m.x842 - m.x562*m.x630 == 0)
m.c1480 = Constraint(expr=m.x318*m.x843 - m.x563*m.x630 == 0)
m.c1481 = Constraint(expr=m.x318*m.x844 - m.x564*m.x630 == 0)
m.c1482 = Constraint(expr=m.x318*m.x845 - m.x565*m.x630 == 0)
m.c1483 = Constraint(expr=m.x319*m.x842 - m.x566*m.x630 == 0)
m.c1484 = Constraint(expr=m.x319*m.x843 - m.x567*m.x630 == 0)
m.c1485 = Constraint(expr=m.x319*m.x844 - m.x568*m.x630 == 0)
m.c1486 = Constraint(expr=m.x319*m.x845 - m.x569*m.x630 == 0)
m.c1487 = Constraint(expr=m.x320*m.x846 - m.x570*m.x631 == 0)
m.c1488 = Constraint(expr=m.x320*m.x847 - m.x571*m.x631 == 0)
m.c1489 = Constraint(expr=m.x320*m.x848 - m.x572*m.x631 == 0)
m.c1490 = Constraint(expr=m.x320*m.x849 - m.x573*m.x631 == 0)
m.c1491 = Constraint(expr=m.x321*m.x850 - m.x574*m.x632 == 0)
m.c1492 = Constraint(expr=m.x321*m.x851 - m.x575*m.x632 == 0)
m.c1493 = Constraint(expr=m.x321*m.x852 - m.x576*m.x632 == 0)
m.c1494 = Constraint(expr=m.x321*m.x853 - m.x577*m.x632 == 0)
# Machine-generated (GAMS Convert) linear lower-bound constraints c1495..c1514.
# Most simply require a variable to be nonnegative (m.xN >= 0); c1501, c1502,
# c1509 and c1510 instead require m.xN >= 5*m.xK, tying the lower bound of one
# variable to five times another.
# NOTE(review): the "-5*x + y >= 0" rows look like minimum-level/linking
# bounds from the originating MINLP — confirm against the source GAMS model.
m.c1495 = Constraint(expr= m.x258 >= 0)
m.c1496 = Constraint(expr= m.x259 >= 0)
m.c1497 = Constraint(expr= m.x260 >= 0)
m.c1498 = Constraint(expr= m.x261 >= 0)
m.c1499 = Constraint(expr= m.x262 >= 0)
m.c1500 = Constraint(expr= m.x263 >= 0)
m.c1501 = Constraint(expr= - 5*m.x136 + m.x264 >= 0)
m.c1502 = Constraint(expr= - 5*m.x137 + m.x265 >= 0)
m.c1503 = Constraint(expr= m.x266 >= 0)
m.c1504 = Constraint(expr= m.x267 >= 0)
m.c1505 = Constraint(expr= m.x268 >= 0)
m.c1506 = Constraint(expr= m.x269 >= 0)
m.c1507 = Constraint(expr= m.x270 >= 0)
m.c1508 = Constraint(expr= m.x271 >= 0)
m.c1509 = Constraint(expr= - 5*m.x144 + m.x272 >= 0)
m.c1510 = Constraint(expr= - 5*m.x145 + m.x273 >= 0)
m.c1511 = Constraint(expr= m.x274 >= 0)
m.c1512 = Constraint(expr= m.x275 >= 0)
m.c1513 = Constraint(expr= m.x276 >= 0)
m.c1514 = Constraint(expr= m.x277 >= 0)
m.c1515 = Constraint(expr= m.x278 >= | |
# MINLP written by GAMS Convert at 04/21/18 13:54:20
#
# Equation counts
# Total E G L N X C B
# 6101 101 0 6000 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 6031 6001 30 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 21031 12031 9000 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# Decision variables x1..x263: continuous, nonnegative, each initialized to
# 1/30 (0.0333333333333333, exactly as emitted by GAMS Convert).  The 263
# generated one-per-line declarations are collapsed into a loop; assigning a
# Var via setattr on a Pyomo model is the very same operation as the generated
# `m.xN = Var(...)` statements, so component names ("x1".."x263"),
# declaration order, domains, bounds, and initial values are all unchanged.
for _i in range(1, 264):
    setattr(m, 'x{}'.format(_i),
            Var(within=Reals, bounds=(0, None), initialize=0.0333333333333333))
# Drop the loop index so the module namespace matches the generated original.
del _i
m.x264 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x325 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x326 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x327 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x328 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x329 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x330 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x331 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x332 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x333 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x334 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x335 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x336 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x337 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x338 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x339 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x340 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x341 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x342 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x343 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x344 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x345 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x346 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x347 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x348 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x349 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x350 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x351 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x352 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x353 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x354 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x355 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x356 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x357 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x358 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x359 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x360 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x361 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x362 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x363 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x364 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x365 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x366 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x367 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x368 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x369 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x370 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x371 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x372 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x373 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x374 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x375 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x376 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x377 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x378 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x379 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x380 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x381 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x382 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x383 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x384 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x385 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x386 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x387 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x388 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x389 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x390 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x391 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x392 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x393 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x394 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x395 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x396 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x397 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x398 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x399 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x400 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x401 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x402 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x403 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x404 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x405 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x406 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x407 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x408 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x409 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x410 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x411 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x412 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x413 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x414 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x415 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x416 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x417 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x418 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x419 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x420 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x421 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x422 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x423 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x424 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x425 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x426 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x427 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x428 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x429 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x430 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x431 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x432 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x433 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x434 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x435 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x436 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x437 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x438 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x439 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x440 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x441 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x442 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x443 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x444 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x495 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x496 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x497 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x498 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x499 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x500 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x501 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x502 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x503 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x504 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x505 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x506 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x507 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x508 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x509 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x510 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x511 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x512 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x513 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x514 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x515 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x516 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x517 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x518 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x519 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x520 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x521 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x522 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x523 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x524 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x525 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x526 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x527 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x528 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x529 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x530 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x531 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x532 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x533 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x534 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x535 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x536 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x537 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x538 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x539 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x540 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x541 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x542 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x543 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x544 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x545 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x546 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x547 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x548 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x549 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x550 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x551 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x552 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x553 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x554 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x555 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x556 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x557 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x558 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x559 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x560 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x561 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x562 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x563 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x564 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x565 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x566 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x567 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x568 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x569 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x570 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x571 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x572 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x573 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x574 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x575 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x576 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x577 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x578 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x579 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x580 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x581 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x582 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x583 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x584 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x585 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x586 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x587 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x588 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x589 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x590 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x591 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x592 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x593 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x594 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x595 = Var(within=Reals,bounds=(0,None),initialize=0.0333333333333333)
m.x596 | |
for p in proto.params:
base_type = p.ty.replace('const ', '').strip('*')
if 'count' in p.name.lower():
param_count = p.name
if base_type in vulkan.core.objects:
# This is an object to potentially check for validity. First see if it's an array
if '*' in p.ty and 'const' in p.ty and param_count != 'NONE':
loop_params[param_count].append(p.name)
loop_types[param_count].append(str(p.ty[6:-1]))
# Not an array, check for just a base Object that's not in exceptions
elif '*' not in p.ty and (proto.name not in valid_null_object_names or p.name not in valid_null_object_names[proto.name]):
loop_params[0].append(p.name)
loop_types[0].append(str(p.ty))
elif vk_helper.is_type(base_type, 'struct'):
struct_type = base_type
if vk_helper.typedef_rev_dict[struct_type] in vk_helper.struct_dict:
struct_type = vk_helper.typedef_rev_dict[struct_type]
# Parse elements of this struct param to identify objects and/or arrays of objects
for m in sorted(vk_helper.struct_dict[struct_type]):
if vk_helper.struct_dict[struct_type][m]['type'] in vulkan.core.objects and vk_helper.struct_dict[struct_type][m]['type'] not in ['VkPhysicalDevice', 'VkQueue', 'VkFence', 'VkImage', 'VkDeviceMemory']:
if proto.name not in valid_null_object_names or vk_helper.struct_dict[struct_type][m]['name'] not in valid_null_object_names[proto.name]:
# This is not great, but gets the job done for now, but If we have a count and this param is a ptr w/
# last letter 's' OR non-'count' string of count is in the param name, then this is a dynamically sized array param
param_array = False
if param_count != 'NONE':
if '*' in p.ty:
if 's' == p.name[-1] or param_count.lower().replace('count', '') in p.name.lower():
param_array = True
if param_array:
param_name = '%s[i].%s' % (p.name, vk_helper.struct_dict[struct_type][m]['name'])
else:
param_name = '%s->%s' % (p.name, vk_helper.struct_dict[struct_type][m]['name'])
if vk_helper.struct_dict[struct_type][m]['dyn_array']:
if param_count != 'NONE': # this will be a double-embedded loop, use comma delineated 'count,name' for param_name
loop_count = '%s[i].%s' % (p.name, vk_helper.struct_dict[struct_type][m]['array_size'])
loop_params[param_count].append('%s,%s' % (loop_count, param_name))
loop_types[param_count].append('%s' % (vk_helper.struct_dict[struct_type][m]['type']))
else:
loop_count = '%s->%s' % (p.name, vk_helper.struct_dict[struct_type][m]['array_size'])
loop_params[loop_count].append(param_name)
loop_types[loop_count].append('%s' % (vk_helper.struct_dict[struct_type][m]['type']))
else:
if '[' in param_name: # dynamic array param, set size
loop_params[param_count].append(param_name)
loop_types[param_count].append('%s' % (vk_helper.struct_dict[struct_type][m]['type']))
else:
loop_params[0].append(param_name)
loop_types[0].append('%s' % (vk_helper.struct_dict[struct_type][m]['type']))
last_param_index = None
create_func = False
if True in [create_txt in proto.name for create_txt in ['Create', 'Allocate']]:
create_func = True
last_param_index = -1 # For create funcs don't validate last object
(struct_uses, local_decls) = get_object_uses(vulkan.object_type_list, proto.params[:last_param_index])
funcs = []
mutex_unlock = False
funcs.append('%s\n' % self.lineinfo.get())
if proto.name in explicit_object_tracker_functions:
funcs.append('%s%s\n'
'{\n'
' return explicit_%s;\n'
'}' % (qual, decl, proto.c_call()))
return "".join(funcs)
# Temporarily prevent DestroySurface call from being generated until WSI layer support is fleshed out
elif 'DestroyInstance' in proto.name or 'DestroyDevice' in proto.name:
return ""
else:
if create_func:
typ = proto.params[-1].ty.strip('*').replace('const ', '');
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()[3:]
create_line = ' {\n'
create_line += ' std::lock_guard<std::mutex> lock(global_lock);\n'
create_line += ' if (result == VK_SUCCESS) {\n'
create_line += ' create_%s(%s, *%s, %s);\n' % (name, param0_name, proto.params[-1].name, obj_type_mapping[typ])
create_line += ' }\n'
create_line += ' }\n'
if 'FreeCommandBuffers' in proto.name:
typ = proto.params[-1].ty.strip('*').replace('const ', '');
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()[3:]
funcs.append('%s\n' % self.lineinfo.get())
destroy_line = ' loader_platform_thread_lock_mutex(&objLock);\n'
destroy_line += ' for (uint32_t i = 0; i < commandBufferCount; i++) {\n'
destroy_line += ' destroy_%s(%s[i], %s[i]);\n' % (name, proto.params[-1].name, proto.params[-1].name)
destroy_line += ' }\n'
destroy_line += ' loader_platform_thread_unlock_mutex(&objLock);\n'
if 'Destroy' in proto.name:
typ = proto.params[-2].ty.strip('*').replace('const ', '');
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', typ)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()[3:]
funcs.append('%s\n' % self.lineinfo.get())
destroy_line = ' {\n'
destroy_line += ' std::lock_guard<std::mutex> lock(global_lock);\n'
destroy_line += ' destroy_%s(%s, %s);\n' % (name, param0_name, proto.params[-2].name)
destroy_line += ' }\n'
indent = ' '
if len(struct_uses) > 0:
using_line += '%sVkBool32 skipCall = VK_FALSE;\n' % (indent)
if not mutex_unlock:
using_line += '%s{\n' % (indent)
indent += ' '
using_line += '%sstd::lock_guard<std::mutex> lock(global_lock);\n' % (indent)
mutex_unlock = True
using_line += '// objects to validate: %s\n' % str(sorted(struct_uses))
using_line += self._gen_obj_validate_code(struct_uses, obj_type_mapping, proto.name, valid_null_object_names, param0_name, indent, '', 0)
if mutex_unlock:
indent = indent[4:]
using_line += '%s}\n' % (indent)
if len(struct_uses) > 0:
using_line += ' if (skipCall)\n'
if proto.ret == "VkBool32":
using_line += ' return VK_FALSE;\n'
elif proto.ret != "void":
using_line += ' return VK_ERROR_VALIDATION_FAILED_EXT;\n'
else:
using_line += ' return;\n'
ret_val = ''
stmt = ''
if proto.ret != "void":
ret_val = "%s result = " % proto.ret
stmt = " return result;\n"
dispatch_param = proto.params[0].name
if 'CreateInstance' in proto.name:
dispatch_param = '*' + proto.params[1].name
# Must use 'instance' table for these APIs, 'device' table otherwise
table_type = ""
if proto_is_global(proto):
table_type = "instance"
else:
table_type = "device"
if wsi_name(proto.name):
funcs.append('%s' % wsi_ifdef(proto.name))
funcs.append('%s%s\n'
'{\n'
'%s'
'%s'
' %sget_dispatch_table(object_tracker_%s_table_map, %s)->%s;\n'
'%s'
'%s'
'}' % (qual, decl, using_line, destroy_line, ret_val, table_type, dispatch_param, proto.c_call(), create_line, stmt))
if wsi_name(proto.name):
funcs.append('%s' % wsi_endif(proto.name))
return "\n\n".join(funcs)
def generate_body(self):
self.layer_name = "object_tracker"
extensions=[('wsi_enabled',
['vkCreateSwapchainKHR',
'vkDestroySwapchainKHR', 'vkGetSwapchainImagesKHR',
'vkAcquireNextImageKHR', 'vkQueuePresentKHR'])]
if self.wsi == 'Win32':
instance_extensions=[('msg_callback_get_proc_addr', []),
('wsi_enabled',
['vkDestroySurfaceKHR',
'vkGetPhysicalDeviceSurfaceSupportKHR',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkCreateWin32SurfaceKHR',
'vkGetPhysicalDeviceWin32PresentationSupportKHR'])]
elif self.wsi == 'Android':
instance_extensions=[('msg_callback_get_proc_addr', []),
('wsi_enabled',
['vkDestroySurfaceKHR',
'vkGetPhysicalDeviceSurfaceSupportKHR',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkCreateAndroidSurfaceKHR'])]
elif self.wsi == 'Xcb' or self.wsi == 'Xlib' or self.wsi == 'Wayland' or self.wsi == 'Mir':
instance_extensions=[('msg_callback_get_proc_addr', []),
('wsi_enabled',
['vkDestroySurfaceKHR',
'vkGetPhysicalDeviceSurfaceSupportKHR',
'vkGetPhysicalDeviceSurfaceCapabilitiesKHR',
'vkGetPhysicalDeviceSurfaceFormatsKHR',
'vkGetPhysicalDeviceSurfacePresentModesKHR',
'vkCreateXcbSurfaceKHR',
'vkGetPhysicalDeviceXcbPresentationSupportKHR',
'vkCreateXlibSurfaceKHR',
'vkGetPhysicalDeviceXlibPresentationSupportKHR',
'vkCreateWaylandSurfaceKHR',
'vkGetPhysicalDeviceWaylandPresentationSupportKHR',
'vkCreateMirSurfaceKHR',
'vkGetPhysicalDeviceMirPresentationSupportKHR'])]
else:
print('Error: Undefined DisplayServer')
instance_extensions=[]
body = [self.generate_maps(),
self.generate_procs(),
self.generate_destroy_instance(),
self.generate_destroy_device(),
self._generate_dispatch_entrypoints("VK_LAYER_EXPORT"),
self._generate_extensions(),
self._generate_layer_gpa_function(extensions,
instance_extensions)]
return "\n\n".join(body)
class UniqueObjectsSubcommand(Subcommand):
def generate_header(self):
header_txt = []
header_txt.append('%s' % self.lineinfo.get())
header_txt.append('#include "unique_objects.h"')
return "\n".join(header_txt)
# Generate UniqueObjects code for given struct_uses dict of objects that need to be unwrapped
# vector_name_set is used to make sure we don't replicate vector names
# first_level_param indicates if elements are passed directly into the function else they're below a ptr/struct
# TODO : Comment this code
def _gen_obj_code(self, struct_uses, param_type, indent, prefix, array_index, vector_name_set, first_level_param):
decls = ''
pre_code = ''
post_code = ''
for obj in sorted(struct_uses):
name = obj
array = ''
if '[' in obj:
(name, array) = obj.split('[')
array = array.strip(']')
ptr_type = False
if 'p' == obj[0] and obj[1] != obj[1].lower(): # TODO : Not ideal way to determine ptr
ptr_type = True
if isinstance(struct_uses[obj], dict):
local_prefix = ''
name = '%s%s' % (prefix, name)
if ptr_type:
if first_level_param and name in param_type:
pre_code += '%sif (%s) {\n' % (indent, name)
else: # shadow ptr will have been initialized at this point so check it vs. source ptr
pre_code += '%sif (local_%s) {\n' % (indent, name)
indent += ' '
if array != '':
idx = 'idx%s' % str(array_index)
array_index += 1
if first_level_param and name in param_type:
pre_code += '%slocal_%s = new safe_%s[%s];\n' % (indent, name, param_type[name].strip('*'), array)
post_code += ' if (local_%s)\n' % (name)
post_code += ' delete[] local_%s;\n' % (name)
pre_code += '%sfor (uint32_t %s=0; %s<%s%s; ++%s) {\n' % (indent, idx, idx, prefix, array, idx)
indent += ' '
if first_level_param:
pre_code += '%slocal_%s[%s].initialize(&%s[%s]);\n' % (indent, name, idx, name, idx)
local_prefix = '%s[%s].' % (name, idx)
elif ptr_type:
if first_level_param and name in param_type:
pre_code += '%slocal_%s = new safe_%s(%s);\n' % (indent, name, param_type[name].strip('*'), name)
post_code += ' if (local_%s)\n' % (name)
post_code += ' delete local_%s;\n' % (name)
local_prefix = '%s->' % (name)
else:
local_prefix = '%s.' % (name)
assert isinstance(decls, object)
(tmp_decl, tmp_pre, tmp_post) = self._gen_obj_code(struct_uses[obj], param_type, indent, local_prefix, array_index, vector_name_set, False)
decls += tmp_decl
pre_code += tmp_pre
post_code += tmp_post
if array != '':
indent = indent[4:]
pre_code += '%s}\n' % (indent)
if ptr_type:
indent = indent[4:]
pre_code += '%s}\n' % (indent)
else:
if (array_index > 0) or array != '': # TODO : This is not ideal, really want to know if we're anywhere under an array
if first_level_param:
decls += '%s%s* local_%s = NULL;\n' % (indent, struct_uses[obj], name)
if array != '' and not first_level_param: # ptrs under structs will have been initialized so use local_*
pre_code += '%sif (local_%s%s) {\n' %(indent, prefix, name)
else:
pre_code += '%sif (%s%s) {\n' %(indent, prefix, name)
indent += ' '
if array != '':
idx = 'idx%s' % str(array_index)
array_index += 1
if first_level_param:
pre_code += '%slocal_%s = new %s[%s];\n' % (indent, name, struct_uses[obj], array)
post_code += ' if (local_%s)\n' % (name)
post_code += ' delete[] local_%s;\n' % (name)
pre_code += '%sfor (uint32_t %s=0; %s<%s%s; ++%s) {\n' % (indent, idx, idx, prefix, array, idx)
indent | |
"""
Establishes a PyModbus server/slave context that maps to ClearBlade platform collections.
Requires a unique instance per slave device represented in the platform.
"""
from clearblade.ClearBladeCore import Query
from pymodbus.interfaces import IModbusSlaveContext
from pymodbus.datastore.context import ModbusServerContext
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.constants import Endian
from headless import is_logger, get_wrapping_logger
from store import CbModbusSequentialDataBlock, CbModbusSparseDataBlock
from constants import *
class ClearBladeModbusProxySlaveContext(IModbusSlaveContext):
    """
    A Modbus slave context integrated with ClearBlade, derived from an Inmarsat IDP Modbus Proxy template file.
    Represents a single remote outstation / RTU.
    The ClearBlade collection must have a row defining at least the following:

       * ``ip_address`` (str) the proxy address (IPv4) assigned to the remote outstation/RTU
       * ``ip_port`` (int) the (TCP) port of the proxy address for the RTU
       * ``slave_id`` (int) the Modbus Slave ID / Unit ID (0..254)
       * ``zero_mode`` (bool)
       * ``last_report_time`` (ts) timestamp of the most recently received data from the RTU in the field (metadata)
       * ``config_file`` (str) a formatted text file conforming to Inmarsat's IDP Modbus Proxy template

    .. todo::
       Add URL link to Modbus Proxy template
    """

    def __init__(self, server_context, config, **kwargs):
        """
        :param ClearBladeModbusProxyServerContext server_context: the parent server context
        :param dict config: a row returned from reading the Clearblade collection for RTU configuration
        :param kwargs: optional arguments such as log
        """
        if is_logger(kwargs.get('log', None)):
            self.log = kwargs.get('log')
        else:
            self.log = get_wrapping_logger(name='ClearBladeModbusSlaveContext',
                                           debug=True if kwargs.get('debug', None) else False)
        self.cb_system = server_context.cb_system
        self.cb_auth = server_context.cb_auth
        self.cb_data_collection = server_context.cb_data
        self.ip_proxy = str(config[COL_PROXY_IP_ADDRESS])
        self.ip_port = int(config[COL_PROXY_IP_PORT])
        if self.ip_proxy == '':
            self.log.warning("No proxy IP address specified, may result in conflicting slave_id")
        self.slave_id = int(config[COL_SLAVE_ID])
        # Default to zero-based addressing; may be overridden by the template's
        # plcBaseAddress directive in _parse_config().
        self.zero_mode = True
        # self.last_report_time = config[COL_PROXY_TIMESTAMP]
        self.identity = ModbusDeviceIdentification()
        self.sparse = False
        # Maps pymodbus store keys ('d', 'c', 'h', 'i') to data blocks; filled
        # in by _parse_config().
        self.store = dict()
        # Byte order and word order may be per-register and are assumed to be handled by the Polling Client
        # self.byteorder = Endian.Big
        # self.wordorder = Endian.Big
        self._parse_config(config[COL_PROXY_CONFIG_FILE])
        self.log.debug("Slave context {} complete".format(self.ip_proxy))

    def _parse_config(self, config_file):
        """
        Parses the ``config.dat`` template file and builds the register stores.

        Recognizes three kinds of directive lines: device description/identity,
        network settings, and register definitions.

        :param str config_file: the full template file content
        """
        registers = []
        lines = config_file.splitlines()
        for line in lines:
            if line[0:len(TEMPLATE_PARSER_DESC)] == TEMPLATE_PARSER_DESC:
                # Device identity metadata, e.g. "VendorName=...;ProductName=..."
                identity = line.split(TEMPLATE_PARSER_SEPARATOR)
                for id_tag in identity:
                    if id_tag[0:len('VendorName')] == 'VendorName':
                        self.identity.VendorName = id_tag[len('VendorName') + 1:].strip()
                    elif id_tag[0:len('ProductCode')] == 'ProductCode':
                        self.identity.ProductCode = id_tag[len('ProductCode') + 1:].strip()
                    elif id_tag[0:len('VendorUrl')] == 'VendorUrl':
                        self.identity.VendorUrl = id_tag[len('VendorUrl') + 1:].strip()
                    elif id_tag[0:len('ProductName')] == 'ProductName':
                        self.identity.ProductName = id_tag[len('ProductName') + 1:].strip()
                    elif id_tag[0:len('ModelName')] == 'ModelName':
                        self.identity.ModelName = id_tag[len('ModelName') + 1:].strip()
                    elif id_tag[0:len('MajorMinorRevision')] == 'MajorMinorRevision':
                        self.identity.MajorMinorRevision = id_tag[len('MajorMinorRevision') + 1:].strip()
                    elif id_tag[0:len('sparse')].lower() == 'sparse':
                        # "sparse=1" selects sparse data blocks over sequential
                        self.sparse = bool(int(id_tag[len('sparse') + 1:].strip()))
            elif line[0:len(TEMPLATE_PARSER_NETWORK)] == TEMPLATE_PARSER_NETWORK:
                net_info = line.split(TEMPLATE_PARSER_SEPARATOR)
                for i in net_info:
                    if i[0:len(TEMPLATE_PARSER_SLAVE_ID)] == TEMPLATE_PARSER_SLAVE_ID:
                        net_id = int(i[len(TEMPLATE_PARSER_SLAVE_ID) + 1:].strip())
                        # NOTE(review): accepts 1..254 although the class docstring
                        # says slave_id may be 0..254 -- confirm intended range.
                        if net_id in range(1, 255):
                            self.slave_id = int(i[len(TEMPLATE_PARSER_SLAVE_ID) + 1:])
                        else:
                            self.log.error("Invalid Modbus Slave ID {id}".format(id=net_id))
                    elif i[0:len(TEMPLATE_PARSER_NOT_ZERO_MODE)] == TEMPLATE_PARSER_NOT_ZERO_MODE:
                        # PLC-style (one-based) addressing disables zero_mode
                        plc = int(i[len(TEMPLATE_PARSER_NOT_ZERO_MODE) + 1:].strip())
                        self.zero_mode = False if plc == 1 else True
                    # Byte order and word order may be per-register and are assumed to be handled by the Polling Client
                    # elif i[0:len('byteOrder')] == 'byteOrder':
                    #     self.byteorder = Endian.Big if i[len('byteOrder') + 1:].strip() == 'msb' else Endian.Little
                    # elif i[0:len('wordOrder')] == 'wordOrder':
                    #     self.wordorder = Endian.Big if i[len('wordOrder') + 1:].strip() == 'msw' else Endian.Little
            elif line[0:len(TEMPLATE_PARSER_REGISTER_DEF)] == TEMPLATE_PARSER_REGISTER_DEF:
                # TODO: handle multi-register blocks
                reg_config = line.split(TEMPLATE_PARSER_SEPARATOR)
                reg_exists = False
                this_reg = None
                for c in reg_config:
                    if c[0:len(TEMPLATE_PARSER_REG_UID)] == TEMPLATE_PARSER_REG_UID:
                        param_id = int(c[len(TEMPLATE_PARSER_REG_UID) + 1:].strip())
                        for reg in registers:
                            if param_id == reg.param_id:
                                reg_exists = True
                        if not reg_exists:
                            registers.append(self._RegisterBlockConfig(param_id=param_id))
                            reg_exists = True
                        this_reg = param_id
                    elif c[0:len(TEMPLATE_PARSER_REG_ADDRESS)] == TEMPLATE_PARSER_REG_ADDRESS:
                        addr = int(c[len(TEMPLATE_PARSER_REG_ADDRESS) + 1:].strip())
                        # TODO: confirm this works properly
                        if not self.zero_mode:
                            addr += 1
                        if addr in range(0, 99999 + 1):
                            for reg in registers:
                                if reg.param_id == this_reg:
                                    reg.address = addr
                            if not reg_exists:
                                registers.append(self._RegisterBlockConfig(address=addr))
                                reg_exists = True
                        else:
                            self.log.error("Invalid Modbus address {num}".format(num=addr))
                    elif c[0:len(TEMPLATE_PARSER_REG_TYPE)] == TEMPLATE_PARSER_REG_TYPE:
                        reg_type = c[len(TEMPLATE_PARSER_REG_TYPE) + 1:].strip()
                        if reg_type in [TEMPLATE_PARSER_TYPE_INPUT_REGISTER]:
                            for reg in registers:
                                if reg.param_id == this_reg:
                                    reg.register_type = TYPE_INPUT_REGISTER
                        elif reg_type in [TEMPLATE_PARSER_TYPE_HOLDING_REGISTER]:
                            for reg in registers:
                                if reg.param_id == this_reg:
                                    reg.register_type = TYPE_HOLDING_REGISTER
                        elif reg_type in [TEMPLATE_PARSER_TYPE_DISCRETE_INPUT]:
                            for reg in registers:
                                if reg.param_id == this_reg:
                                    reg.register_type = TYPE_DISCRETE_INPUT
                        elif reg_type in [TEMPLATE_PARSER_TYPE_COIL]:
                            for reg in registers:
                                if reg.param_id == this_reg:
                                    reg.register_type = TYPE_COIL
                        else:
                            self.log.error("Unsupported registerType {}".format(reg_type))
        # Bucket the parsed register definitions by register type, in either
        # sparse (address -> value) or sequential (list of addresses) form.
        hr_sparse_block = {}
        ir_sparse_block = {}
        di_sparse_block = {}
        co_sparse_block = {}
        hr_sequential = []
        ir_sequential = []
        di_sequential = []
        co_sequential = []
        for reg in registers:
            if reg.register_type == TYPE_HOLDING_REGISTER:
                if self.sparse:
                    hr_sparse_block[reg.address] = 0
                else:
                    hr_sequential.append(reg.address)
            elif reg.register_type == TYPE_INPUT_REGISTER:
                if self.sparse:
                    ir_sparse_block[reg.address] = 0
                else:
                    ir_sequential.append(reg.address)
            elif reg.register_type == TYPE_DISCRETE_INPUT:
                if self.sparse:
                    di_sparse_block[reg.address] = 0
                else:
                    di_sequential.append(reg.address)
            else:  # reg.register_type == TYPE_COIL
                # NOTE(review): any register whose type was never set (e.g. an
                # unsupported registerType) also falls through here -- confirm.
                if self.sparse:
                    co_sparse_block[reg.address] = 0
                else:
                    co_sequential.append(reg.address)
        if self.sparse:
            self.store['h'] = self._setup_sparse_block(hr_sparse_block, TYPE_HOLDING_REGISTER)
            self.store['i'] = self._setup_sparse_block(ir_sparse_block, TYPE_INPUT_REGISTER)
            self.store['d'] = self._setup_sparse_block(di_sparse_block, TYPE_DISCRETE_INPUT)
            self.store['c'] = self._setup_sparse_block(co_sparse_block, TYPE_COIL)
        else:
            self.store['h'] = self._setup_sequential_block(hr_sequential, TYPE_HOLDING_REGISTER)
            self.store['i'] = self._setup_sequential_block(ir_sequential, TYPE_INPUT_REGISTER)
            self.store['d'] = self._setup_sequential_block(di_sequential, TYPE_DISCRETE_INPUT)
            self.store['c'] = self._setup_sequential_block(co_sequential, TYPE_COIL)

    def _setup_sparse_block(self, block, register_type):
        """
        Sets up a custom ModbusSparseDataBlock for ClearBlade interaction and metadata

        :param dict block: a dictionary mapped as {address: value}
        :param str register_type: the type of register / memory value ['di', 'co', 'hr', 'ir']
        :return: a ModbusDataBlock
        :rtype: CbModbusSparseDataBlock or None
        """
        if register_type in REGISTER_TYPES and len(block) > 0:
            return CbModbusSparseDataBlock(context=self, register_type=register_type, values=block)
        else:
            return None

    def _setup_sequential_block(self, block, register_type):
        """
        Sets up a custom ModbusSequentialDataBlock for ClearBlade interaction and metadata

        :param list block: a list of register addresses (sorted in place here)
        :param str register_type: the type of register / memory value ['di', 'co', 'hr', 'ir']
        :return: a ModbusDataBlock
        :rtype: CbModbusSequentialDataBlock or None
        """
        if register_type in REGISTER_TYPES and len(block) > 0:
            block.sort()
            # Zero-fill every address in the inclusive span [block[0] .. block[-1]].
            # BUGFIX: the previous range(block[0], block[-1]) allocated one value
            # too few, leaving the highest address of the block unbacked.
            return CbModbusSequentialDataBlock(context=self, register_type=register_type,
                                               address=block[0],
                                               values=[0] * (block[-1] - block[0] + 1))
        else:
            return None

    class _RegisterBlockConfig(object):
        """Private class for parsing of register block metadata"""

        def __init__(self, param_id=None, address=None, register_type=None, block_size=1):
            """
            Initialize temporary metadata for parsing

            :param int param_id: A unique parameterId defined on the Modbus Proxy remote edge device
            :param int address: the starting address of the block
            :param str register_type: the type of data 'di', 'co', 'hr', 'ir'
            :param int block_size: (optional) for specifying blocks that span multiple registers (unused)
            """
            self.param_id = param_id
            self.address = address
            self.register_type = register_type
            self.block_size = block_size

    def __str__(self):
        """
        Returns a string representation of the context
        """
        return "ClearBlade Modbus Proxy Slave Context"

    def reset(self):
        """ NOT IMPLEMENTED - placeholder for future """
        # No-op for this implementation
        self.log.warning("Reset requested but no-operation due to complex proxy operation")

    def validate(self, fx, address, count=1):
        """
        Validates the request to make sure it is in range

        :param fx: The function we are working with
        :param address: The starting address
        :param count: The number of values to test
        :returns: True if the request in within range, False otherwise
        """
        if not self.zero_mode:
            # one-based (PLC) addressing: shift to the stored address space
            address = address + 1
        self.log.debug("validate[{}] {}:{}".format(fx, address, count))
        return self.store[self.decode(fx)].validate(address, count)

    def getValues(self, fx, address, count=1):
        """
        Returns the requested values from the datastore

        :param fx: The function we are working with
        :param address: The starting address
        :param count: The number of values to retrieve
        :returns: The requested values from address:address+count
        """
        if not self.zero_mode:
            address = address + 1
        self.log.debug("getValues[{}] {}:{}".format(fx, address, count))
        return self.store[self.decode(fx)].getValues(address, count)

    def setValues(self, fx, address, values):
        """
        Sets the datastore with the supplied values

        :param fx: The function we are working with
        :param address: The starting address
        :param values: The new values to be set
        """
        if not self.zero_mode:
            address = address + 1
        self.log.debug("setValues[{}] {}:{}".format(fx, address, len(values)))
        self.store[self.decode(fx)].setValues(address, values)
class ClearBladeModbusProxyServerContext(ModbusServerContext):
"""
A Modbus server context, initialized by reading a ClearBlade collection defining Slave configurations / templates
"""
def __init__(self, cb_system, cb_auth, cb_slaves_config, cb_data, **kwargs):
"""
Initialize the Server context
:param clearblade.ClearBladeCore.System cb_system: a ClearBlade System
:param clearblade.ClearBladeCore.Device cb_auth: a ClearBlade authenticated Device
:param str cb_slaves_config: the name of the ClearBlade Collection holding Slave definitions
:param str cb_data: the name of the ClearBlade Collection holding data
:param kwargs: optionally takes log definition
"""
super(ClearBladeModbusProxyServerContext, self).__init__(single=kwargs.get('single', False))
if is_logger(kwargs.get('log', | |
# Repository: ayushmankumar7/cc-licenses
"""
Every license can be identified by a URL, e.g. "http://creativecommons.org/licenses/by-nc-sa/4.0/"
or "http://creativecommons.org/licenses/by-nc-nd/2.0/tw/". In the RDF, this is the rdf:about
attribute on the cc:License element.
If a license has a child dc:source element, then this license is a translation of the license
with the url in the dc:source's rdf:resource attribute.
Some licenses have a dcq:isReplacedBy element.
"""
import os
import polib
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils import translation
from django.utils.translation import gettext
from i18n import DEFAULT_LANGUAGE_CODE
from i18n.utils import (
active_translation,
cc_to_django_language_code,
cc_to_filename_language_code,
get_default_language_for_jurisdiction,
get_translation_object,
)
from licenses import FREEDOM_LEVEL_MAX, FREEDOM_LEVEL_MID, FREEDOM_LEVEL_MIN
from licenses.constants import EXCLUDED_LANGUAGE_IDENTIFIERS
from licenses.transifex import TransifexHelper
# Maximum length of LegalCode.language_code (used as the CharField max_length below)
MAX_LANGUAGE_CODE_LENGTH = 8
# (by4 and by3 have the same license codes)
BY_LICENSE_CODES = ["by", "by-sa", "by-nc-nd", "by-nc", "by-nc-sa", "by-nd"]
CC0_LICENSE_CODES = ["CC0"]
class LegalCodeQuerySet(models.QuerySet):
    """Query helpers for selecting interesting subsets of LegalCode rows."""

    def translated(self):
        """
        Return a queryset of the LegalCode objects that we are doing the
        translation process on.
        """
        # 3.0 is excluded entirely: the unported 3.0 licenses are English-only
        # and the ported 3.0 licenses just store their HTML as-is.
        return self.exclude(license__version="3.0")

    def valid(self):
        """
        Return a queryset of the LegalCode objects that exist and are valid
        ones that we expect to work. This will change over time as we add
        support for more licenses.
        """
        # Currently supported: the BY* family at 4.0 and 3.0 (including
        # ported), plus CC0 (which only exists in one version).
        by_family_40 = Q(
            license__version="4.0",
            license__license_code__in=BY_LICENSE_CODES,
        )
        by_family_30 = Q(
            license__version="3.0",
            license__license_code__in=BY_LICENSE_CODES,
        )
        cc0_only = Q(license__license_code__in=CC0_LICENSE_CODES)
        wanted = by_family_40 | by_family_30 | cc0_only
        return self.filter(wanted).exclude(
            language_code__in=EXCLUDED_LANGUAGE_IDENTIFIERS
        )
class LegalCode(models.Model):
    # One translation of one license's legal code, in one language.
    license = models.ForeignKey(
        "licenses.License", on_delete=models.CASCADE, related_name="legal_codes"
    )
    language_code = models.CharField(
        max_length=MAX_LANGUAGE_CODE_LENGTH,
        help_text="E.g. 'en', 'en-ca', 'sr-Latn', or 'x-i18n'. Case-sensitive? "
        "This is the language code used by CC, which might be a little "
        "different from the Django language code.",
    )
    html_file = models.CharField(
        max_length=300, help_text="HTML file we got this from", blank=True, default=""
    )
    translation_last_update = models.DateTimeField(
        help_text="The last_updated field from Transifex for this translation",
        null=True,
        default=None,
    )
    title = models.TextField(
        help_text="License title in this language, e.g. 'Attribution-NonCommercial-NoDerivs 3.0 Unported'",
        blank=True,
        default="",
    )
    html = models.TextField(blank=True, default="")
    # The three URL fields below are derived from the license in save();
    # callers never set them directly.
    license_url = models.URLField(unique=True)
    deed_url = models.URLField(unique=True)
    plain_text_url = models.URLField(unique=True)
    objects = LegalCodeQuerySet.as_manager()
    class Meta:
        ordering = ["license__about"]
    def __str__(self):
        return f"LegalCode<{self.language_code}, {self.license}>"
    def save(self, *args, **kwargs):
        """Populate the derived URL fields, then save normally."""
        # NOTE(review): build_license_url/build_deed_url appear to be helpers
        # defined elsewhere in this module (not visible in this excerpt).
        license = self.license
        self.license_url = build_license_url(
            license.license_code,
            license.version,
            license.jurisdiction_code,
            self.language_code,
        )
        self.deed_url = build_deed_url(
            license.license_code,
            license.version,
            license.jurisdiction_code,
            self.language_code,
        )
        license_url = self.license_url
        # "...legalcode" URLs get "/index.txt" appended; all others just ".txt".
        self.plain_text_url = (
            f"{license_url}/index.txt"
            if license_url.endswith("legalcode")
            else f"{license_url}.txt"
        )
        super().save(*args, **kwargs)
    def _get_save_path(self):
        """
        If saving the deed or license as a static file, this returns
        the relative path where the saved file should be, not including
        the actual filename.
        For unported, uses "xu" as the "jurisdiction" in the filename.
        E.g. "publicdomain/3.0/xu" or "licenses/4.0"
        """
        license = self.license
        firstdir = (
            "publicdomain" if license.license_code.lower() == "cc0" else "licenses"
        )
        if license.version == "3.0":
            # "xu" for "unported"
            return os.path.join(
                firstdir, license.version, license.jurisdiction_code or "xu"
            )
        else:
            return os.path.join(firstdir, license.version)
    def get_deed_path(self):
        """
        Relative path for the saved deed file.
        See get_license_path()
        """
        license = self.license
        # CC0 uses "zero" as its filename code, e.g. "zero_deed_en.html".
        code = (
            "zero"
            if license.license_code.lower() == "cc0"
            else license.license_code.lower()
        )
        return os.path.join(
            self._get_save_path(),
            f"{code}_deed_{self.language_code}.html",
        )
    def get_license_path(self):
        """
        If saving the license as a static file, this returns the relative
        path of the file to save it as.
        4.0 formula:
            /licenses/VERSION/LICENSE_deed_LANGUAGE.html
            /licenses/VERSION/LICENSE_legalcode_LANGUAGE.html
        4.0 examples:
            /licenses/4.0/by-nc-nd_deed_en.html
            /licenses/4.0/by-nc-nd_legalcode_en.html
            /licenses/4.0/by_deed_en.html
            /licenses/4.0/by_legalcode_en.html
            /licenses/4.0/by_deed_zh-Hans.html
            /licenses/4.0/by_legalcode_zh-Hans.html
        3.0 formula:
            /licenses/VERSION/JURISDICTION/LICENSE_deed_LANGUAGE.html
            /licenses/VERSION/JURISDICTION/LICENSE_legalcode_LANGUAGE.html
        3.0 examples:
            /licenses/3.0/xu/by_deed_en.html
            /licenses/3.0/xu/by_legalcode_en.html
            /licenses/3.0/am/by_deed_hy.html
            /licenses/3.0/am/by_legalcode_hy.html
            /licenses/3.0/rs/by_deed_rs-Cyrl.html
            /licenses/3.0/rs/by_legalcode_rs-Cyrl.html
        For jurisdiction, I used “xu” to mean “unported”.
        See https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#User-assigned_code_elements.
        cc0 formula:
            /publicdomain/VERSION/LICENSE_deed_LANGUAGE.html
            /publicdomain/VERSION/LICENSE_legalcode_LANGUAGE.html
        cc0 examples:
            /publicdomain/1.0/zero_deed_en.html
            /publicdomain/1.0/zero_legalcode_en.html
            /publicdomain/1.0/zero_deed_ja.html
            /publicdomain/1.0/zero_legalcode_ja.html
        """
        license = self.license
        code = (
            "zero"
            if license.license_code.lower() == "cc0"
            else license.license_code.lower()
        )
        return os.path.join(
            self._get_save_path(),
            f"{code}_legalcode_{self.language_code}.html",
        )
    def has_english(self):
        """
        Return True if there's an English translation for the same license.
        """
        return (
            self.language_code == "en"
            or self.license.legal_codes.filter(language_code="en").exists()
        )
    def branch_name(self):
        """
        If this translation is modified, what is the name of the GitHub branch
        we'll use to manage the modifications?
        Basically its "{license code}-{version}-{language}[-{jurisdiction code}]",
        except that all the "by* 4.0" licenses use "cc4" for the license_code part.
        This has to be a valid DNS domain, so we also change any _ to - and
        remove any periods.
        """
        license = self.license
        parts = []
        if license.license_code.startswith("by") and license.version == "4.0":
            parts.append("cc4")
        else:
            parts.extend([license.license_code, license.version])
        parts.append(self.language_code)
        if license.jurisdiction_code:
            parts.append(license.jurisdiction_code)
        # DNS-safe: "_" -> "-", drop ".", lowercase everything.
        return "-".join(parts).replace("_", "-").replace(".", "").lower()
    def fat_code(self):
        """
        Returns e.g. 'CC BY-SA 4.0' - all upper case etc. No language.
        """
        return self.license.fat_code()
    @property
    def translation_domain(self):
        # gettext translation domain; one domain per license resource.
        return self.license.resource_slug
    def get_translation_object(self):
        """Return the Django translation object for this legal code's language."""
        domain = self.license.resource_slug
        return get_translation_object(
            django_language_code=cc_to_django_language_code(self.language_code),
            domain=domain,
        )
    def get_pofile(self) -> polib.POFile:
        """Load and return the .po file backing this translation."""
        with open(self.translation_filename(), "rb") as f:
            content = f.read()
        return polib.pofile(content.decode(), encoding="utf-8")
    def get_english_pofile(self) -> polib.POFile:
        """Return the .po file for the English translation of the same license."""
        if self.language_code != DEFAULT_LANGUAGE_CODE:
            # Same license, just in English translation:
            english_legalcode = self.license.get_legalcode_for_language_code(
                DEFAULT_LANGUAGE_CODE
            )
            return english_legalcode.get_pofile()
        return self.get_pofile()
    def translation_filename(self):
        """
        Return absolute path to the .po file with this translation.
        These are in the cc-licenses-data repository, in subdirectories:
          - "legalcode/"
          - language code (should match what Django uses, not what Transifex uses)
          - "LC_MESSAGES/"  (Django insists on this)
          - files
        The filenames are {resource_slug}.po (get the resource_slug
        from the license).
        e.g. for the BY-NC 4.0 French translation, which has no jurisdiction,
        the filename will be "by-nc_4.0.po", and in full,
        "{translation repo topdir}/legalcode/fr/by-nc_4.0.po".
        """
        filename = f"{self.license.resource_slug}.po"
        fullpath = os.path.join(
            settings.TRANSLATION_REPOSITORY_DIRECTORY,
            "legalcode",
            cc_to_filename_language_code(self.language_code),
            "LC_MESSAGES",
            filename,
        )
        return fullpath
class License(models.Model):
about = models.URLField(
max_length=200,
help_text="The license's unique identifier, e.g. 'http://creativecommons.org/licenses/by-nd/2.0/br/'",
unique=True,
)
license_code = models.CharField(
max_length=40,
help_text="shorthand representation for which class of licenses this falls into. "
"E.g. 'by-nc-sa', or 'MIT', 'nc-sampling+', 'devnations', ...",
)
version = models.CharField(
max_length=3, help_text="E.g. '4.0'. Not required.", blank=True, default=""
)
jurisdiction_code = models.CharField(max_length=9, blank=True, default="")
creator_url = models.URLField(
max_length=200,
blank=True,
default="",
help_text="E.g. http://creativecommons.org",
)
license_class_url = models.URLField(
max_length=200,
help_text="E.g. http://creativecommons.org/license/",
blank=True,
default="",
)
title_english = models.TextField(
help_text="License title in English, e.g. 'Attribution-NonCommercial-NoDerivs 3.0 Unported'",
blank=True,
default="",
)
source = models.ForeignKey(
"self",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="source_of",
help_text="another license that this is the translation of",
)
is_replaced_by = models.ForeignKey(
"self",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="replaces",
help_text="another license that has replaced this one",
)
is_based_on = models.ForeignKey(
"self",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="base_of",
help_text="another license that this one is based on",
)
deprecated_on = models.DateField(
null=True, help_text="if set, the date on which this license was deprecated"
)
permits_derivative_works = models.BooleanField()
permits_reproduction = models.BooleanField()
permits_distribution = models.BooleanField()
permits_sharing = models.BooleanField()
requires_share_alike = models.BooleanField()
requires_notice = models.BooleanField()
requires_attribution = models.BooleanField()
requires_source_code = models.BooleanField()
prohibits_commercial_use = models.BooleanField()
prohibits_high_income_nation_use = models.BooleanField()
class Meta:
ordering = ["-version", "license_code", "jurisdiction_code"]
def __str__(self):
return f"License<{self.license_code},{self.version},{self.jurisdiction_code}>"
def get_metadata(self):
"""
Return a dictionary with the metadata for this license.
"""
data = {
"license_code": self.license_code,
"version": self.version,
"title_english": self.title_english,
}
if self.jurisdiction_code:
data["jurisdiction"] = self.jurisdiction_code
data["permits_derivative_works"] = self.permits_derivative_works
data["permits_reproduction"] = self.permits_reproduction
data["permits_distribution"] = self.permits_distribution
data["permits_sharing"] = self.permits_sharing
data["requires_share_alike"] = self.requires_share_alike
data["requires_notice"] = self.requires_notice
data["requires_attribution"] = self.requires_attribution
data["requires_source_code"] = self.requires_source_code
data["prohibits_commercial_use"] = self.prohibits_commercial_use
data["prohibits_high_income_nation_use"] = self.prohibits_high_income_nation_use
data["translations"] = {}
for lc in self.legal_codes.order_by("language_code"):
language_code = lc.language_code
with active_translation(lc.get_translation_object()):
data["translations"][language_code] = {
"license": lc.license_url,
"deed": lc.deed_url,
"title": gettext(self.title_english),
}
return data
def logos(self):
"""
Return an iterable of the codes for the logos that should be
displayed with this license. E.g.:
["cc-logo", "cc-zero", "cc-by"]
"""
result = ["cc-logo"] # Everybody gets this
if self.license_code == "CC0":
result.append("cc-zero")
elif self.license_code.startswith("by"):
result.append("cc-by")
if self.prohibits_commercial_use:
result.append("cc-nc")
if self.requires_share_alike:
result.append("cc-sa")
if not self.permits_derivative_works:
result.append("cc-nd")
return result
def get_legalcode_for_language_code(self, language_code):
"""
Return the LegalCode object for this license and language.
If language_code has a "-" and we don't find it, try
without the "-*" part (to handle e.g. "en-us").
"""
if not language_code:
language_code = translation.get_language()
try:
return self.legal_codes.get(language_code=language_code)
except LegalCode.DoesNotExist:
if "-" in language_code: # e.g. "en-us"
lang = language_code.split("-")[0]
return self.legal_codes.get(language_code=lang)
raise
    @property
    def resource_name(self):
        """Human-readable name for the translation resource for this license"""
        # Delegates to License.fat_code() (defined elsewhere in this class),
        # which returns e.g. "CC BY-SA 4.0" per LegalCode.fat_code's docstring.
        return self.fat_code()
@property
def resource_slug(self):
# Transifex translation resource slug for this license.
# letters, | |
<filename>scripts/qos/test_qos.py
from tcutils.wrappers import preposttest_wrapper
from compute_node_test import ComputeNodeFixture
import test
from common.qos.base import *
from svc_instance_fixture import SvcInstanceFixture
from svc_template_fixture import SvcTemplateFixture
from policy_test import PolicyFixture
from vn_policy_test import VN_Policy_Fixture
from tcutils.traffic_utils.scapy_traffic_gen import ScapyTraffic
from tcutils.traffic_utils.traffic_analyzer import TrafficAnalyzer
from tcutils.util import skip_because
class TestQos(QosTestExtendedBase):
    @classmethod
    def setUpClass(cls):
        # Let the QoS base class build the shared topology/fixtures, then
        # create the generator used below to pick unused forwarding-class IDs
        # (see get_free_fc_ids() calls in the tests).
        super(TestQos, cls).setUpClass()
        cls.fc_id_obj = FcIdGenerator(cls.vnc_lib)
    # end setUpClass
    @classmethod
    def tearDownClass(cls):
        # Clean up the shared topology created in setUpClass via the base class.
        super(TestQos, cls).tearDownClass()
    # end tearDownClass
@test.attr(type=['cb_sanity', 'sanity'])
@preposttest_wrapper
@skip_because(hypervisor='docker',msg='Bug 1654955')
def test_qos_remark_dscp_on_vmi(self):
''' Create a qos config for remarking DSCP 1 to 10
Have VMs A, B
Apply the qos config to VM A
Validate that packets from A to B have DSCP marked correctly
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 10, 'dot1p': 1, 'exp': 1}]
fc_fixtures = self.setup_fcs(fcs)
dscp_map = {1: fc_id[0]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map)
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
dscp=dscp_map.keys()[0],
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
src_port='10000',
dest_port='20000',
src_compute_fixture=self.vn1_vm1_compute_fixture)
# end test_qos_remark_dscp_on_vmi
@preposttest_wrapper
def test_qos_remark_dscp_on_vmi_ipv6(self):
''' Create a qos config for remarking DSCP 1 to 10
Have VMs A, B with IPv6 IPs configured
Apply the qos config to VM A
Validate that packets from A to B have DSCP marked correctly
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 10, 'dot1p': 1, 'exp': 1}]
fc_fixtures = self.setup_fcs(fcs)
dscp_map = {1: fc_id[0]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map)
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
traffic_generator='scapy',
dscp=dscp_map.keys()[0],
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
src_compute_fixture=self.vn1_vm1_compute_fixture,
af='ipv6',
src_mac=self.vn1_vm1_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
dst_mac=self.vn1_vm2_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
ipv6_src=str(self.vn1_vm1_fixture.vm_ips[1]),
ipv6_dst=str(self.vn1_vm2_fixture.vm_ips[1]),
offset = 132)
# end test_qos_remark_dscp_on_vmi_ipv6
@preposttest_wrapper
def test_qos_remark_dot1p_on_vmi(self):
''' Create a qos config for remarking DOT1P 2 to 6
Have VMs A, B
Apply the qos config to VM A
Validate that packets from A to B have all fields marked correctly
Giving a valid destination mac in the packet.
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 12, 'dot1p': 6, 'exp': 2}]
fc_fixtures = self.setup_fcs(fcs)
dot1p_map = {2: fc_id[0]}
qos_fixture = self.setup_qos_config(dot1p_map=dot1p_map)
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
traffic_generator='scapy',
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
src_compute_fixture=self.vn1_vm1_compute_fixture,
dot1p=dot1p_map.keys()[0],
src_mac=self.vn1_vm1_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
dst_mac=self.vn1_vm2_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
offset = 100)
# end test_qos_remark_dot1p_on_vmi
@preposttest_wrapper
def test_qos_remark_dscp_on_vn(self):
''' Create a qos config for remarking DSCP 1 to 10
Have VMs A, B
Apply the qos config to the VN
Validate that packets from A to B have DSCP marked correctly
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 10, 'dot1p': 1, 'exp': 1}]
self.setup_fcs(fcs)
dscp_map = {1: fc_id[0]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map)
self.setup_qos_config_on_vn(qos_fixture, self.vn1_fixture.uuid)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
dscp=dscp_map.keys()[0],
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
src_port='10000',
dest_port='20000',
src_compute_fixture=self.vn1_vm1_compute_fixture)
# end test_qos_remark_dscp_on_vmi
@preposttest_wrapper
def test_qos_remark_dscp_on_vn_ipv6(self):
''' Create a qos config for remarking DSCP 1 to 10
Have VMs A, B with IPv6 IPs configured
Apply the qos config to the VN
Validate that packets from A to B have DSCP marked correctly
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 23, 'dot1p': 3, 'exp': 7}]
self.setup_fcs(fcs)
dscp_map = {10: fc_id[0]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map)
self.setup_qos_config_on_vn(qos_fixture, self.vn1_fixture.uuid)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
traffic_generator='scapy',
dscp=dscp_map.keys()[0],
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
expected_exp=fcs[0]['exp'],
src_compute_fixture=self.vn1_vm1_compute_fixture,
af='ipv6',
src_mac=self.vn1_vm1_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
ipv6_src=str(self.vn1_vm1_fixture.vm_ips[1]),
ipv6_dst=str(self.vn1_vm2_fixture.vm_ips[1]),
offset = 156,
encap = "MPLS_any")
# end test_qos_remark_dscp_on_vn_ipv6
@preposttest_wrapper
def test_qos_remark_dot1p_on_vn(self):
''' Create a qos config for remarking Dot1p 3 to 5
Have VMs A, B
Apply the qos config to the VN
Validate that packets from A to B have Dot1P marked correctly
'''
fc_id = self.fc_id_obj.get_free_fc_ids(1)
fcs = [{'fc_id': fc_id[0], 'dscp': 23, 'dot1p': 5, 'exp': 3}]
self.setup_fcs(fcs)
dot1p_map = {3: fc_id[0]}
qos_fixture = self.setup_qos_config(dot1p_map=dot1p_map)
self.setup_qos_config_on_vn(qos_fixture, self.vn1_fixture.uuid)
assert self.validate_packet_qos_marking(
src_vm_fixture=self.vn1_vm1_fixture,
dest_vm_fixture=self.vn1_vm2_fixture,
traffic_generator='scapy',
expected_dscp=fcs[0]['dscp'],
expected_dot1p=fcs[0]['dot1p'],
expected_exp=fcs[0]['exp'],
src_compute_fixture=self.vn1_vm1_compute_fixture,
dot1p=dot1p_map.keys()[0],
src_mac=self.vn1_vm1_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
encap = "MPLS_any")
# As dst_mac is not mentioned, it will be set to bcast mac.
# end test_qos_remark_dot1p_on_vn
@preposttest_wrapper
def test_qos_config_and_fc_update_for_dscp(self):
''' Create a qos config for remarking DSCP 1 to fc1(DSCP 10)
Have vms A,B. Apply the qos config to VM A
Update the qos-config to map DSCP 1 to fc 2(DSCP 11)
Validate that packets from A to B have DSCP marked to 11
Update the FC 2 with dscp 12
Validate that packets from A to B have DSCP marked to 12
Update FC 2 with fc_id 3
Update qos-config also to point dscp 1 to fc id 3
Validate that packets from A to B have DSCP marked to 12
'''
fc_ids= self.fc_id_obj.get_free_fc_ids(2)
fcs = [{'fc_id': fc_ids[0], 'dscp': 10, 'dot1p': 1, 'exp': 1},
{'fc_id': fc_ids[1], 'dscp': 11, 'dot1p': 1, 'exp': 1}]
fc_fixtures = self.setup_fcs(fcs)
dscp_map1 = {1: fc_ids[0]}
dscp_map2 = {1: fc_ids[1]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map1)
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
# Change the FC for the qos-config entry
qos_fixture.set_entries(dscp_mapping=dscp_map2)
validate_method_args = {
'src_vm_fixture': self.vn1_vm1_fixture,
'dest_vm_fixture': self.vn1_vm2_fixture,
'dscp': dscp_map1.keys()[0],
'expected_dscp': fcs[1]['dscp'],
'expected_dot1p': fcs[1]['dot1p'],
'src_port': '10000',
'dest_port': '20000',
'src_compute_fixture': self.vn1_vm1_compute_fixture}
assert self.validate_packet_qos_marking(**validate_method_args)
# Change FC's dscp remark now
fc_fixtures[1].update(dscp=12, dot1p=5)
validate_method_args['expected_dscp'] = 12
validate_method_args['expected_dot1p'] = 5
assert self.validate_packet_qos_marking(**validate_method_args)
# Change FC id
new_fc_id = self.fc_id_obj.get_free_fc_ids(1)
dscp_map3 = {1: new_fc_id[0]}
fc_fixtures[1].update(fc_id=new_fc_id[0])
qos_fixture.set_entries(dscp_mapping=dscp_map3)
assert self.validate_packet_qos_marking(**validate_method_args)
# end test_qos_config_and_fc_update_for_dscp
@preposttest_wrapper
def test_qos_config_and_fc_update_for_dot1p(self):
''' Create a qos config for remarking Dot1p 1 to fc1(Dot1p 4)
Have vms A,B. Apply the qos config to VN
Update the qos-config to map Dot1p 1 to fc2(Dot1p 6)
Validate that packets from A to B have Dot1P marked to 6
Update the FC 2 with dot1p 2
Validate that packets from A to B have Dot1p marked to 2
Update FC 2 with fc_id 3
Update qos-config also to point Dot1p 1 to fc id 3
Validate that packets from A to B have Dot1p marked to 2
'''
fc_ids= self.fc_id_obj.get_free_fc_ids(2)
fcs = [{'fc_id': fc_ids[0], 'dscp': 10, 'dot1p': 4, 'exp': 1},
{'fc_id': fc_ids[1], 'dscp': 11, 'dot1p': 6, 'exp': 1}]
fc_fixtures = self.setup_fcs(fcs)
dot1p_map1 = {1: fc_ids[0]}
dot1p_map2 = {1: fc_ids[1]}
dot1p_map4 = {2: fc_ids[0]}
qos_fixture = self.setup_qos_config(dot1p_map=dot1p_map1)
self.setup_qos_config_on_vn(qos_fixture, self.vn1_fixture.uuid)
validate_method_args = {
'src_vm_fixture': self.vn1_vm1_fixture,
'dest_vm_fixture': self.vn1_vm2_fixture,
'dot1p': dot1p_map1.keys()[0],
'expected_dscp': fcs[1]['dscp'],
'expected_dot1p': fcs[1]['dot1p'],
'traffic_generator': 'scapy',
'src_mac': self.vn1_vm1_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
'dst_mac': self.vn1_vm2_fixture.mac_addr[
self.vn1_fixture.vn_fq_name],
'src_compute_fixture': self.vn1_vm1_compute_fixture,
'offset' : 100}
# Change the FC for the qos-config entry
qos_fixture.set_entries(dot1p_mapping=dot1p_map2)
assert self.validate_packet_qos_marking(**validate_method_args)
# Change FC's dot1p remark now
fc_fixtures[1].update(dscp=12, dot1p=7)
validate_method_args['expected_dscp'] = 12
validate_method_args['expected_dot1p'] = 7
assert self.validate_packet_qos_marking(**validate_method_args)
# Change FC id
new_fc_id = self.fc_id_obj.get_free_fc_ids(1)
dot1p_map3 = {1: new_fc_id[0]}
fc_fixtures[1].update(fc_id=new_fc_id[0])
qos_fixture.set_entries(dot1p_mapping=dot1p_map3)
assert self.validate_packet_qos_marking(**validate_method_args)
# Add entry in Dot1P map tablee
qos_fixture.add_entries(dot1p_mapping=dot1p_map4)
validate_method_args['dot1p'] = dot1p_map4.keys()[0]
validate_method_args['expected_dscp'] = fcs[0]['dscp']
validate_method_args['expected_dot1p'] = fcs[0]['dot1p']
assert self.validate_packet_qos_marking(**validate_method_args)
# end test_qos_config_and_fc_update_for_dot1p
@preposttest_wrapper
def test_qos_remark_based_on_default_fc(self):
'''
To test dafault FC ID 0 works as expected
Steps:
1. Configure FC ID 0 which is default FC for all Qos Configs.
2. Create another FC ID
3. Send traffic for Non default FC ID and verify that marking
happens as per that FC ID.
4. Send any other traffic and verify that it automatically gets
mapped to default FC ID and gets marking as per FC ID 0
'''
fc_ids= self.fc_id_obj.get_free_fc_ids(2)
fcs = [{'fc_id': fc_ids[0], 'dscp': 9, 'dot1p': 3, 'exp': 3},
{'fc_id': fc_ids[1], 'dscp': 10, 'dot1p': 4, 'exp': 1}]
fc_fixtures = self.setup_fcs(fcs)
dscp_map = {30: fc_ids[1]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map,
default_fc_id = fc_ids[0])
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
validate_method_args = {
'src_vm_fixture': self.vn1_vm1_fixture,
'dest_vm_fixture': self.vn1_vm2_fixture,
'dscp': dscp_map.keys()[0],
'expected_dscp': fcs[1]['dscp'],
'expected_dot1p': fcs[1]['dot1p'],
'src_compute_fixture': self.vn1_vm1_compute_fixture}
assert self.validate_packet_qos_marking(**validate_method_args)
validate_method_args['dscp'] = 31
validate_method_args['expected_dscp'] = fcs[0]['dscp']
validate_method_args['expected_dot1p'] = fcs[0]['dot1p']
assert self.validate_packet_qos_marking(**validate_method_args)
validate_method_args['dscp'] = 0
assert self.validate_packet_qos_marking(**validate_method_args)
# end test_qos_remark_based_on_default_fc
@preposttest_wrapper
def test_default_fc_update(self):
'''
To test dafault FC ID values can be modified
Steps:
1. Create a qos config and use default FC ID as 0.
2. Verify that traffic get marked as per default FC ID 1
3. Change the default FC ID applied to same qos-config to value 3
and verify marking happens as per new FC ID 3
4. Verify that traffic mapping to valid dscp value in qos-map
gets marked as per the non defaul FC mentioned in qos-map
'''
fc_ids= self.fc_id_obj.get_free_fc_ids(3)
fcs = [{'fc_id': fc_ids[0] , 'dscp': 9, 'dot1p': 3, 'exp': 3},
{'fc_id': fc_ids[1], 'dscp': 10, 'dot1p': 4, 'exp': 4},
{'fc_id': fc_ids[2], 'dscp': 11, 'dot1p': 5, 'exp': 5}]
fc_fixtures = self.setup_fcs(fcs)
dscp_map = {30: fc_ids[1]}
qos_fixture = self.setup_qos_config(dscp_map=dscp_map,
default_fc_id=fc_ids[0])
vm1_vmi_id = self.vn1_vm1_fixture.get_vmi_ids().values()[0]
self.setup_qos_config_on_vmi(qos_fixture, vm1_vmi_id)
validate_method_args = {
'src_vm_fixture': self.vn1_vm1_fixture,
| |
# ml4ai/tomcat-planrec
# actions
def go_to_school(state):
    """Mark school as attended; mutates and returns the same state dict."""
    state.update({"went-to-school": 1})
    return state
def go_to_work(state):
    """Mark work as attended; mutates and returns the same state dict."""
    state.update({"went-to-work": 1})
    return state
def do_chores(state):
    """Mark chores as done; mutates and returns the same state dict."""
    state.update({"did-chores": 1})
    return state
def do_homework(state):
    """Mark homework done and clear the pending-homework flag."""
    state.update({"did-homework": 1, "have-homework": 0})
    return state
def stay_for_tutoring(state):
    """Mark tutoring as attended; mutates and returns the same state dict."""
    state.update({"stayed-for-tutoring": 1})
    return state
def go_running(state):
    """Mark the run as done; mutates and returns the same state dict."""
    state.update({"ran": 1})
    return state
def play_videogames(state):
    """Mark videogames as played; mutates and returns the same state dict."""
    state.update({"played-videogames": 1})
    return state
def go_to_store(state):
    """Mark the store visit done and clear the need-groceries flag."""
    state.update({"went-to-store": 1, "need-groceries": 0})
    return state
def watch_movie(state):
    """Mark the movie watched and clear the found-movies flag.

    NOTE(review): this clears "found-movies" (plural) while the precondition
    functions read "found-movie" (singular) — confirm which key is intended.
    """
    state.update({"watched-movie": 1, "found-movies": 0})
    return state
# Primitive operators available to the planner; each mutates and returns state.
actions = [
    go_to_school,
    go_to_work,
    do_chores,
    do_homework,
    stay_for_tutoring,
    go_running,
    play_videogames,
    go_to_store,
    watch_movie,
]
# preconditions
def default(state):
    """Precondition that always holds, regardless of state."""
    return True
def work_raining_homework(state):
    """True iff raining, homework pending, and work today."""
    return bool(state["raining"] and state["have-homework"] and state["work-today"])
def raining_homework(state):
    """True iff raining, homework pending, and no work today."""
    return bool(state["raining"] and state["have-homework"] and not state["work-today"])
def work_homework(state):
    """True iff not raining, homework pending, and work today."""
    return bool(not state["raining"] and state["have-homework"] and state["work-today"])
def homework(state):
    """True iff not raining, homework pending, and no work today."""
    return bool(not state["raining"] and state["have-homework"] and not state["work-today"])
def work_raining(state):
    """True iff raining, no homework, and work today."""
    return bool(state["raining"] and not state["have-homework"] and state["work-today"])
def raining(state):
    """True iff raining, no homework, and no work today."""
    return bool(state["raining"] and not state["have-homework"] and not state["work-today"])
def work(state):
    """True iff not raining, no homework, and work today."""
    return bool(not state["raining"] and not state["have-homework"] and state["work-today"])
def no_work(state):
    """True iff not raining, no homework, and no work today."""
    return bool(not state["raining"] and not state["have-homework"] and not state["work-today"])
def work_raining_homework_store(state):
    """True iff raining, homework pending, work today, and groceries needed."""
    return bool(state["raining"] and state["have-homework"]
                and state["work-today"] and state["need-groceries"])
def work_raining_homework_no_store(state):
    """True iff raining, homework pending, work today, and no groceries needed."""
    return bool(state["raining"] and state["have-homework"]
                and state["work-today"] and not state["need-groceries"])
def raining_homework_store(state):
    """True iff raining, homework pending, no work, and groceries needed."""
    return bool(state["raining"] and state["have-homework"]
                and not state["work-today"] and state["need-groceries"])
def raining_homework_no_store(state):
    """True iff raining, homework pending, no work, and no groceries needed."""
    return bool(state["raining"] and state["have-homework"]
                and not state["work-today"] and not state["need-groceries"])
def work_homework_store(state):
    """True iff not raining, homework pending, work today, and groceries needed."""
    return bool(not state["raining"] and state["have-homework"]
                and state["work-today"] and state["need-groceries"])
def work_homework_no_store(state):
    """True iff not raining, homework pending, work today, and no groceries needed."""
    return bool(not state["raining"] and state["have-homework"]
                and state["work-today"] and not state["need-groceries"])
def homework_store(state):
    """True iff not raining, homework pending, no work, and groceries needed."""
    return bool(not state["raining"] and state["have-homework"]
                and not state["work-today"] and state["need-groceries"])
def homework_no_store(state):
    """True iff not raining, homework pending, no work, and no groceries needed."""
    return bool(not state["raining"] and state["have-homework"]
                and not state["work-today"] and not state["need-groceries"])
def work_raining_store(state):
    """True iff raining, no homework, work today, and groceries needed."""
    return bool(state["raining"] and not state["have-homework"]
                and state["work-today"] and state["need-groceries"])
def work_raining_no_store(state):
    """True iff raining, no homework, work today, and no groceries needed."""
    return bool(state["raining"] and not state["have-homework"]
                and state["work-today"] and not state["need-groceries"])
def raining_store(state):
    """True iff raining, no homework, no work, and groceries needed."""
    return bool(state["raining"] and not state["have-homework"]
                and not state["work-today"] and state["need-groceries"])
def raining_no_store(state):
    """True iff raining, no homework, no work, and no groceries needed."""
    return bool(state["raining"] and not state["have-homework"]
                and not state["work-today"] and not state["need-groceries"])
def work_store(state):
    """True iff not raining, no homework, work today, and groceries needed."""
    return bool(not state["raining"] and not state["have-homework"]
                and state["work-today"] and state["need-groceries"])
def work_no_store(state):
    """True iff not raining, no homework, work today, and no groceries needed."""
    return bool(not state["raining"] and not state["have-homework"]
                and state["work-today"] and not state["need-groceries"])
def no_work_store(state):
    """True iff not raining, no homework, no work, and groceries needed."""
    return bool(not state["raining"] and not state["have-homework"]
                and not state["work-today"] and state["need-groceries"])
def no_work_no_store(state):
    """True iff not raining, no homework, no work, and no groceries needed."""
    return bool(not state["raining"] and not state["have-homework"]
                and not state["work-today"] and not state["need-groceries"])
def raining_homework_movie(state):
    """True iff raining, homework pending, no work, and a movie was found."""
    return bool(state["raining"] and state["have-homework"]
                and not state["work-today"] and state["found-movie"])
def raining_homework_no_movie(state):
    """True iff raining, homework pending, no work, and no movie found."""
    return bool(state["raining"] and state["have-homework"]
                and not state["work-today"] and not state["found-movie"])
def homework_movie(state):
    """True iff not raining, homework pending, no work, and a movie was found."""
    return bool(not state["raining"] and state["have-homework"]
                and not state["work-today"] and state["found-movie"])
def homework_no_movie(state):
    """True iff not raining, homework pending, no work, and no movie found."""
    return bool(not state["raining"] and state["have-homework"]
                and not state["work-today"] and not state["found-movie"])
def raining_movie(state):
    """True iff raining, no homework, no work, and a movie was found."""
    return bool(state["raining"] and not state["have-homework"]
                and not state["work-today"] and state["found-movie"])
def raining_no_movie(state):
    """True iff raining, no homework, no work, and no movie found."""
    return bool(state["raining"] and not state["have-homework"]
                and not state["work-today"] and not state["found-movie"])
def no_work_movie(state):
    """True iff not raining, no homework, no work, and a movie was found."""
    return bool(not state["raining"] and not state["have-homework"]
                and not state["work-today"] and state["found-movie"])
def no_work_no_movie(state):
    """True iff not raining, no homework, no work, and no movie found."""
    return bool(not state["raining"] and not state["have-homework"]
                and not state["work-today"] and not state["found-movie"])
def work_raining_friday(state):
    """True iff raining and work today (homework ignored on Fridays)."""
    return bool(state["raining"] and state["work-today"])
def raining_friday(state):
    """True iff raining and no work today (homework ignored on Fridays)."""
    return bool(state["raining"] and not state["work-today"])
def work_friday(state):
    """True iff not raining and work today (homework ignored on Fridays)."""
    return bool(not state["raining"] and state["work-today"])
def no_work_friday(state):
    """True iff not raining and no work today (homework ignored on Fridays)."""
    return bool(not state["raining"] and not state["work-today"])
# methods
methods = [
{
"task": "P",
"preconditions": default,
"subtasks": ["MONDAY"],
"t_prob": 0.2,
},
{
"task": "P",
"preconditions": default,
"subtasks": ["TUESDAY"],
"t_prob": 0.2,
},
{
"task": "P",
"preconditions": default,
"subtasks": ["WEDNESDAY"],
"t_prob": 0.2,
},
{
"task": "P",
"preconditions": default,
"subtasks": ["THURSDAY"],
"t_prob": 0.2,
},
{
"task": "P",
"preconditions": default,
"subtasks": ["FRIDAY"],
"t_prob": 0.2,
},
{
"task": "MONDAY",
"preconditions": work_raining_homework,
"subtasks": [
"!go_to_school",
"!go_to_work",
"!do_chores",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": raining_homework,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!do_chores",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": work_homework,
"subtasks": [
"!go_to_school",
"!go_to_work",
"!go_running",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": homework,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_running",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": work_raining,
"subtasks": [
"!go_to_school",
"!go_to_work",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": raining,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": work,
"subtasks": [
"!go_to_school",
"!go_to_work",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "MONDAY",
"preconditions": no_work,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_raining_homework_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_to_store",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_raining_homework_no_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!do_homework",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": raining_homework_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_to_store",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": raining_homework_no_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!do_homework",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_homework_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_to_store",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_homework_no_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!do_homework",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": homework_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_to_store",
"!do_homework",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": homework_no_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!do_homework",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_raining_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_to_store",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_raining_no_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": raining_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_to_store",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": raining_no_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_to_store",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": work_no_store,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": no_work_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_to_store",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "TUESDAY",
"preconditions": no_work_no_store,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": work_raining_homework,
"subtasks": [
"!go_to_school",
"!do_homework",
"!go_to_work",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": raining_homework,
"subtasks": [
"!go_to_school",
"!do_homework",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": work_homework,
"subtasks": [
"!go_to_school",
"!do_homework",
"!go_to_work",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": homework,
"subtasks": [
"!go_to_school",
"!do_homework",
"!go_running",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": work_raining,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_to_work",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": raining,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!play_videogames",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": work,
"subtasks": [
"!go_to_school",
"!go_to_work",
"!go_running",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "WEDNESDAY",
"preconditions": no_work,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!go_running",
"!do_chores",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": work_raining_homework,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!do_homework",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": raining_homework_movie,
"subtasks": [
"!go_to_school",
"!do_homework",
"!do_chores",
"!watch_movie",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": raining_homework_no_movie,
"subtasks": [
"!go_to_school",
"!do_homework",
"!do_chores",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": work_homework,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!do_homework",
"!go_running",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": homework_movie,
"subtasks": [
"!go_to_school",
"!do_homework",
"!go_running",
"!watch_movie",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": homework_no_movie,
"subtasks": [
"!go_to_school",
"!do_homework",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": work_raining,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!stay_for_tutoring",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": raining_movie,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!play_videogames",
"!watch_movie",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": raining_no_movie,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": work,
"subtasks": [
"!go_to_work",
"!go_to_school",
"!go_running",
"!play_videogames",
],
"t_prob": 1,
},
{
"task": "THURSDAY",
"preconditions": no_work_movie,
"subtasks": [
"!go_to_school",
"!stay_for_tutoring",
"!play_videogames",
"!watch_movie",
],
"t_prob": 1,
| |
# defects4cpp/processor/test.py
"""
Test command.
Run tests of the project inside a container.
"""
import argparse
import shutil
from dataclasses import dataclass
from os import getcwd
from pathlib import Path
from textwrap import dedent
from typing import Callable, Generator, List, Optional, Set, Union, cast
from errors import DppArgparseFileNotFoundError, DppCaseExpressionInternalError
from errors.argparser import DppArgparseInvalidCaseExpressionError
from message import message
from processor.core.argparser import create_common_project_parser
from processor.core.command import (
DockerCommand,
DockerCommandScript,
DockerCommandScriptGenerator,
)
from processor.core.data import Worktree
from taxonomy import Command, CommandType, Defect, MetaData
class ValidateCase(argparse.Action):
    """argparse action that parses a test-case selection expression.

    Stores a ``(included_cases, excluded_cases)`` tuple of int sets on the
    namespace attribute named by ``dest``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        """
        case_expression == INCLUDE[:EXCLUDE]

        INCLUDE | EXCLUDE
        * select: ','
        * range: '-'
        e.g.
        1-100:3,6,7 (to 100 from 1 except 3, 6 and 7)
        20-30,40-88:47-52 (to 30 from 20 and to 88 from 40 except to 62 from 47)
        """

        def expr2set(expr: str) -> Set[int]:
            # Parse "1-3,7" into {1, 2, 3, 7}; an empty expression is empty.
            if not expr:
                return set()
            val: Set[int] = set()
            partitions = expr.split(",")
            for partition in partitions:
                tokens = partition.split("-")
                if len(tokens) == 1:
                    val.add(int(tokens[0]))
                else:
                    # Ranges are inclusive at both ends.
                    val.update(range(int(tokens[0]), int(tokens[1]) + 1))
            return val

        def validate_each_case(max_num_cases: int, case_set: Set[int]) -> Set[int]:
            # Every selected case must fall within [1, max_num_cases].
            # NOTE: closes over `index`/`metadata`, which are bound below
            # before this function is first called.
            if all(0 < case <= max_num_cases for case in case_set):
                return case_set
            raise DppArgparseInvalidCaseExpressionError(
                index, metadata.name, max_num_cases, values
            )

        try:
            metadata: MetaData = namespace.metadata
            index: int = namespace.worktree.index
        except AttributeError:
            # 'metadata'/'worktree' must have been populated by earlier actions.
            raise DppCaseExpressionInternalError(namespace)

        # Total selectable cases: regular test cases plus any extra tests.
        num_cases = metadata.defects[index - 1].num_cases + len(metadata.defects[index - 1].extra_tests)
        expr_tokens = values.split(":")
        included_cases = validate_each_case(num_cases, expr2set(expr_tokens[0]))
        excluded_cases = validate_each_case(
            num_cases, expr2set(expr_tokens[1]) if len(expr_tokens) > 1 else set()
        )
        setattr(namespace, self.dest, (included_cases, excluded_cases))
class ValidateOutputDirectory(argparse.Action):
    """argparse action that rejects output paths which do not exist."""

    def __call__(self, parser, namespace, values, option_string=None):
        target = Path(values)
        if not target.exists():
            # Surface a dpp-specific error instead of argparse's default.
            raise DppArgparseFileNotFoundError(values)
        setattr(namespace, self.dest, values)
class ObservableAttributeMeta(type):
    """Metaclass that wraps named methods so instance callbacks fire.

    Subclasses pass ``methods=[...]`` as a class keyword; each listed method
    is replaced by a wrapper that calls the original and then invokes every
    callable in the instance's ``callbacks`` with the same arguments.
    """

    def __new__(mcs, name, bases, attr, methods=None):
        if methods is None:
            methods = []
        for method in methods:
            # Replace the method in the class dict before the class is built.
            attr[method] = mcs.wrap(attr[method])
        return super().__new__(mcs, name, bases, attr)

    @classmethod
    def wrap(mcs, fn):
        """Return fn wrapped to notify obj.callbacks after each call."""
        def update(obj, *args, **kwargs):
            output = fn(obj, *args, **kwargs)
            # Callbacks receive the same arguments as the wrapped call.
            for callback in obj.callbacks:
                callback(*args, **kwargs)
            return output
        return update
class ObservableAttribute(metaclass=ObservableAttributeMeta):
    """Base class holding the callbacks fired by metaclass-wrapped methods."""

    def __init__(self, callbacks: List[Callable]):
        # Callables invoked (in order) after each observed method call.
        self._callbacks = callbacks

    @property
    def callbacks(self):
        """The registered callbacks (no setter; assigned at construction)."""
        return self._callbacks
class ManagedAttribute(ObservableAttribute, methods=["__set__"]):
    """Data descriptor whose writes trigger the registered callbacks.

    ``methods=["__set__"]`` tells ObservableAttributeMeta to wrap __set__,
    so every assignment through this descriptor notifies the callbacks.
    """

    def __set_name__(self, owner, name):
        # Back the descriptor with a privately named instance attribute.
        self.name = f"_{name}"

    def __get__(self, instance, owner):
        return getattr(instance, self.name)

    def __set__(self, instance, value):
        setattr(instance, self.name, value)
@dataclass
class CapturedOutput:
    """Exit code and output stream captured from a container command."""
    # Exit code reported for the command.
    exit_code: int
    # Captured output text of the command.
    stream: str
class CapturedOutputAttributeMixin:
    """Mixin exposing a ``captured_output`` slot on command scripts.

    The class attribute may later be replaced with a ManagedAttribute
    descriptor (see TestCommandScriptGenerator._attach) so that writes
    can be observed via callbacks.
    """

    _captured_output: Optional[Union[ManagedAttribute, CapturedOutput]] = None

    @property
    def captured_output(self) -> CapturedOutput:
        """Most recently captured (exit_code, stream), or None."""
        return self._captured_output

    @captured_output.setter
    def captured_output(self, value):
        self._captured_output = value
class TestCommandScript(DockerCommandScript, CapturedOutputAttributeMixin):
    """
    Script to execute test.
    """

    # Keep pytest from collecting this class as a test.
    __test__ = False

    def __init__(
        self,
        case: int,
        command_type: CommandType,
        command: List[str],
    ):
        super().__init__(command_type, command)
        # 1-based index of the test case this script runs.
        self._case = case

    @property
    def case(self) -> int:
        """Index of the test case this script runs."""
        return self._case

    def before(self):
        # Announce which case is about to run.
        message.stdout_progress_detail(f"case #{self._case}")

    def step(self, linenr: int, line: str):
        # No per-line handling needed.
        pass

    def output(self, linenr: Optional[int], exit_code: int, output: str):
        # Keep only the result of the final command line.
        if linenr == len(self.lines):
            self.captured_output = CapturedOutput(exit_code, output)

    def after(self):
        pass
class SetupTestCommandScript(TestCommandScript):
    """
    Script to execute before running actual code.

    Writes the case index into a file (OUTPUT_NAME) inside the container.
    The project's test script reads this file and converts the number into
    its own filter: e.g. an automake 'TESTS' value, or a cmake
    '--tests-regex' value.
    """

    OUTPUT_NAME = "DPP_TEST_INDEX"

    def __init__(self, case: int):
        command = "sh -c 'echo {} > {}'".format(case, SetupTestCommandScript.OUTPUT_NAME)
        super().__init__(case, CommandType.Docker, [command])

    def before(self):
        # Override TestCommandScript.before to suppress the per-case echo.
        pass
class CoverageTestCommandScript(TestCommandScript):
    """
    Script to execute test with coverage.
    """

    # Behaves exactly like TestCommandScript; the distinct type lets the
    # generator distinguish coverage runs.
    def __init__(
        self,
        case: int,
        command_type: CommandType,
        command: List[str],
    ):
        super().__init__(case, command_type, command)
class TeardownTestCommandScript(TestCommandScript):
    """
    Script executed after CoverageTestCommandScript.

    Clears coverage data by removing the gcov directory and its contents.
    Related to: https://github.com/Suresoft-GLaDOS/defects4cpp/issues/66
    """

    # Keep pytest from collecting this class as a test.
    __test__ = False

    def __init__(self, case: int):
        super().__init__(case, CommandType.Docker, ["sh -c 'rm -rf gcov'"])

    def before(self):
        # Override TestCommandScript.before to suppress the per-case echo.
        pass
class GcovCommandScript(DockerCommandScript, CapturedOutputAttributeMixin):
    """
    Script to execute gcov.
    """

    def __init__(
        self,
        case: int,
        command_type: CommandType,
        command: List[str],
    ):
        super().__init__(command_type, command)
        # Test case this gcov invocation belongs to.
        self._case = case

    @property
    def case(self) -> int:
        """Index of the test case whose coverage is being collected."""
        return self._case

    def before(self):
        pass

    def step(self, linenr: int, line: str):
        pass

    def output(self, linenr: Optional[int], exit_code: Optional[int], output: str):
        # Keep only the result of the final command line.
        if linenr == len(self.lines):
            self.captured_output = CapturedOutput(exit_code, output)

    def after(self):
        pass
class RunGcovrTestCommandScript(TestCommandScript):
    """
    Script which aggregates the per-case gcov output into gcov/summary.json
    by invoking gcovr inside the container.
    related to: https://github.com/Suresoft-GLaDOS/defects4cpp/issues/66
    """
    def __init__(self, case: int, exclude: List[str]):
        flags = " ".join(f"--gcov-exclude {pattern}" for pattern in exclude)
        super().__init__(
            case,
            CommandType.Docker,
            [f"gcovr {flags} --keep --use-gcov-files --json --output gcov/summary.json gcov"],
        )
    def before(self):
        # Intentionally silent: no progress echo for this helper script.
        pass
class TestCommandScriptGenerator(DockerCommandScriptGenerator):
    """
    Factory yielding the ordered sequence of scripts needed to run the
    selected test cases, with or without coverage instrumentation.
    """
    __test__ = False
    def __init__(
        self,
        defect: Defect,
        coverage: bool,
        test_command: List[Command],
        test_cases: Set[int],
        callbacks: List[Callable],
        metadata: MetaData,
        worktree: Worktree,
        stream: bool,
    ):
        super().__init__(metadata, worktree, stream)
        self._defect = defect
        self._coverage = coverage
        self._test_command = test_command
        self._test_cases = test_cases
        self._callbacks = callbacks
        # Extra tests shipped with the defect; they serve case numbers
        # beyond defect.num_cases.
        self._extra_tests = defect.extra_tests
        self._gcov = metadata.common.gcov
    def create(self) -> Generator[TestCommandScript, None, None]:
        # Install the managed captured-output attribute so that the
        # registered callbacks fire whenever a script stores its output.
        self._attach(CapturedOutputAttributeMixin, "_captured_output")
        if self._coverage:
            yield from self._create_coverage_impl()
        else:
            yield from self._create_impl()
    def _attach(self, klass, field_name: str):
        # Add a ManagedAttribute descriptor to the class at runtime;
        # __set_name__ must be called manually because the descriptor is
        # assigned after class creation.
        descriptor = ManagedAttribute(self._callbacks)
        setattr(klass, field_name, descriptor)
        descriptor.__set_name__(klass, field_name)
    def _create_impl(self) -> Generator[TestCommandScript, None, None]:
        # Plain run: per case, one setup script then the test command(s).
        for case in sorted(self._test_cases):
            yield SetupTestCommandScript(case)
            # Case numbers past num_cases map onto the defect's extra tests.
            if case <= self._defect.num_cases:
                for test_cmd in self._test_command:
                    yield TestCommandScript(
                        case,
                        test_cmd.type,
                        test_cmd.lines,
                    )
            else:
                for test_cmd in self._extra_tests:
                    yield TestCommandScript(
                        case,
                        test_cmd.type,
                        test_cmd.lines
                    )
    def _create_coverage_impl(self) -> Generator[TestCommandScript, None, None]:
        # Coverage run: setup, instrumented tests, gcov, gcovr summary,
        # then cleanup — for each case in order.
        for case in sorted(self._test_cases):
            yield SetupTestCommandScript(case)
            for test_cmd in self._test_command:
                yield CoverageTestCommandScript(
                    case,
                    test_cmd.type,
                    test_cmd.lines
                )
            for gcov_cmd in self._gcov.command:
                yield GcovCommandScript(
                    case,
                    gcov_cmd.type,
                    gcov_cmd.lines
                )
            yield RunGcovrTestCommandScript(
                case,
                self._gcov.exclude
            )
            yield TeardownTestCommandScript(case)
class TestCommand(DockerCommand):
"""
Run test command either with or without coverage.
"""
__test__ = False
    def __init__(self):
        """Configure the 'test' subcommand argument parser and result holders."""
        super().__init__(parser=create_common_project_parser())
        # TODO: write argparse description in detail
        self.parser.add_argument(
            "-c",
            "--case",
            help="expression to filter cases (see `example <case-example_>`_)",
            type=str,
            dest="case",
            action=ValidateCase,
        )
        self.parser.add_argument(
            "--output-dir",
            help="output directory to generate coverage data instead of the current directory.",
            type=str,
            dest="output_dir",
            action=ValidateOutputDirectory,
        )
        self.parser.usage = "d++ test PATH [--coverage] [-v|--verbose] [-c|--case=expr] [--output-dir=directory]"
        self.parser.description = dedent(
            """\
            Run testsuite inside docker. The project must have been built previously.
            """
        )
        # Populated later by create_script_generator and the script callbacks.
        self.metadata: Optional[MetaData] = None
        self.worktree: Optional[Worktree] = None
        self.coverage: Optional[bool] = None
        # Directory where coverage summaries are written (default: CWD).
        self.output: str = getcwd()
        self.coverage_files: List[str] = []
        self.failed_coverage_files: List[str] = []
def create_script_generator(
self, args: argparse.Namespace
) -> DockerCommandScriptGenerator:
metadata = self.metadata = args.metadata
worktree = self.worktree = args.worktree
self.coverage = True if args.coverage else False
if args.output_dir:
self.output = args.output_dir
test_command = (
metadata.common.test_coverage_command
if self.coverage
else metadata.common.test_command
)
index = worktree.index
# Select cases to run. If none is given, select all.
selected_defect = metadata.defects[index - 1]
# Get number of extra test cases
number_of_extra_testcases = len(selected_defect.extra_tests) if selected_defect.extra_tests else 0
if not args.case:
cases = set(range(1, selected_defect.num_cases + number_of_extra_testcases + 1))
else:
included_cases, excluded_cases = args.case
if not included_cases:
included_cases = set(range(1, selected_defect.num_cases + number_of_extra_testcases + 1))
cases = included_cases.difference(excluded_cases)
return TestCommandScriptGenerator(
selected_defect,
self.coverage,
test_command,
cases,
[self.script_callback],
metadata,
worktree,
stream=True if args.verbose else False,
)
def setup(self, generator: DockerCommandScriptGenerator):
message.info(__name__, f"'{generator.metadata.name}'")
if not self.coverage:
message.stdout_progress(f"[{generator.metadata.name}] running test suites")
else:
message.stdout_progress(
f"[{generator.metadata.name}] running test suites (coverage)"
)
def teardown(self, generator: DockerCommandScriptGenerator):
message.info(__name__, "done")
message.stdout_progress(f"[{generator.metadata.name}] done")
if self.coverage:
if self.coverage_files:
created = [f" - {c}\n" for c in self.coverage_files]
message.stdout_progress_detail(
f"Successfully created:\n{''.join(created)}"
)
if self.failed_coverage_files:
not_created = [f" - {c}\n" for c in self.failed_coverage_files]
message.stdout_progress_detail(
f"Could not create files:\n{''.join(not_created)}"
)
def summary_dir(self, case: int) -> Path:
"""
Return path where coverage data should be created.
Parameters
----------
case : int
Case number.
Returns
-------
pathlib.Path
"""
p = Path(self.output) / f"{self.metadata.name}-{self.worktree.suffix}-{case}"
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
return p
def script_callback(self, script: TestCommandScript, *args, **kwargs):
"""
Callback function to register used to collect data after each command is executed.
Parameters
----------
script : TestCommandScript
Script instance which has been executed.
Returns
-------
None
"""
if (
type(script) is TestCommandScript
or type(script) is CoverageTestCommandScript
):
self._save_result(script)
elif type(script) is RunGcovrTestCommandScript:
self._save_coverage(cast(GcovCommandScript, script))
else:
pass
def _save_result(self, script: TestCommandScript):
"""
Write exit code and captured stdout to file.
- {case}.output: contains captured output
- {case}.test: either 'passed' or 'failed' string.
It should be invoked only after test command is executed.
Parameters
----------
script : TestCommandScript
Script instance which has been executed.
Returns
-------
None
"""
d | |
# Repository: agoragames/python-valve
# -*- coding: utf-8 -*-
# Copyright (C) 2013 <NAME>
from __future__ import (absolute_import,
unicode_literals, print_function, division)
import collections
import struct
import six
from . import util
NO_SPLIT = -1
SPLIT = -2
class BrokenMessageError(Exception):
    """Base exception for malformed, invalid or undecodable messages."""
    pass
class BufferExhaustedError(BrokenMessageError):
    """Raised when a buffer ends before a field can be fully decoded."""
    def __init__(self, message="Incomplete message"):
        super(BufferExhaustedError, self).__init__(message)
def use_default(func):
    """Decorator for encode-style methods ``(self, value=None, values={})``:
    substitute ``self.default_value`` whenever ``value`` is None before
    delegating to the wrapped function."""
    def use_default(self, value=None, values={}):
        if value is None:
            value = self.default_value
        return func(self, value, values)
    return use_default
def needs_buffer(func):
    """Decorator for decode-style methods: raise BufferExhaustedError
    immediately when asked to decode from an empty buffer."""
    def needs_buffer(self, buffer, *args, **kwargs):
        if not buffer:
            raise BufferExhaustedError
        return func(self, buffer, *args, **kwargs)
    return needs_buffer
class MessageField(object):
    """One fixed-format field within a message.

    Subclasses set ``fmt`` to a ``struct`` format character; instances know
    how to validate, encode and decode a single value of that format.
    """
    # struct format character(s); None for fields with custom codecs.
    fmt = None
    # Class-level validators shared by every instance of the subclass.
    validators = []
    def __init__(self, name, optional=False,
                 default_value=None, validators=[]):
        """
        name -- used when decoding messages to set the key in the
        returned dictionary
        optional -- whether or not a field value must be provided
        when encoding
        default_value -- if optional is True, the value that is
        used if none is specified
        validators -- list of callables that return False if the
        value they're passed is invalid
        """
        if self.fmt is not None:
            # Default to little-endian unless the format already carries an
            # explicit byte-order/alignment prefix.
            if self.fmt[0] not in "@=<>!":
                self.format = "<" + self.fmt
            else:
                self.format = self.fmt
            if six.PY2:
                # Struct only accepts bytes
                self.format = self.format.encode("ascii")
        self.name = name
        self.optional = optional
        self._value = default_value
        self.validators = self.__class__.validators + validators
    @property
    def default_value(self):
        # Only optional fields with a configured default may fall back;
        # NOTE(review): this also raises for optional fields without a
        # default, with a slightly misleading message — confirm intent.
        if self.optional:
            if self._value is not None:
                return self._value
        raise ValueError(
            "Field '{fname}' is not optional".format(fname=self.name))
    def validate(self, value):
        """Run all validators; return value or raise BrokenMessageError."""
        for validator in self.validators:
            try:
                if not validator(value):
                    raise ValueError
            except Exception:
                # Any validator failure — a False return or a raised error —
                # is reported uniformly as a broken message.
                raise BrokenMessageError(
                    "Invalid value ({}) for field '{}'".format(
                        value, self.name))
        return value
    @use_default
    def encode(self, value, values={}):
        # Pack the validated value; use_default substitutes the field's
        # default when the caller passes None.
        try:
            return struct.pack(self.format, self.validate(value))
        except struct.error as exc:
            raise BrokenMessageError(exc)
    @needs_buffer
    def decode(self, buffer, values={}):
        """
        Accepts a string of raw bytes which it will attempt to
        decode into some Python object which is returned. All
        remaining data left in the buffer is also returned which
        may be an empty string.
        Also accepts a second argument which is a dictionary of the
        fields that have been decoded so far (i.e. those occurring before
        this field in the `fields` tuple). This allows the decoder to
        adapt its functionality based on the value of other fields
        if needs be.
        For example, in the case of A2S_PLAYER responses, the field
        `player_count` needs to be accessed at decode-time to determine
        how many player entries to attempt to decode.
        """
        field_size = struct.calcsize(self.format)
        if len(buffer) < field_size:
            raise BufferExhaustedError
        field_data = buffer[:field_size]
        left_overs = buffer[field_size:]
        try:
            return (self.validate(
                struct.unpack(self.format, field_data)[0]), left_overs)
        except struct.error as exc:
            raise BrokenMessageError(exc)
class ByteField(MessageField):
    """Unsigned 8-bit integer field."""
    fmt = "B"
class StringField(MessageField):
    """Null-terminated UTF-8 string field."""
    fmt = "s"
    @use_default
    def encode(self, value, values={}):
        # Encode as UTF-8 and append the null terminator byte.
        return value.encode("utf8") + b"\x00"
    @needs_buffer
    def decode(self, buffer, values={}):
        # Scan for the terminator; everything before it is the string.
        terminator = buffer.find(b"\x00")
        if terminator == -1:
            raise BufferExhaustedError("No string terminator")
        raw = buffer[:terminator]
        remainder = buffer[terminator + 1:]
        return raw.decode("utf8", "ignore"), remainder
class ShortField(MessageField):
    """Signed 16-bit integer field."""
    fmt = "h"
class LongField(MessageField):
    """Signed 32-bit integer field."""
    fmt = "l"
class FloatField(MessageField):
    """32-bit floating point field."""
    fmt = "f"
class PlatformField(ByteField):
    """Byte field decoded into a util.Platform value."""
    @needs_buffer
    def decode(self, buffer, values={}):
        # Decode the raw byte, then wrap it in the Platform type.
        byte, remnant_buffer = super(PlatformField,
                                     self).decode(buffer, values)
        return util.Platform(byte), remnant_buffer
class ServerTypeField(ByteField):
    """Byte field decoded into a util.ServerType value."""
    @needs_buffer
    def decode(self, buffer, values={}):
        # Decode the raw byte, then wrap it in the ServerType type.
        byte, remnant_buffer = super(ServerTypeField,
                                     self).decode(buffer, values)
        return util.ServerType(byte), remnant_buffer
class MessageArrayField(MessageField):
    """
    Represents a nested message within another message that is
    repeated a given number of times (often defined within the
    same message.)
    """
    def __init__(self, name, element, count=None):
        """
        element -- the Message subclass that will attempt to be decoded
        count -- ideally a callable that returns the number of
        'elements' to attempt to decode; count must also present
        a 'minimum' attribute which is minimum number of elements
        that must be decoded or else raise BrokenMessageError
        If count isn't callable (e.g. a number) it will be
        wrapped in a function with the minimum attribute set
        equal to the given 'count' value
        Helper static methods all(), value_of() and at_least()
        are provided which are intended to be used as the
        'count' argument, e.g.
        MessageArrayField("", SubMessage, MessageArrayField.all())
        ... will decode all SubMessages within the buffer
        """
        MessageField.__init__(self, name)
        if count is None:
            count = self.all()
        # Coerces the count argument to be a callable. For example,
        # in most cases count would be a Message.value_of(), however
        # if an integer is provided it will be wrapped in a lambda.
        self.count = count
        if not hasattr(count, "__call__"):
            def const_count(values={}):
                return count
            const_count.minimum = count
            self.count = const_count
        self.element = element
    def encode(self, elements, values={}):
        # Encode each element in order, enforcing both the upper bound
        # (count) and the lower bound (count.minimum).
        buf = []
        for i, element in enumerate(elements):
            if not isinstance(element, self.element):
                raise BrokenMessageError(
                    "Element {} ({}) is not instance of {}".format(
                        i, element, self.element.__name__))
            if i + 1 > self.count(values):
                raise BrokenMessageError("Too many elements")
            buf.append(element.encode())
        if len(buf) < self.count.minimum:
            raise BrokenMessageError("Too few elements")
        return b"".join(buf)
    def decode(self, buffer, values={}):
        # Decode up to count(values) elements, tolerating a truncated
        # final element when at least count.minimum were decoded.
        entries = []
        count = 0
        while count < self.count(values):
            # Set start_buffer to the beginning of the buffer so that in
            # the case of buffer exhaustion it can return from the
            # start of the entry, not half-way through it.
            #
            # For example if you had the fields:
            #
            #   ComplexField =
            #       LongField
            #       ShortField
            #
            #   MessageArrayField(ComplexField,
            #                     count=MessageArrayField.all())
            #   ByteField()
            #
            # When attempting to decode the end of the buffer FF FF FF FF 00
            # the first four bytes will be consumed by LongField,
            # however ShortField will fail with BufferExhaustedError as
            # there's only one byte left. However, there is enough left
            # for the trailing ByteField. So when ComplexField
            # propagates ShortField's BufferExhaustedError the buffer will
            # only have the 00 byte remaining. The exception is caught
            # and buffer reverted to FF FF FF FF 00. This is passed
            # to ByteField which consumes one byte and the remaining
            # FF FF FF 00 bytes are stored as message payload.
            #
            # This is very much an edge case. :/
            start_buffer = buffer
            try:
                entry = self.element.decode(buffer)
                buffer = entry.payload
                entries.append(entry)
                count += 1
            except (BufferExhaustedError, BrokenMessageError) as exc:
                # Allow for returning 'at least something' if end of
                # buffer is reached.
                if count < self.count.minimum:
                    raise BrokenMessageError(exc)
                buffer = start_buffer
                break
        return entries, buffer
    @staticmethod
    def value_of(name):
        """
        Reference another field's value as the argument 'count'.
        """
        def field(values={}, f=None):
            # 'f' defaults to the function itself (bound below) so the
            # minimum attribute can be refreshed at call time.
            f.minimum = values[name]
            return values[name]
        if six.PY3:
            field.__defaults__ = (field,)
        else:
            field.func_defaults = (field,)
        return field
    @staticmethod
    def all():
        """
        Decode as much as possible from the buffer.
        Note that if a full element field cannot be decoded it will
        return all entries decoded up to that point, and reset the
        buffer to the start of the entry which raised the
        BufferExhaustedError. So it is possible to have additional
        fields follow a MessageArrayField and have
        count=MessageArrayField.all() as long as the size of the
        trailing fields < size of the MessageArrayField element.
        """
        i = [1]
        def all_(values={}):
            # Always returns one more than the calls so far, so the decode
            # loop never terminates on the count check alone.
            i[0] = i[0] + 1
            return i[0]
        all_.minimum = -1
        return all_
    @staticmethod
    def at_least(minimum):
        """
        Decode at least 'minimum' number of entries.
        """
        i = [1]
        def at_least(values={}):
            # Unbounded upper limit, like all(), but with a hard floor.
            i[0] = i[0] + 1
            return i[0]
        at_least.minimum = minimum
        return at_least
class MessageDictField(MessageArrayField):
    """
    Decodes a series of key-value pairs from a message. Functionally
    identical to MessageArrayField except the results are returned as
    a dictionary instead of a list.
    """
    def __init__(self, name, key_field, value_field, count=None):
        """
        key_field and value_field are the respective components
        of the name-value pair that are to be decoded. The fields
        should have unique name strings. It is assumed that the
        key-field comes first, followed by the value.
        count is the same as MessageArrayField.
        """
        # Build an anonymous two-field Message subclass for each pair
        # (Python 2 requires the class name to be bytes).
        element = type("KeyValueField" if six.PY3 else b"KeyValueField",
                       (Message,), {"fields": (key_field, value_field)})
        self.key_field = key_field
        self.value_field = value_field
        MessageArrayField.__init__(self, name, element, count)
    def decode(self, buffer, values={}):
        # Decode as a list of pair-messages, then flatten into a dict.
        entries, buffer = MessageArrayField.decode(self, buffer, values)
        entries_dict = {}
        for entry in entries:
            entries_dict[entry[
                self.key_field.name]] = entry[self.value_field.name]
        return entries_dict, buffer
class Message(collections.Mapping):
fields = ()
    def __init__(self, payload=None, **field_values):
        # Copy the class-level field tuple onto the instance and store any
        # already-decoded values plus leftover raw payload bytes.
        self.fields = self.__class__.fields
        self.payload = payload
        self.values = field_values
    def __getitem__(self, key):
        """Return the decoded value for field name ``key``."""
        return self.values[key]
    def __setitem__(self, key, value):
        # NOTE(review): the class subclasses the read-only Mapping ABC yet
        # provides mutators, so instances are effectively mutable.
        self.values[key] = value
    def __delitem__(self, key):
        """Remove the value stored for field name ``key``."""
        del self.values[key]
    def __len__(self):
        """Number of decoded field values."""
        return len(self.values)
    def __iter__(self):
        """Iterate over the decoded field names."""
        return iter(self.values)
def encode(self, **field_values):
values = dict(self.values, | |
# Repository: fluiddyn/fluiddyn
"""Fast Fourier transforms (:mod:`fluiddyn.calcul.easypyfft`)
=============================================================
.. autofunction:: fftw_grid_size
Provides classes for performing fft in 1, 2, and 3 dimensions:
.. autoclass:: FFTP2D
:members:
.. autoclass:: FFTW2DReal2Complex
:members:
.. autoclass:: FFTW3DReal2Complex
:members:
.. autoclass:: FFTW1D
:members:
.. autoclass:: FFTW1DReal2Complex
:members:
"""
import os
from time import time
import numpy as np
from ..util.mpi import nb_proc, printby0
try:
import scipy.fftpack as fftp
except ImportError:
pass
# Number of FFTW threads: honour OpenMP's environment variable, default 1.
nthreads = int(os.environ.get("OMP_NUM_THREADS", 1))
def fftw_grid_size(nk, bases=[2, 3, 5, 7, 11, 13], debug=False):
    """Find the closest multiple of prime powers greater than or equal to nk
    using Mixed Integer Linear Programming (MILP). Useful while setting the
    grid-size to be compatible with FFTW.

    Parameters
    ----------
    nk : int
        Lower bound for the spectral grid size.
    bases : array-like, optional
        List of bases, typically prime numbers (subset of {2,3,5,7,11,13}).
    debug : bool, optional
        Print useful messages.

    Returns
    -------
    int
    """
    # Fast paths: delegate to scipy / pyfftw helpers when the base set
    # matches exactly; otherwise fall through to the MILP formulation.
    if {2, 3, 5} == set(bases):
        if debug:
            print("Using scipy.fftpack.next_fast_len")
        return fftp.next_fast_len(nk)
    elif {2, 3, 5, 7, 11, 13} == set(bases):
        try:
            import pyfftw
            # Bug fix: this debug message used to sit in an `else:` of the
            # elif chain and therefore printed for *non-matching* base sets
            # instead of for the pyfftw path.
            if debug:
                print("Using pyfftw.next_fast_len")
            return pyfftw.next_fast_len(nk)
        except (ImportError, AttributeError):
            # pyfftw missing or too old — solve with MILP below.
            pass
    if not {2, 3, 5, 7, 11, 13}.issuperset(bases):
        raise ValueError(
            "FFTW only supports bases which are a subset of "
            "{2, 3, 5, 7, 11, 13}."
        )
    import pulp
    prob = pulp.LpProblem("FFTW Grid-size Problem")
    bases = np.array(bases)
    bases_order1 = bases[bases < 10]
    bases_order2 = bases[bases >= 10]
    # 2**exp_max >= nk bounds every exponent from above.
    exp_max = np.ceil(np.log2(nk))
    exps = pulp.LpVariable.dicts(
        "exponent_o1", bases_order1, 0, exp_max, cat=pulp.LpInteger
    )
    # FFTW allows at most a single power of the large bases (11, 13).
    exps.update(
        pulp.LpVariable.dicts(
            "exponent_o2", bases_order2, 0, 1, cat=pulp.LpInteger
        )
    )
    log_nk_new = pulp.LpVariable("log_grid_size", 0)
    log_nk_new = pulp.lpDot(exps.values(), np.log(bases))
    prob += log_nk_new  # Target to be minimized
    # Subject to: the new size must be at least nk.
    prob += log_nk_new >= np.log(nk), "T1"
    if {11, 13}.issubset(bases):
        # At most one factor of 11 or 13 combined.
        prob += exps[11] + exps[13] <= 1, "T2"
    if debug:
        print("bases =", bases)
        print("exponents =", exps)
        print("log_nk_new =", log_nk_new)
    # prob.writeLP("FFTWGridSizeOptimizationModel.lp")
    prob.solve()
    if debug:
        print("Status:", pulp.LpStatus[prob.status])
        for v in prob.variables():
            print(v.name, "=", v.varValue)
    if pulp.LpStatus[prob.status] == "Infeasible":
        raise ValueError(f"Not enough bases: {bases}")
    exps_solution = [v.varValue for v in prob.variables()]
    nk_new = np.prod(np.power(bases, exps_solution))
    return int(nk_new)
class BaseFFT:
    """Common helpers shared by the FFT wrapper classes.

    Concrete subclasses supply ``fft``/``ifft`` (and the ``*_as_arg``
    variants) plus the ``shapeX``/``shapeK`` attributes used here.
    """
    def run_tests(self):
        """Self-consistency checks: transform round-trip and energy equality."""
        field = np.random.rand(*self.shapeX)
        field_fft = self.fft(field)
        field = self.ifft(field_fft)
        field_fft = self.fft(field)
        energy_x = self.compute_energy_from_spatial(field)
        energy_k = self.compute_energy_from_Fourier(field_fft)
        assert np.allclose(energy_x, energy_k), (
            energy_x,
            energy_k,
            nb_proc * energy_k - energy_x,
        )
        out_fft = np.zeros(self.shapeK, dtype=np.complex128)
        self.fft_as_arg(field, out_fft)
        energy_k2 = self.compute_energy_from_Fourier(out_fft)
        assert np.allclose(energy_x, energy_k2)
        out = np.empty(self.shapeX)
        self.ifft_as_arg(field_fft, out)
        energy_x2 = self.compute_energy_from_spatial(out)
        assert np.allclose(energy_x, energy_x2)
    def run_benchs(self, nb_time_execute=10):
        """Measure mean wall-clock time of fft_as_arg / ifft_as_arg."""
        field = np.zeros(self.shapeX)
        field_fft = np.zeros(self.shapeK, dtype=np.complex128)
        def mean_duration(func, *call_args):
            # Average duration of one call over nb_time_execute runs.
            durations = []
            for _ in range(nb_time_execute):
                t_start = time()
                func(*call_args)
                durations.append(time() - t_start)
            return np.mean(durations)
        time_fft = mean_duration(self.fft_as_arg, field, field_fft)
        time_ifft = mean_duration(self.ifft_as_arg, field_fft, field)
        name = self.__class__.__name__
        printby0(
            f"Internal bench ({name})\n"
            f"time fft ({name}): {time_fft:.6f} s\n"
            f"time ifft ({name}): {time_ifft:.6f} s"
        )
        return time_fft, time_ifft
    def get_short_name(self):
        """Lower-cased class name, used as an identifier."""
        return self.__class__.__name__.lower()
    def compute_energy_from_X(self, fieldX):
        """Mean energy density of a real-space field."""
        return np.mean(fieldX ** 2 / 2.0)
    def get_local_size_X(self):
        """Number of real-space points stored locally."""
        return np.prod(self.shapeX)
    def get_shapeK_seq(self):
        """Sequential (global) spectral-space shape."""
        return self.shapeK
    get_shapeK_loc = get_shapeK_seq
    def get_shapeX_seq(self):
        """Sequential (global) real-space shape."""
        return self.shapeX
    get_shapeX_loc = get_shapeX_seq
class FFTP2D(BaseFFT):
    """A class to use fftp (scipy.fftpack) for 2D real-to-complex FFTs."""
    def __init__(self, nx, ny):
        # Even sizes are required by the Hermitian reconstruction in ifft.
        if nx % 2 != 0 or ny % 2 != 0:
            raise ValueError("nx and ny should be even")
        self.nx = nx
        self.ny = ny
        # Arrays are stored (ny, nx): first axis is y.
        self.shapeX = (ny, nx)
        # Number of independent kx modes for a real input.
        self.nkx = int(float(nx) / 2 + 1)
        self.shapeK = self.shapeK_seq = self.shapeK_loc = (ny, self.nkx)
        self.coef_norm = nx * ny
        self.fft2d = self.fft
        self.ifft2d = self.ifft
    def fft(self, ff):
        """Forward transform; keep only the non-redundant half-spectrum."""
        if not (isinstance(ff[0, 0], float)):
            print("Warning: not array of floats")
        big_ff_fft = fftp.fft2(ff) / self.coef_norm
        small_ff_fft = big_ff_fft[:, 0 : self.nkx]
        return small_ff_fft
    def ifft(self, small_ff_fft, ARG_IS_COMPLEX=False):
        """Inverse transform from the half-spectrum back to real space."""
        if not (isinstance(small_ff_fft[0, 0], complex)):
            print("Warning: not array of complexes")
        # print('small_ff_fft\n', small_ff_fft)
        big_ff_fft = np.empty(self.shapeX, dtype=np.complex128)
        big_ff_fft[:, 0 : self.nkx] = small_ff_fft
        # Rebuild the redundant negative-kx half via Hermitian symmetry.
        for iky in range(self.ny):
            big_ff_fft[iky, self.nkx :] = small_ff_fft[
                -iky, self.nkx - 2 : 0 : -1
            ].conj()
        # print('big_ff_fft final\n', big_ff_fft)
        result_ifft = fftp.ifft2(big_ff_fft * self.coef_norm)
        # A real input should come back (numerically) real.
        if np.max(np.imag(result_ifft)) > 10 ** (-8):
            print(
                "ifft2: imaginary part of ifft not equal to zero,",
                np.max(np.imag(result_ifft)),
            )
        return np.real(result_ifft)
    def fft_as_arg(self, field, field_fft):
        # In-place style API: write the transform into field_fft.
        field_fft[:] = self.fft(field)
    def ifft_as_arg(self, field_fft, field):
        # In-place style API: write the inverse transform into field.
        field[:] = self.ifft(field_fft)
    def compute_energy_from_Fourier(self, ff_fft):
        # Interior kx columns count twice (they represent +kx and -kx).
        return (
            np.sum(abs(ff_fft[:, 0]) ** 2 + abs(ff_fft[:, -1]) ** 2)
            + 2 * np.sum(abs(ff_fft[:, 1:-1]) ** 2)
        ) / 2
    def compute_energy_from_spatial(self, ff):
        # Mean energy density in real space.
        return np.mean(abs(ff) ** 2) / 2
class BasePyFFT(BaseFFT):
    """pyfftw-backed base class handling plan creation and normalisation."""
    def __init__(self, shapeX):
        try:
            import pyfftw
        except ImportError as err:
            raise ImportError(
                "ImportError {0}. Instead fftpack can be used (?)", err
            )
        # Accept a bare int for 1D transforms.
        if isinstance(shapeX, int):
            shapeX = [shapeX]
        # Real-to-complex transform keeps n//2 + 1 modes on the last axis.
        shapeK = list(shapeX)
        shapeK[-1] = shapeK[-1] // 2 + 1
        shapeK = tuple(shapeK)
        self.shapeX = shapeX
        self.shapeK = self.shapeK_seq = self.shapeK_loc = shapeK
        self.empty_aligned = pyfftw.empty_aligned
        # Persistent aligned work buffers reused by both plans.
        self.arrayX = pyfftw.empty_aligned(shapeX, np.float64)
        self.arrayK = pyfftw.empty_aligned(shapeK, np.complex128)
        axes = tuple(range(len(shapeX)))
        self.fftplan = pyfftw.FFTW(
            input_array=self.arrayX,
            output_array=self.arrayK,
            axes=axes,
            direction="FFTW_FORWARD",
            threads=nthreads,
        )
        self.ifftplan = pyfftw.FFTW(
            input_array=self.arrayK,
            output_array=self.arrayX,
            axes=axes,
            direction="FFTW_BACKWARD",
            threads=nthreads,
        )
        # Forward transforms are divided by this to normalise.
        self.coef_norm = np.prod(shapeX)
        self.inv_coef_norm = 1.0 / self.coef_norm
    def fft(self, fieldX):
        """Return the normalised forward transform of fieldX."""
        fieldK = self.empty_aligned(self.shapeK, np.complex128)
        self.fftplan(
            input_array=fieldX, output_array=fieldK, normalise_idft=False
        )
        return fieldK / self.coef_norm
    def ifft(self, fieldK):
        """Return the inverse transform of fieldK (input preserved)."""
        fieldX = self.empty_aligned(self.shapeX, np.float64)
        # This copy is needed because FFTW_DESTROY_INPUT is used.
        # See pyfftw.readthedocs.io/en/latest/source/pyfftw/pyfftw.html
        self.arrayK[:] = fieldK
        self.ifftplan(
            input_array=self.arrayK, output_array=fieldX, normalise_idft=False
        )
        return fieldX
    def fft_as_arg(self, fieldX, fieldK):
        """Forward transform writing the normalised result into fieldK."""
        self.fftplan(
            input_array=fieldX, output_array=fieldK, normalise_idft=False
        )
        fieldK *= self.inv_coef_norm
    def ifft_as_arg(self, fieldK, fieldX):
        """Inverse transform writing into fieldX without clobbering fieldK."""
        # This copy is needed because FFTW_DESTROY_INPUT is used.
        # See pyfftw.readthedocs.io/en/latest/source/pyfftw/pyfftw.html
        # fieldK = fieldK.copy()
        # self.ifftplan(input_array=fieldK, output_array=fieldX,
        # this seems faster (but it could depend on the size)
        self.arrayK[:] = fieldK
        self.ifftplan(
            input_array=self.arrayK, output_array=fieldX, normalise_idft=False
        )
    def ifft_as_arg_destroy(self, fieldK, fieldX):
        # Faster variant that may clobber fieldK (FFTW_DESTROY_INPUT).
        self.ifftplan(
            input_array=fieldK, output_array=fieldX, normalise_idft=False
        )
    def compute_energy_from_Fourier(self, ff_fft):
        # Delegate Hermitian-mode weighting to the subclass sum_wavenumbers.
        result = self.sum_wavenumbers(abs(ff_fft) ** 2) / 2
        return result
    compute_energy_from_K = compute_energy_from_Fourier
    def compute_energy_from_spatial(self, ff):
        # Mean energy density in real space.
        return np.mean(abs(ff) ** 2) / 2
    def project_fft_on_realX(self, ff_fft):
        # Round-trip projection removing non-Hermitian components.
        return self.fft(self.ifft(ff_fft))
    def get_is_transposed(self):
        # Sequential pyfftw layout is never transposed.
        return False
    def create_arrayX(self, value=None):
        """Return a constant array in real space."""
        shapeX = self.shapeX
        field = self.empty_aligned(shapeX)
        if value is not None:
            field.fill(value)
        return field
    def create_arrayK(self, value=None):
        """Return a constant array in spectral space."""
        shapeK = self.shapeK
        field = self.empty_aligned(shapeK, dtype=np.complex128)
        if value is not None:
            field.fill(value)
        return field
class FFTW2DReal2Complex(BasePyFFT):
    """A class to use fftw for 2D real-to-complex transforms."""
    def __init__(self, nx, ny):
        # Arrays are stored (ny, nx): first axis is y.
        shapeX = (ny, nx)
        super().__init__(shapeX)
        self.fft2d = self.fft
        self.ifft2d = self.ifft
    def sum_wavenumbers(self, ff_fft):
        # Sum over all wavenumbers, counting interior kx columns twice
        # (each represents a +kx and a -kx mode of the real transform).
        if self.shapeX[1] % 2 == 0:
            # Even nx: first and last columns are self-conjugate.
            return (
                np.sum(ff_fft[:, 0])
                + np.sum(ff_fft[:, -1])
                + 2 * np.sum(ff_fft[:, 1:-1])
            )
        else:
            return np.sum(ff_fft[:, 0]) + 2 * np.sum(ff_fft[:, 1:])
    def get_seq_indices_first_K(self):
        # Sequential implementation: local data starts at the origin.
        return 0, 0
    def get_seq_indices_first_X(self):
        # Sequential implementation: local data starts at the origin.
        return 0, 0
    def get_x_adim_loc(self):
        """Get the coordinates of the points stored locally.
        Returns
        -------
        x0loc : np.ndarray
        x1loc : np.ndarray
        The indices correspond to the index of the dimension in real space.
        """
        nyseq, nxseq = self.get_shapeX_seq()
        ix0_start, ix1_start = self.get_seq_indices_first_X()
        nx0loc, nx1loc = self.get_shapeX_loc()
        x0loc = np.array(range(ix0_start, ix0_start + nx0loc))
        x1loc = np.array(range(ix1_start, ix1_start + nx1loc))
        return x0loc, x1loc
    def get_k_adim_loc(self):
        """Get the non-dimensional wavenumbers stored locally.
        Returns
        -------
        k0_adim_loc : np.ndarray
        k1_adim_loc : np.ndarray
        The indices correspond to the index of the dimension in spectral space.
        """
        nyseq, nxseq = self.get_shapeX_seq()
        # ky runs 0..n/2 followed by the negative frequencies; kx is the
        # half-spectrum 0..n/2 of the real transform.
        kyseq = np.array(
            list(range(nyseq // 2 + 1)) + list(range(-nyseq // 2 + 1, 0))
        )
        kxseq = np.array(range(nxseq // 2 + 1))
        if self.get_is_transposed():
            k0seq, k1seq = kxseq, kyseq
        else:
            k0seq, k1seq = kyseq, kxseq
        ik0_start, ik1_start = self.get_seq_indices_first_K()
        nk0loc, nk1loc = self.get_shapeK_loc()
        k0_adim_loc = k0seq[ik0_start : ik0_start + nk0loc]
        k1_adim_loc = k1seq[ik1_start : ik1_start + nk1loc]
        return k0_adim_loc, k1_adim_loc
class FFTW3DReal2Complex(BasePyFFT):
"""A class to use fftw"""
    def __init__(self, nx, ny, nz):
        # Arrays are stored (nz, ny, nx): first axis is z.
        shapeX = (nz, ny, nx)
        super().__init__(shapeX)
        self.fft3d = self.fft
        self.ifft3d = self.ifft
    def sum_wavenumbers(self, ff_fft):
        # Sum over all wavenumbers, counting interior kx planes twice
        # (each stands for a +kx and a -kx mode of the real transform).
        if self.shapeX[2] % 2 == 0:
            # Even nx: first and last kx planes are self-conjugate.
            return (
                np.sum(ff_fft[:, :, 0])
                + np.sum(ff_fft[:, :, -1])
                + 2 * np.sum(ff_fft[:, :, 1:-1])
            )
        else:
            return np.sum(ff_fft[:, :, 0]) + 2 * np.sum(ff_fft[:, :, 1:])
def get_k_adim(self):
nK0, nK1, nK2 = self.shapeK
kz_adim_max = nK0 // 2
kz_adim_min = -((nK0 - 1) // 2)
ky_adim_max = nK1 // 2
ky_adim_min = -((nK1 - 1) // 2)
return (
np.r_[0 : kz_adim_max + 1, kz_adim_min:0],
np.r_[0 : ky_adim_max | |
# File: skyrimse/views.py
from django.http import HttpResponse
from django.shortcuts import render, redirect
from skyrimse.models import *
from skyrimse.forms import *
from mongoengine.context_managers import switch_db
from mongoengine.base.datastructures import EmbeddedDocumentList
from datetime import datetime
from urllib.parse import unquote
from .customScripts import plotRadars
from json import load, loads, dumps, dump
import os
##########################
##### Shared Methods #####
##########################
def updateProgressCompletion(source, vanillaSection, modSection, progress):
    """Record one collected item and refresh the matching completion
    percentage on the Progress document, then persist it.

    Items from the base game and official DLCs update the vanilla counters;
    anything else updates the mod counters.
    """
    vanillaSources = ("vanilla", "dawnguard", "dragonborn", "hearthfire")
    if source in vanillaSources:
        section, totalKey, completionKey = vanillaSection, "total", "vanilla"
    else:
        section, totalKey, completionKey = modSection, "modTotal", "mod"
    progress["collected"][section] += 1
    progress["collected"][totalKey] += 1
    ratio = progress.collected[totalKey] / progress.collectedTotal[totalKey]
    progress["completion"][completionKey] = ratio * 100
    progress.save()
def generateData(obj, objDir, objStr, category):
    """Build the template context summarizing load state and per-difficulty
    completion for one collectible type (quests, perks, spells, ...).

    obj -- mongoengine Document class to query
    objDir -- directory containing the '<source><objStr>.json' data files
    objStr -- display/type name used in keys like 'load<objStr>'
    category -- field name used for grouping; may be an
                EmbeddedDocumentList or a plain categorical value
    """
    # Pull all the Objects
    allOBj = obj.objects.all()
    # Dynamically discover sources from the JSON files on disk.
    objFiles = [f for f in os.listdir(objDir)]
    data = {"type": objStr, "counts": {},"load": "load{}".format(objStr), "progress": []}
    allNames = set()
    for f in objFiles:
        # 'vanillaQuest.json' -> source 'vanilla', etc.
        source = f.replace("{}.json".format(objStr), "")
        data["counts"][source] = len(obj.objects(source=source))
    # All loaded only when every discovered source has at least one record.
    data["allLoaded"] = False if False in [data["counts"][source] > 0 for source in data["counts"]] else True
    if(allOBj):
        # Whether completion lives on embedded documents or on the document.
        data["category"] = True if isinstance(allOBj[0][category], EmbeddedDocumentList) else False
    # Load Character Completion Data: one stats entry per difficulty.
    for doc in Progress.objects.all():
        difficulty = doc.difficulty
        stats = {"difficulty": difficulty, "complete": 0,"total": 0,
                 "target": "collapse{}".format(difficulty), "sources": {}}
        for o in allOBj:
            source = o["source"]
            # If the desired field is an embeddedDocumentList
            if(isinstance(o[category], EmbeddedDocumentList)):
                if(not stats["sources"].get(source)):
                    stats["sources"][source] = {"complete": 0, "total": 0}
                # Check the completion data for each embeddedDocument
                for embedded in o[category]:
                    if(embedded["completion"][difficulty] > 0):
                        stats["sources"][source]["complete"] += 1
                        stats["complete"] += 1
                    stats["sources"][source]["total"] += 1
                    stats["total"] += 1
                allNames.add(o.name)
            else:
                # Otherwise, tally completion per source and category value.
                categ = o[category]
                if(not stats["sources"].get(source)):
                    stats["sources"][source] = {}
                if(not stats["sources"][source].get(categ)):
                    stats["sources"][source][categ] = {"complete": 0, "total": 0}
                if(o["completion"][difficulty] > 0):
                    stats["sources"][source][categ]["complete"] += 1
                    stats["complete"] += 1
                stats["sources"][source][categ]["total"] += 1
                stats["total"] += 1
                allNames.add(o.name)
        # Sort the names for stable display.
        data["all"] = sorted(allNames)
        data["progress"].append(stats)
    return data
def generateDetails(obj, objClass, request, embedded):
    """Build the template context for an object's detail page.

    obj -- mongoengine Document class to query (e.g. Quest)
    objClass -- key under which the document is exposed to the template
    request -- Django HttpRequest; the object name comes from the URL path
    embedded -- name of an embedded-document list field to expand, or None
                when completion data lives directly on the document

    Fixes: the original shadowed the ``embedded`` parameter with its loop
    variable and repeated the six-difficulty completion dict literally in
    two places.
    """
    difficulties = ("novice", "apprentice", "adept", "expert", "master", "legendary")
    # Pull the object name from the HTTP response path.
    name = unquote(request.path.split("details/")[1])
    temp = obj.objects(name=name).first()
    # Difficulties for which a character (Progress document) exists.
    started = {doc.difficulty for doc in Progress.objects.all()}
    def _completionFor(completion):
        # One entry per difficulty: completion count plus whether a
        # character of that difficulty has been started (None otherwise).
        return {
            difficulty: {
                "times": getattr(completion, difficulty),
                "started": True if difficulty in started else None,
            }
            for difficulty in difficulties
        }
    if embedded is None:
        return {objClass: temp, "completion": _completionFor(temp.completion)}
    data = {objClass: temp, "embedded": []}
    for subDoc in temp[embedded]:
        data["embedded"].append(
            {"embeddedObj": subDoc, "completion": _completionFor(subDoc.completion)}
        )
    return data
def createBackup(difficulty):
    """Snapshot the Progress document for *difficulty* into its Backup.

    One Backup document is kept per difficulty and overwritten on every call.
    Character stats are copied verbatim; for each tracked collection the
    backup records, per source, the names of items completed at this
    difficulty, plus counts of official ("count") and mod ("modCount")
    content.
    """
    # Pull Progress Object Data
    progress = Progress.objects(difficulty=difficulty).first()
    # Reuse the existing backup for this difficulty, or start a fresh one.
    # (The original queried Backup twice and left a debug print here.)
    backup = Backup.objects(difficulty=difficulty).first() or Backup()
    backup["ts"] = datetime.strftime(datetime.now(), "%A %B %d, %Y %H:%M:%S:%f %Z")
    backup["difficulty"] = difficulty
    # Copy the character stats verbatim.
    for field in ("level", "health", "magicka", "stamina", "skills", "completion"):
        backup[field] = progress[field]
    for obj in [{"key": "quests", "object": Quest}, {"key": "perks", "object": Perk}, {"key": "locations", "object": Location},
                {"key": "spells", "object": Spell}, {"key": "enchantments", "object": Enchantment}, {"key": "weapons", "object": Weapon},
                {"key": "armors", "object": Armor}, {"key": "jewelry", "object": Jewelry}, {"key": "books", "object": Book},
                {"key": "keys", "object": Key}, {"key": "collectibles", "object": Collectible}, {"key": "words", "object": Shout},
                {"key": "ingredients", "object": Ingredient}]:
        backup[obj["key"]] = {"sources": {}, "count": 0, "modCount": 0}
        for o in obj["object"].objects.all():
            # Official content counts toward "count"; everything else is a mod.
            if o["source"] in ("vanilla", "dragonborn", "dawnguard", "hearthfire"):
                backup[obj["key"]]["count"] += 1
            else:
                backup[obj["key"]]["modCount"] += 1
            if obj["key"] == "words":
                # Shouts track completion per word; record learned translations.
                tempWord = {"shout": o["name"], "words": []}
                for word in o["words"]:
                    if word["completion"][difficulty] > 0:
                        if not backup[obj["key"]]["sources"].get(o["source"]):
                            backup[obj["key"]]["sources"][o["source"]] = []
                        tempWord["words"].append(word["translation"])
                if tempWord["words"]:
                    backup[obj["key"]]["sources"][o["source"]].append(tempWord)
            elif obj["key"] == "ingredients":
                # Ingredients track completion per discovered effect.
                tempIngredient = {"ingredient": o["name"], "effects": []}
                for effect in o["effects"]:
                    if effect["completion"][difficulty] > 0:
                        if not backup[obj["key"]]["sources"].get(o["source"]):
                            backup[obj["key"]]["sources"][o["source"]] = []
                        tempIngredient["effects"].append(effect["name"])
                if tempIngredient["effects"]:
                    backup[obj["key"]]["sources"][o["source"]].append(tempIngredient)
            else:
                # Everything else is a simple completed/not-completed item.
                if o["completion"][difficulty] > 0:
                    if not backup[obj["key"]]["sources"].get(o["source"]):
                        backup[obj["key"]]["sources"][o["source"]] = []
                    backup[obj["key"]]["sources"][o["source"]].append(o["name"])
    backup.save()
#####################
##### Home Page #####
#####################
def index(request):
    """Render the Skyrim SE home page."""
    return render(request, 'skyrimseIndex.html')
############################
##### Progress Related #####
############################
def progress(request):
    """Render the progress overview page: one entry per difficulty, with the
    character stats filled in for difficulties that have been started."""
    # Pull list of progress objects
    docs = Progress.objects.all()
    # Every difficulty defaults to "not started"; started ones are overwritten below.
    data = {"novice": {"started": False}, "apprentice": {"started": False}, "adept": {"started": False},
            "expert": {"started": False}, "master": {"started": False}, "legendary": {"started": False}}
    # Set whether an object exists
    for doc in docs:
        data[doc.difficulty] = {"started": True, "level": doc.level,
                                "health": doc.health,
                                "magicka": doc.magicka,"stamina": doc.stamina,
                                "completion": {"vanilla": doc.completion.vanilla,
                                                "mod": doc.completion.mod, "lastBackup": None}}
    # Attach the latest backup timestamp per difficulty.
    # NOTE(review): "lastBackup" is initialised inside the "completion" dict
    # above but written at the entry's top level here; this also assumes a
    # Backup only exists for started difficulties (KeyError otherwise) —
    # confirm against the template and deletion flow.
    for backup in Backup.objects.all():
        data[backup.difficulty]["lastBackup"] = backup["ts"]
    return render(request, 'skyrimseProgress.html', {'data': data})
def addDifficulty(request):
# Start a new progress object
progress = Progress()
# Pull difficulty from HTTP Request.path
difficulty = request.path.split("=")[1]
# Set/save starting info for a progress object
progress.created = datetime.strftime(datetime.now(), "%A %B %d, %Y %H:%M:%S:%f %Z")
progress.difficulty = difficulty
progress.level = 1
progress.health = 100
progress.magicka = 100
progress.stamina = 100
progress.completion = Completion(vanilla=0, mod=0)
progress.skills = Skills(alchemy=Skill(level=15, legendary=0), alteration=Skill(level=15, legendary=0),
archery=Skill(level=15, legendary=0), block=Skill(level=15, legendary=0),
conjuration=Skill(level=15, legendary=0), destruction=Skill(level=15, legendary=0),
enchanting=Skill(level=15, legendary=0), heavyArmor=Skill(level=15, legendary=0),
illusion=Skill(level=15, legendary=0), lightArmor=Skill(level=15, legendary=0),
lockpicking=Skill(level=15, legendary=0), oneHanded=Skill(level=15, legendary=0),
pickPocket=Skill(level=15, legendary=0), restoration=Skill(level=15, legendary=0),
smithing=Skill(level=15, legendary=0), sneak=Skill(level=15, legendary=0),
speech=Skill(level=15, legendary=0), twoHanded=Skill(level=15, legendary=0))
# Get Quest Count
questCount = sum([len(Quest.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modQuestCount = len(Quest.objects.all()) - questCount
# Get Perk Count
perkCount = sum([len(Perk.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modPerkCount = len(Perk.objects.all()) - perkCount
# Get Word(Shout) Count
wordCount = sum([len(Shout.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]]) * 3
modWordCount = (len(Shout.objects.all()) * 3) - wordCount
# Get Location Count
locationCount = sum([len(Location.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modLocationCount = (len(Location.objects.all())) - locationCount
# Get Spell Count
spellCount = sum([len(Spell.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modSpellCount = (len(Spell.objects.all())) - spellCount
# Get Enchantment Count
enchantmentCount = sum([len(Enchantment.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modEnchantmentCount = (len(Enchantment.objects.all())) - enchantmentCount
# Get Ingredient Count
ingredientCount = sum([len(Ingredient.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modIngredientCount = (len(Ingredient.objects.all())) - ingredientCount
# Get Weapon Count
weaponCount = sum([len(Weapon.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modWeaponCount = (len(Weapon.objects.all())) - weaponCount
# Get Armor Count
armorCount = sum([len(Armor.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modArmorCount = (len(Armor.objects.all())) - armorCount
# Get Jewelry Count
jewelryCount = sum([len(Jewelry.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modJewelryCount = (len(Jewelry.objects.all())) - jewelryCount
# Get Book Count
bookCount = sum([len(Book.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modBookCount = (len(Book.objects.all())) - bookCount
# Get Key Count
keyCount = sum([len(Key.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modKeyCount = (len(Key.objects.all())) - keyCount
# Get Collectible Count
collectibleCount = sum([len(Collectible.objects(source=source)) for source in ["vanilla", "dawnguard", "dragonborn", "hearthfire"]])
modCollectibleCount = (len(Collectible.objects.all())) - collectibleCount
# Get Total Count
totalCount = sum([questCount, perkCount, wordCount, locationCount, spellCount, enchantmentCount,
ingredientCount, weaponCount, armorCount, jewelryCount, bookCount, keyCount, collectibleCount])
modTotalCount = sum([modQuestCount, modPerkCount, modWordCount, modLocationCount, modSpellCount,
modEnchantmentCount, modIngredientCount, modWeaponCount, modArmorCount, modJewelryCount,
modBookCount, modKeyCount, modCollectibleCount])
# Add Counts to Progress object
progress.collected = Collected(quests=0, modQuests=0, perks=0, modPerks=0,
words=0, modWords=0, locations=0, modLocations=0, spells=0, modSpells=0, enchantments=0,
modEnchantments=0, ingredients=0, modIngredients=0, weapons=0, modWeapons=0, armors=0,
modArmors=0, jewelry=0, modJewelry=0, books=0, modBooks=0, keys=0, modKeys=0, collectibles=0,
modCollectibles=0, total=0, modTotal=0)
progress.collectedTotal = Collected(quests=questCount, modQuests=modQuestCount, perks=perkCount,
words=wordCount, modWords=modWordCount, modPerks=modPerkCount, locations=locationCount,
modLocations=modLocationCount, spells=spellCount, modSpells=modSpellCount,
enchantments=enchantmentCount, modEnchantments=modEnchantmentCount, ingredients=ingredientCount,
modIngredients=modIngredientCount, weapons=weaponCount, modWeapons=modWeaponCount, armors=armorCount,
modArmors=modArmorCount, jewelry=jewelryCount, modJewelry=modJewelryCount, books=bookCount,
modBooks=modBookCount, keys=keyCount, modKeys=modKeyCount, collectibles=collectibleCount,
modCollectibles=modCollectibleCount,total=totalCount, modTotal=modTotalCount)
progress.save()
# Create a Backup
createBackup(difficulty=difficulty)
# Generate a Radar Graph for the progress
skillLevels = {"Alchemy": 15, "Alteration": 15, "Archery": 15, "Block": 15, "Conjuration": 15,
"Destruction": 15, "Enchanting": 15, "Heavy Armor": 15, "Illusion": 15, "Light Armor": 15,
"Lockpicking": 15, "One-Handed": 15, "Pickpocket": 15, "Restoration": 15, "Smithing": 15,
"Sneak": 15, "Speech": 15, "Two-Handed": 15}
plotRadars(values=skillLevels, difficulty=progress.difficulty)
return redirect("/skyrimse/progress")
def deleteDifficulty(request):
# Pull progress.id from HTTP request.path
deleteID = request.path.split("=")[1]
progress = Progress.objects(id=deleteID).first()
# Delete the Radar Graph graph
for area in ["combat", "magic", "stealth"]:
strFile = "skyrimse/static/images/progress/{area}-skills-{difficulty}.png".format(area=area, difficulty=progress.difficulty)
if(os.path.isfile(strFile)):
os.remove(strFile)
progress.delete()
# Delete Generic Objects
for obj in [Quest, Perk, Location, Spell, Enchantment, Weapon, Armor, Jewelry,
Book, Key, Collectible]:
for o in obj.objects.all():
o["completion"][progress.difficulty] = 0
o.save()
# Delete Shout Data
for shout in | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri April 3 13:12:06 2020
@author: Omer
"""
"""
User Guide
1- Create a directory named "data_augmentation" and put this dataaugmentation.py file in it.
2- Collect the images for your dataset and create a subdirectory inside the parent, such as "images_to_augment". Then,
    i- Either put all the images in the directory "images_to_augment", or
    ii- Put only those images in "images_to_augment" which you need to augment.
3- Please do not change the imported libraries, even though some are not used.
4- After rotation and affine transformation, image sizes will be larger than your required size. For instance, if the input image is (640 x 640 x 3), then after rotation its size will increase with respect to your angle of rotation.
**** Please place all augmented images into a new directory "images_to_resize" and comment out all the methods in augment_images() in main() except the resize method. Then, change the path of the images folder from "images_to_augment" to "images_to_resize". Run the file again to get the final augmented images of your required size, such as (640 x 640 x 3).
5- Not all images are passed through every operation, such as changes in brightness, contrast and transformation. The image paths have been split using scikit-learn, so the ratio of images per operation can be set in 'augment_images()' in main().
"Sample Director Structure"
-data_augmentation
-- images_to_augment
--images_resize
--rotated_images
--resized_images
--changed_brightness
--changed_contrast
--transformesd_images
--augmented_images
"""
"""
Import the required libraries
"""
import numpy as np
import matplotlib
import os
import cv2
from PIL import Image, ImageChops, ImageDraw, ImageOps, ImageFilter, ImageStat, ImageEnhance
import imutils
import ntpath
import sys
import matplotlib.pyplot as plt
import glob
import math
#from blend_modes import blend_modes
from datetime import datetime
import random
from sklearn.model_selection import train_test_split
#import grabcut
global counter
counter = 0
class DataAugmentation:
    def __init__(self):
        # Cache the process's working directory at construction time.
        # NOTE(review): current_path is not read anywhere in the visible
        # methods — presumably kept for callers; confirm before removing.
        self.current_path = os.getcwd()
'''
Importing the paths of the image files inside the directory
'''
def load_paths(self, directory: str) -> list:
self.paths = []
for files in sorted(os.listdir(directory)):
if (files != ".DS_Store"):
self.paths.append(directory+'/'+files)
return self.paths
"""
To make the system general for every typr of dataset, directories for every operation will be created atonomously. The images after with specific operation will be placed in specific folders.
1- resized_images -> contains the images after resizing
2- rotated_images -> contains images after rotations
3- changed_brightness -> contains the images after brightness manipulation
4- changed_contrast -> contains the images after contrast stretching
5- transformed_images -> contains the images after apllication of affine transfprmation for shearing of images
6- augmented_images -> contains the actual outputs of augmneted images after complete augmentation process. These are our actual output images whcih will be our dataset and have to be labeled.
"""
def make_directories(self):
if (not os.path.exists("resized_images")):
os.mkdir("resized_images")
if (not os.path.exists("rotated_images")):
os.mkdir("rotated_images")
if (not os.path.exists("changed_brightness")):
os.mkdir("changed_brightness")
if (not os.path.exists("changed_contrast")):
os.mkdir("changed_contrast")
if (not os.path.exists("augmented_dataset")):
os.mkdir("augmented_dataset")
if (not os.path.exists("Transformed_Images")):
os.makedirs("Transformed_Images")
"""
------------------------------------------------------------------------------
Resize the size of the images
width = maximum width of the image (Which you want to have in output image)
height = Maximum height of the image (Which you want to have in output images)
Usually we want to have square images where Width = Height
output_dim = (width, height) (Output dimentions of the image)
------------------------------------------------------------------------------
"""
    def resize_image(self,width: int, height: int) -> tuple:
        """Resize every image in 'images_to_augment' to (width, height) and
        write the results to 'resized_images/<counter>.jpg'.

        Returns (paths, counter).  NOTE(review): the declared return type was
        'int' but the method returns a tuple; annotation corrected.
        """
        global counter
        self.paths = self.load_paths("images_to_augment")
        # Specified size of the output image.
        self.output_dim = (width, height)
        # Loop over all the images in the folder and resize each one.
        for img_path in self.paths:
            #print(img_path)
            # Read the image as a numpy array, keeping its original channels.
            self.img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
            # INTER_AREA resampling is the usual choice for shrinking images.
            self.resized_img = cv2.resize(self.img, self.output_dim, interpolation = cv2.INTER_AREA)
            # Write the image into the output directory under a numeric name.
            cv2.imwrite("resized_images/"+str(counter)+".jpg",self.resized_img)
            counter += 1
        # NOTE(review): counter is reset to the magic value 2505 after the
        # loop — presumably the starting index for the next stage of this
        # particular dataset; confirm before reusing for another dataset.
        counter = 2505
        return self.paths, counter
###############################################################################
#if __name__ == "__main__":
#image_paths = load_paths("Drones")
#resize_image(640,640)
###############################################################################
"""
To Rotate the images either on
90,180,270 or 360/0 degrees
360/0 degrees will give us the original positioned images
Angles have to spacified by the user
"""
    def rotate_images(self) -> int:
        """Rotate every image in 'resized_images' by each angle in
        self.angles_to_rotate and write the results to
        'rotated_images/<counter>.jpg'."""
        global counter
        self.paths = self.load_paths("resized_images")
        for image_path in self.paths:
            self.image = cv2.imread(image_path)
            # Split the path into directory ("head") and file name ("tail").
            self.head, self.tail = ntpath.split(image_path)
            self.title,self.extension = self.tail.split('.')
            # Rotation angles in degrees; rotate_bound grows the canvas so
            # corners are not clipped.
            self.angles_to_rotate = [-18,18]
            #for angle in np.arange(135, 360, 110):
            for angle in self.angles_to_rotate:
                self.rotated = imutils.rotate_bound(self.image, angle)
                cv2.imwrite("rotated_images/"+str(counter)+".jpg",self.rotated)
                counter += 1
        #cv2.imwrite("Transformed_Images/"+title+"/"+str(t[19])+".png",dst19)
        # NOTE(review): counter reset to the magic value 2505 (same as in
        # resize_image) — presumably the next stage's starting index; confirm.
        counter = 2505
        return counter
"""
------------------------------------------------------------------------------
Gamma correction usually changing the apearance if colours in the image are in a same way
as the human eye will percieve the colours.
Reference
https://www.cambridgeincolour.com/tutorials/gamma-correction.htm
Gamma values < 1 will make the image brighter just by brightening the dark pixels in the images
Gamma values > 1 will darken the images to make bright pixels more visible na dark pixels to go
darker
Inputes:
Image (array) file
Basic start value of Gamma (1.0)
Outputs:
Gamma stretched images with different rannge of gamma
"""
def adjust_gamma(self,image, gamma= 1.0):
global counter
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
self.invGamma = 1.0 / gamma
#print('processed_1')
self.table = np.array([((i / 255.0) ** self.invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
#print('processed_2')
# apply gamma correction using the lookup table
return cv2.LUT(image, self.table)
"""
Brightness is an attribute of visual perception in which a source appears to be radiating
or reflecting light. In other words, brightness is the perception elicited
by the luminance of a visual target. It is not necessarily proportional to luminance.
Inputs: Paths to images
Outputs: image files with 3 different brightness values
'** however these values can be defined by the user by changing the gamma values'
"""
    def change_brightness(self, path: str) -> int:
        """Write gamma-adjusted copies of the image at *path* into
        'changed_brightness/' and 'augmented_dataset/'.

        NOTE(review): np.arange(0.75, 1.0, 0.5) yields only [0.75], so
        exactly one brightness variant is produced; with more gamma values
        the output name (self.title) would be overwritten — confirm intent.
        """
        global counter
        self.image = cv2.imread(path)
        self.head, self.tail = ntpath.split(path)
        self.title,self.extension = self.tail.split('.')
        for gamma in np.arange(0.75, 1.0, 0.5):
            # ignore when gamma is 1 (there will be no change to the image)
            if gamma == 1:
                continue
            # Guard against non-positive gamma, then apply the correction.
            gamma = gamma if gamma > 0 else 0.1
            self.adjusted = self.adjust_gamma(self.image, gamma=gamma)
            cv2.imwrite("changed_brightness/"+self.title+".jpg",self.adjusted)
            cv2.imwrite("augmented_dataset/"+self.title+".jpg",self.adjusted)
            counter += 1
        return counter
def change_contrast(self, img, level):
global counter
self.factor = (259 * (level + 255)) / (255 * (259 - level))
def contrast(c):
self.value = 128 + self.factor * (c - 128)
return max(0, min(255, self.value))
return img.point(contrast)
"""
Contrast is the difference in luminance or colour that makes an object
(or its representation in an image or display) distinguishable.
In visual perception of the real world, contrast is determined by the difference
in the color and brightness of the object and other objects within the same field of view.
Inputs:
path of image
Outputs:
image with changed contrast
counter value = 1
'** however the contanst values can be defined by user by changing the values for step variable'
"""
    def change_contrast_multi(self, path: str) -> int:
        """Produce contrast-adjusted copies of the image at *path*, saving
        each variant to 'changed_contrast/' and 'augmented_dataset/'.

        NOTE(review): range(-50, 0, 100) yields only [-50], so a single
        contrast level is applied; self.contrast_image (the side-by-side
        canvas) is assembled but never saved — confirm both are intended.
        """
        global counter
        self.steps = []
        for step in range(-50, 0, 100):
            self.steps.append(step)
        self.img = Image.open(path)
        self.head, self.tail = ntpath.split(path)
        self.title, self.extension = self.tail.split('.')
        self.width, self.height = self.img.size
        # One horizontal slot per contrast level on the comparison canvas.
        self.contrast_image = Image.new('RGB', (self.width * len(self.steps), self.height))
        for n, level in enumerate(self.steps):
            self.img_filtered = self.change_contrast(self.img, level)
            self.contrast_image.paste(self.img_filtered, (self.width * n, 0))
            self.img_filtered.save("changed_contrast/"+self.title+".jpg")
            self.img_filtered.save("augmented_dataset/"+self.title+".jpg")
            counter += 1
        #counter = 1
        return counter
"""
Affine transformation is used for sheating the images at certain level
Therefore, I am applying affine transformations to shear the images which will change the shape of objects
in th images and their position at certain level
Inputs:
path of image
Outputs:
image with shearing
counter value = 1
"""
def img_transform(self, paths: str) -> int:
global counter
self.img = cv2.imread(paths, cv2.IMREAD_UNCHANGED)
self.rows,self.cols,self.ch = self.img.shape
self.head, self.tail = ntpath.split(paths)
self.title, self.extension = self.tail.split('.')
self.t = []
for i in range(0,100):
self.t.append(i)
self.matrix_to_apply = random.randint(1,9)
| |
<gh_stars>0
'''
Created on Mar 10, 2016
@author: <NAME>
'''
# Imports
import sys
import multiprocessing
import math
# Constants
MAX_X = 500
MAX_Y = 500
STEPS_MAX = 200
UNCHANGED_MAX = 10
NSTEPS_INDEX = 0
VEGIES_INDEX = 1
###########################
# Method called by each worker process to run game of life simulations.
#
# INPUTS:
# nx: The x dimension of the grid.
# ny: The y dimension of the grid.
# maxSteps: The max number of time steps to simulate.
# maxUnchanged: The max number of time steps with no vegetation change to
# simulate.
# prob: The probability of vegetation being placed in any given grid space.
# seed0: The original seed given in the input.
# mySims: The number of simulations this process will run.
# myProcess: Which process number this is, starting at 1.
# queue: The queue used to hold the results of all simulations run by this
# process, where each is a pair of integers stored in a tuple.
# OUTPUTS: None.
###########################
def worker(nx, ny, maxSteps, maxUnchanged, prob, seed0, mySims, myProcess,
           queue):
    """Run ``mySims`` game-of-life simulations and put each (steps, vegies)
    result tuple on *queue*.

    Fix: the original used Python 2's ``xrange``, which raises NameError on
    Python 3 (the rest of the file uses Python 3 ``print()``/``input()``).
    """
    steps = 0
    vegies = 0
    seed = 0
    # +2 in each dimension leaves a one-cell border used for edge wrapping.
    grid = [[0 for i in range(MAX_X + 2)] for j in range(MAX_Y + 2)]
    for i in range(mySims):
        # Derive a distinct, reproducible seed per simulation.
        seed = seed0 * ((myProcess * mySims) - i)
        initializeGrid(grid, nx, ny, seed, prob)
        steps, vegies = gameOfLife(grid, nx, ny, maxSteps, maxUnchanged)
        queue.put((steps, vegies))
    return
###########################
# Initializes an empty grid given grid dimensions, a seed, and vegetation
# probability.
#
# INPUTS:
# grid: A grid of vegetation values.
# nx: The x dimension of the grid.
# ny: The y dimension of the grid.
# seed: A random number seed.
# prob: The probability of vegetation being placed in any given grid space.
# OUTPUTS: None.
###########################
def initializeGrid(grid, nx, ny, seed, prob):
    """Randomly seed the nx-by-ny interior of *grid*: each cell gets 1 with
    probability *prob*, else 0.

    Fix: Python 2 ``xrange`` replaced with ``range`` (py3 file).
    """
    for i in range(1, nx + 1):
        for j in range(1, ny + 1):
            # A distinct, reproducible sub-seed for every grid cell.
            index = ny * i + j
            newSeed = seed + index
            grid[i][j] = 1 if rand1(newSeed) <= prob else 0
    return
###########################
# Runs a simulation of the game of life given an initialized grid, dimensions,
# and loop restrictions.
#
# INPUTS:
# grid: A grid of vegetation values.
# nx: The x dimension of the grid.
# ny: The y dimension of the grid.
# maxSteps: The max number of time steps to simulate.
# maxUnchanged: The max number of time steps with no vegetation change to
# simulate.
# OUTPUTS:
# The number of steps the simulation took and the final vegetation amount.
###########################
def gameOfLife(grid, nx, ny, maxSteps, maxUnchanged):
    """Simulate vegetation life on the nx-by-ny interior of *grid* until the
    population stabilizes, dies out, or *maxSteps* time steps have run.

    *grid* must be at least (nx + 2) x (ny + 2); the border rows/columns are
    overwritten each step to implement edge wrapping.  Returns
    (steps, vegies): time steps taken and the final vegetation total.

    Fixes: Python 2 ``xrange`` replaced with ``range``; the scratch grid is
    sized from nx/ny instead of MAX_X/MAX_Y (the original MAX_X x MAX_Y
    sizing indexed out of range when nx or ny reached the maximum).
    """
    steps = 1            # counts the time steps
    converged = False    # true if the vegetation has stabilized
    nUnchanged = 0       # time steps with no vegetation change
    oldVegies = -1       # vegetation totals of the previous three steps
    old2Vegies = -1
    old3Vegies = -1
    vegies = 1           # total amount of vegetation
    neighbors = 0        # quantity of neighboring vegetation
    tempGrid = [[0 for j in range(ny + 2)] for i in range(nx + 2)]
    # Run time steps while the vegetation has not stabilized, some remains,
    # and the step budget is not exhausted.
    while (not converged and vegies > 0 and steps < maxSteps):
        # Count the total amount of vegetation.
        vegies = 0
        for i in range(1, nx + 1):
            for j in range(1, ny + 1):
                vegies = vegies + grid[i][j]
        # If the total matches any of the last three steps, bump the
        # unchanged counter and check for stabilization.
        if (vegies == oldVegies or vegies == old2Vegies
                or vegies == old3Vegies):
            nUnchanged += 1
            if (nUnchanged >= maxUnchanged):
                converged = True
        else:
            nUnchanged = 0
        # Shift the history of vegetation totals.
        old3Vegies = old2Vegies
        old2Vegies = oldVegies
        oldVegies = vegies
        if (not converged):
            # Copy opposite edges into the border cells to wrap the world.
            for i in range(1, nx + 1):
                grid[i][0] = grid[i][ny]
                grid[i][ny + 1] = grid[i][1]
            for j in range(0, ny + 2):
                grid[0][j] = grid[nx][j]
                grid[nx + 1][j] = grid[1][j]
            # One time step: each cell gains or loses vegetation depending
            # on the amount held by its eight neighbours.
            for i in range(1, nx + 1):
                for j in range(1, ny + 1):
                    neighbors = grid[i - 1][j - 1] + grid[i - 1][j] \
                        + grid[i - 1][j + 1] + grid[i][j - 1] + grid[i][j + 1] \
                        + grid[i + 1][j - 1] + grid[i + 1][j] \
                        + grid[i + 1][j + 1]
                    tempGrid[i][j] = grid[i][j]
                    if (neighbors >= 25 or neighbors <= 3):
                        # Too crowded or too sparse: lose vegetation
                        # (never below zero).
                        tempGrid[i][j] -= 1
                        if (tempGrid[i][j] < 0):
                            tempGrid[i][j] = 0
                    elif (neighbors <= 15):
                        # Moderate neighbourhood: growth, capped at 10.
                        tempGrid[i][j] += 1
                        if (tempGrid[i][j] > 10):
                            tempGrid[i][j] = 10
            # Commit the step by copying tempGrid back into grid.
            for i in range(1, nx + 1):
                for j in range(1, ny + 1):
                    grid[i][j] = tempGrid[i][j]
        steps += 1
    return steps, vegies
###########################
# Generates a float, using on the given seed, that is between 0 and 1.
#
# INPUTS:
# iseed: The integer used to generate the resulting double.
# OUTPUTS:
# A float between 0 and 1.
###########################
def rand1(iseed):
    """Return a pseudo-random float in [0, 1) derived from *iseed*.

    Iterates the Park-Miller "minimal standard" multiplicative congruential
    step (x -> 16807 * x mod 2**31 - 1) five times, carried out in floating
    point, and returns the final state scaled into [0, 1).

    Fix: Python 2 ``xrange`` replaced with ``range`` (py3 file).
    """
    aa = 16807.0          # multiplier
    mm = 2147483647.0     # modulus 2**31 - 1 (a Mersenne prime)
    sseed = 0
    for _ in range(1, 6):
        sseed = iseed
        iseed = int(aa * sseed / mm)
        sseed = (aa * sseed) - (mm * iseed)
        iseed = int(sseed)
    return sseed / mm
###########################
# Main method to run the game of life, using the multiprocessing module.
###########################
def main():
# grid of vegetation values
nx = MAX_X + 1 # x dimension of grid
ny = MAX_Y + 1 # y dimension of grid
maxSteps = STEPS_MAX # max # of time steps to simulate
maxUnchanged = UNCHANGED_MAX # max # of time steps with no vegetation change
stepsResult = 0 # number of steps actually run
vegiesResult = 0 # amount of stable vegetation
nsims = 0 # number of simulations to perform
ndied = 0 # # of populations which die out
nunsettled = 0 # # of populations which don't stabilize
nstable = 0 # # of populations which do stabilize
totalStepsStable = 0.0 # total/average steps to stabilization
totalVegiesStable = 0.0 # total/average stable vegetation
probability = 0 # population probability
seed0 = 0 # random number seed given by input
numProcesses = 0 # total number of processes to use
simsPerProcess = 0 # number of simulations each process will run
queueList = [] # a list of queues
processList = [] # a list of processes
queueResults = () # a tuple which will hold the results of a process
# read in all parameters
try:
numProcesses = int(math.pow(2, float(sys.argv[1])))
except TypeError:
sys.stderr.write("Program argument missing.")
sys.exit()
while (nx > MAX_X or ny > MAX_Y):
print("Enter X and Y dimensions of wilderness: ")
nx = int(input())
ny = int(input())
print("\nEnter population probability: ")
probability = float(input())
print("\nEnter number of simulations: ")
nsims = int(input())
print("\nEnter random number seed: ")
seed0 = int(input())
simsPerProcess = nsims // numProcesses
# | |
opposed
# to a programmer who can call the function with or without a 'password'.
# Hence, we treat an empty password here, as if no 'password' was passed.
password = get_password('Enter a password for an encrypted RSA'
' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
confirm=False) or None
if password is not None:
# This check will not fail, because a mal-formatted passed password fails
# above and an entered password will always be a string (see get_password)
# However, we include it in case PASSWORD_SCHEMA or get_password changes.
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
else:
logger.debug('No password was given. Attempting to import an'
' unencrypted file.')
# Read the contents of 'filepath' that should be a PEM formatted private key.
with open(filepath, 'rb') as file_object:
pem_key = file_object.read().decode('utf-8')
# Convert 'pem_key' to 'securesystemslib.formats.RSAKEY_SCHEMA' format.
# Raise 'securesystemslib.exceptions.CryptoError' if 'pem_key' is invalid.
# If 'password' is None decryption will be omitted.
rsa_key = securesystemslib.keys.import_rsakey_from_private_pem(pem_key,
scheme, password)
return rsa_key
def import_rsa_publickey_from_file(filepath, scheme='rsassa-pss-sha256'):
  """
  <Purpose>
    Load the RSA key stored in PEM format at 'filepath' and return it as an
    object matching 'securesystemslib.formats.RSAKEY_SCHEMA'.  Any private
    key material present in the PEM is not imported.

  <Arguments>
    filepath:
      <filepath>.pub file, an RSA PEM file.

    scheme:
      The signature scheme used by the imported key.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'filepath' is improperly
    formatted.

    securesystemslib.exceptions.Error, if a valid RSA key object cannot be
    generated.  This may be caused by an improperly formatted PEM file.

  <Side Effects>
    'filepath' is read and its contents extracted.

  <Returns>
    An RSA key object conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
  """

  # Validate both arguments up front so that any mismatch surfaces as a
  # securesystemslib.exceptions.FormatError before file I/O happens.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
  securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)

  # The file is expected to hold the public portion of an RSA key in PEM
  # form; read it as bytes and decode to text for the parser.
  with open(filepath, 'rb') as file_object:
    rsa_pubkey_pem = file_object.read().decode('utf-8')

  # A PEM that fails to parse is reported as a generic Error (not a
  # FormatError), matching the contract documented above.
  try:
    rsakey_dict = securesystemslib.keys.import_rsakey_from_public_pem(
        rsa_pubkey_pem, scheme)

  except securesystemslib.exceptions.FormatError as e:
    raise securesystemslib.exceptions.Error('Cannot import improperly formatted'
      ' PEM file.' + repr(str(e)))

  return rsakey_dict
def generate_and_write_ed25519_keypair(filepath=None, password=None):
  """
  Create a new Ed25519 keypair and write both halves to disk.

  The public portion is written, in metadata (JSON) format, to
  '<filepath>.pub'.  The private portion is written to '<filepath>',
  encrypted with a key derived from 'password' unless the password is the
  empty string.  When no filepath is given, the generated key's KEYID is
  used as the filename and both files are placed in the current working
  directory.

  The private key is encrypted according to 'cryptography's approach:
  "Encrypt using the best available encryption for a given key's backend.
  This is a curated encryption choice and the algorithm may change over
  time."

  <Arguments>
    filepath:
      Destination of the private key; the public key goes to
      '<filepath>.pub'.  Defaults to '<cwd>/<KEYID>'.

    password:
      The passphrase used to derive the symmetric encryption key for the
      private portion.  If None, the caller is prompted for one; if empty,
      the private key is stored unencrypted.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.CryptoError, if 'filepath' cannot be
    encrypted.

  <Side Effects>
    Writes key files to '<filepath>' and '<filepath>.pub'.

  <Returns>
    The 'filepath' of the written private key.
  """

  # Generate the Ed25519 key whose halves will be written out; its KEYID is
  # also needed for the default filename below.
  ed25519_key = securesystemslib.keys.generate_ed25519_key()

  if filepath:
    logger.debug('The filepath has been specified. Not using the key\'s'
        ' KEYID as the default filepath.')
  else:
    filepath = os.path.join(os.getcwd(), ed25519_key['keyid'])

  # Raise 'securesystemslib.exceptions.FormatError' if 'filepath' does not
  # match 'PATH_SCHEMA'.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  if password is None: # pragma: no cover
    # It is safe to show the full path of 'filepath' in the prompt; however,
    # care should be taken when including the full path in exceptions and
    # log files.
    password = get_password('Enter a password for the Ed25519'
        ' key (' + Fore.RED + filepath + Fore.RESET + '): ',
        confirm=True)

  else:
    logger.debug('The password has been specified. Not prompting for one.')

  # Raise 'securesystemslib.exceptions.FormatError' if 'password' does not
  # match 'PASSWORD_SCHEMA'.
  securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)

  # Create any missing parent directories of 'filepath'.
  securesystemslib.util.ensure_parent_dir(filepath)

  # Stage the public half: metadata-format JSON (without the keyid portion),
  # written to a temporary file that is then moved to '<filepath>.pub'.
  # The temporary file is closed after the final move.
  public_metadata = securesystemslib.keys.format_keyval_to_metadata(
      ed25519_key['keytype'], ed25519_key['scheme'], ed25519_key['keyval'],
      private=False)

  pub_file = securesystemslib.util.TempFile()
  pub_file.write(json.dumps(public_metadata).encode('utf-8'))
  pub_file.move(filepath + '.pub')

  # Prepare the private half, conformant to
  # 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA' when a password is given.
  # Raise 'securesystemslib.exceptions.CryptoError' if encryption fails.
  if len(password):
    ed25519_key = securesystemslib.keys.encrypt_key(ed25519_key, password)

  else:
    logger.debug('An empty password was given. '
                 'Not encrypting the private key.')
    ed25519_key = json.dumps(ed25519_key)

  priv_file = securesystemslib.util.TempFile()
  priv_file.write(ed25519_key.encode('utf-8'))
  priv_file.move(filepath)

  return filepath
def import_ed25519_publickey_from_file(filepath):
  """
  Load the Ed25519 public key stored at 'filepath' and return it in
  'securesystemslib.formats.ED25519KEY_SCHEMA' format (including the keyid).
  Any private-key material present in the file's key object is discarded.

  <Arguments>
    filepath:
      <filepath>.pub file, a public key file.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'filepath' is improperly
    formatted or is an unexpected key type.

  <Side Effects>
    The contents of 'filepath' is read and saved.

  <Returns>
    An ED25519 key object conformant to
    'securesystemslib.formats.ED25519KEY_SCHEMA'.
  """

  # Raise 'securesystemslib.exceptions.FormatError' if 'filepath' does not
  # match 'PATH_SCHEMA'.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # Public Ed25519 keys are stored on disk in JSON metadata format; convert
  # the loaded metadata back into a key object that carries its keyid.
  key_metadata = securesystemslib.util.load_json_file(filepath)
  public_key, _ = \
      securesystemslib.keys.format_metadata_to_key(key_metadata)

  # Defensive re-validation of 'keytype'; 'format_metadata_to_key()' should
  # already have fully validated the metadata.
  if public_key['keytype'] != 'ed25519': # pragma: no cover
    raise securesystemslib.exceptions.FormatError(
        'Invalid key type loaded: ' + repr(public_key['keytype']))

  return public_key
def import_ed25519_privatekey_from_file(filepath, password=None, prompt=False):
"""
<Purpose>
Import the encrypted ed25519 key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format.
The private key (may also contain the public part) is encrypted with AES
256 and CTR the mode of operation. The password is strengthened with
PBKDF2-HMAC-SHA256.
<Arguments>
filepath:
<filepath> file, an RSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ed25519 key
object can be returned.
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ed25519').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ed25519 key object of the form:
'securesystemslib.formats.ED25519KEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is | |
start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetTables(Paginator):
    def paginate(self, DatabaseName: str, CatalogId: str = None, Expression: str = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from
        :py:meth:`Glue.Client.get_tables`.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTables>`_

        Each page of the response is a dict with a ``TableList`` key: a list
        of table descriptors.  A table descriptor contains identification
        (``Name``, ``DatabaseName``, ``Description``, ``Owner``,
        ``CreatedBy``), timestamps (``CreateTime``, ``UpdateTime``,
        ``LastAccessTime``, ``LastAnalyzedTime``), ``Retention``, a
        ``StorageDescriptor`` (``Columns``, ``Location``, ``InputFormat``,
        ``OutputFormat``, ``Compressed``, ``NumberOfBuckets``, ``SerdeInfo``,
        ``BucketColumns``, ``SortColumns``, ``Parameters``, ``SkewedInfo``,
        ``StoredAsSubDirectories``), ``PartitionKeys``, view texts
        (``ViewOriginalText``, ``ViewExpandedText``), ``TableType``, and
        free-form ``Parameters``.

        :type DatabaseName: string
        :param DatabaseName: **[REQUIRED]**
          The database in the catalog whose tables to list. For Hive
          compatibility, this name is entirely lowercase.
        :type CatalogId: string
        :param CatalogId:
          The ID of the Data Catalog where the tables reside. If none is
          supplied, the AWS account ID is used by default.
        :type Expression: string
        :param Expression:
          A regular expression pattern. If present, only those tables whose
          names match the pattern are returned.
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination:
          ``MaxItems`` (total item cap; a ``NextToken`` is provided when more
          items are available), ``PageSize`` (items per page), and
          ``StartingToken`` (the ``NextToken`` from a previous response).
        :rtype: dict
        :returns:
        """
        pass
class GetTriggers(Paginator):
def paginate(self, DependentJobName: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Glue.Client.get_triggers`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetTriggers>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
DependentJobName='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Triggers': [
{
'Name': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'SecurityConfiguration': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
},
]
}
},
],
}
**Response Structure**
- *(dict) --*
- **Triggers** *(list) --*
A list of triggers for the specified job.
- *(dict) --*
Information about | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .utils import *
from .affine_net import *
from .momentum_net import *
import mermaid.module_parameters as pars
import mermaid.model_factory as py_mf
import mermaid.utils as py_utils
from functools import partial
from mermaid.libraries.functions.stn_nd import STNFunction_ND_BCXYZ
class MermaidNet(nn.Module):
"""
this network is an end to end system for momentum generation and mermaid registration
include the following parts
1 . (optional) affine net the affine network is used to affine the source and target image
2. the momentum generation net work, this network is a u-net like encoder decoder
    3. the mermaid part, a map-based registration model would be called from the Mermaid toolkit
In detail of implementation, we should take care of the memory issue, one possible solution is using low-resolution mapping and then upsampling the transformation map
1. affine network, this is a pretrained network, so only the forward model is used,
in current design, the input and output of this net is not downsampled
2. momentum generation net, this is a trainable network, but we would have a low-res factor to train it at a low-resolution
the input may still at original resolution (for high quality interpolation), but the size during the computation and of the output are determined by the low-res factor
3. mermaid part, this is an non-parametric unit, where should call from the mermaid, and the output transformation map should be upsampled to the
    full resolution size. All momentum-based mermaid registration methods should be supported. (todo: support velocity methods)
so the input and the output of each part should be
1. affine: input: source, target, output: s_warped, affine_map
2. momentum: input: init_warped_source, target, output: low_res_mom
3. mermaid: input: s, low_res_mom, low_res_initial_map output: map, warped_source
pay attention in Mermaid toolkit, the image intensity and identity transformation coord are normalized into [0,1],
while in networks the intensity and identity transformation coord are normalized into [-1,1],
todo use the coordinate system consistent with mermaid [0,1]
"""
    def __init__(self, img_sz=None, opt=None):
        """
        Build the end-to-end registration network from the task settings.

        :param img_sz: per-image spatial size (without batch/channel dims), e.g. [D, H, W]
        :param opt: ParameterDict with the task settings; the tuple-style key
            access ``d[('key', default, doc)]`` reads a value or falls back to
            the given default
        """
        super(MermaidNet, self).__init__()
        opt_mermaid = opt['tsk_set']['reg']['mermaid_net']
        low_res_factor = opt['tsk_set']['reg'][('low_res_factor',1.,"factor of low-resolution map")]
        batch_sz = opt['tsk_set']['batch_sz']
        self.record_path = opt['tsk_set']['path']['record_path']
        """record path of the task"""
        self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
        """if is in train mode"""
        self.epoch = 0
        """the current epoch"""
        self.using_physical_coord = opt_mermaid[('using_physical_coord',False,'use physical coordinate system')]
        """'use physical coordinate system"""
        self.loss_type = opt['tsk_set']['loss'][('type','lncc',"the similarity measure type, support list: 'l1','mse','ncc','lncc'")]
        """the similarity measure supported by the mermaid: 'ssd','ncc','ncc_positive','ncc_negative', 'lncc', 'omt'"""
        self.compute_inverse_map = opt['tsk_set']['reg'][('compute_inverse_map', False,"compute the inverse transformation map")]
        """compute the inverse transformation map"""
        self.mermaid_net_json_pth = opt_mermaid[('mermaid_net_json_pth','',"the path for mermaid settings json")]
        """the path for mermaid settings json"""
        self.sym_factor = opt_mermaid[('sym_factor',500,'factor on symmetric loss')]
        """factor on symmetric loss"""
        self.epoch_activate_sym = opt_mermaid[('epoch_activate_sym',-1,'epoch activate the symmetric loss')]
        """epoch activate the symmetric loss"""
        self.epoch_activate_multi_step = opt_mermaid[('epoch_activate_multi_step',-1,'epoch activate the multi-step')]
        """epoch activate the multi-step"""
        self.reset_lr_for_multi_step = opt_mermaid[('reset_lr_for_multi_step',False,'if True, reset learning rate when multi-step begins')]
        """if True, reset learning rate when multi-step begins"""
        self.lr_for_multi_step = opt_mermaid[('lr_for_multi_step',opt['tsk_set']['optim']['lr']/2,'if reset_lr_for_multi_step, reset learning rate when multi-step begins')]
        """if reset_lr_for_multi_step, reset learning rate when multi-step begins"""
        self.multi_step = opt_mermaid[('num_step',2,'compute multi-step loss')]
        """compute multi-step loss"""
        self.using_affine_init = opt_mermaid[('using_affine_init',True,'if ture, deploy an affine network before mermaid-net')]
        """if ture, deploy an affine network before mermaid-net"""
        self.load_trained_affine_net = opt_mermaid[('load_trained_affine_net',True,'if true load_trained_affine_net; if false, the affine network is not initialized')]
        """if true load_trained_affine_net; if false, the affine network is not initialized"""
        self.affine_init_path = opt_mermaid[('affine_init_path','',"the path of trained affined network")]
        """the path of trained affined network"""
        self.affine_resoltuion = opt_mermaid[('affine_resoltuion',[-1,-1,-1],"the image resolution input for affine")]
        self.affine_refine_step = opt_mermaid[('affine_refine_step', 5, "the multi-step num in affine refinement")]
        """the multi-step num in affine refinement"""
        self.optimize_momentum_network = opt_mermaid[('optimize_momentum_network',True,'if true, optimize the momentum network')]
        """if true optimize the momentum network"""
        self.epoch_list_fixed_momentum_network = opt_mermaid[('epoch_list_fixed_momentum_network',[-1],'list of epoch, fix the momentum network')]
        """list of epoch, fix the momentum network"""
        self.epoch_list_fixed_deep_smoother_network = opt_mermaid[('epoch_list_fixed_deep_smoother_network',[-1],'epoch_list_fixed_deep_smoother_network')]
        """epoch_list_fixed_deep_smoother_network"""
        self.clamp_momentum = opt_mermaid[('clamp_momentum',False,'clamp_momentum')]
        """if true, clamp_momentum"""
        self.clamp_thre =opt_mermaid[('clamp_thre',1.0,'clamp momentum into [-clamp_thre, clamp_thre]')]
        """clamp momentum into [-clamp_thre, clamp_thre]"""
        # Runtime flags not read from the settings file.
        self.use_adaptive_smoother = False
        self.print_loss_every_n_iter = 10 if self.is_train else 1
        self.using_sym_on = True if self.is_train else False
        if self.clamp_momentum:
            print("Attention, the clamp momentum is on")
        ##### TODO the sigma also need to be set like sqrt(batch_sz) ##########
        # When the symmetric loss is on, the effective batch size doubles.
        batch_sz = batch_sz if not self.using_sym_on else batch_sz*2
        self.img_sz = [batch_sz, 1] + img_sz
        self.affine_resoltuion = [batch_sz, 1]+ self.affine_resoltuion
        self.dim = len(img_sz)
        self.standard_spacing = 1. / (np.array(img_sz) - 1)
        """ here we define the standard spacing measures the image coord from 0 to 1"""
        spacing_to_refer = opt['dataset'][('spacing_to_refer',[1, 1, 1],'the physical spacing in numpy coordinate, only activate when using_physical_coord is true')]
        self.spacing = normalize_spacing(spacing_to_refer, img_sz) if self.using_physical_coord else 1. / (
                np.array(img_sz) - 1)
        # NOTE(review): 'self.input_img_sz' is not defined anywhere in this
        # method; this line only executes when using_physical_coord is True --
        # confirm the attribute is set elsewhere, otherwise it is a latent bug.
        self.spacing = normalize_spacing(self.spacing, self.input_img_sz) if self.using_physical_coord else self.spacing
        self.spacing = np.array(self.spacing) if type(self.spacing) is not np.ndarray else self.spacing
        self.low_res_factor = low_res_factor
        # The trainable momentum-generation network (u-net like, low-res capable).
        self.momentum_net = MomentumNet(low_res_factor,opt_mermaid)
        if self.using_affine_init:
            self.init_affine_net(opt)
        else:
            print("Attention, the affine net is not used")
        self.mermaid_unit_st = None
        self.init_mermaid_env()
        self.print_count = 0
        self.print_every_epoch_flag = True
        self.n_batch = -1
        self.inverse_map = None
def check_if_update_lr(self):
"""
check if the learning rate need to be updated, in mermaid net, it is implemented for adjusting the lr in the multi-step training
:return: if update the lr, return True and new lr, else return False and None
"""
if self.epoch == self.epoch_activate_multi_step and self.reset_lr_for_multi_step:
lr = self.lr_for_multi_step
self.reset_lr_for_multi_step = False
print("the lr is change into {} due to the activation of the multi-step".format(lr))
return True, lr
else:
return False, None
def init_affine_net(self,opt):
"""
initialize the affine network, if an affine_init_path is given , then load the affine model from the path.
:param opt: ParameterDict, task setting
:return:
"""
self.affine_net = AffineNetSym(self.img_sz[2:],opt)
self.affine_param = None
self.affine_net.compute_loss = False
self.affine_net.epoch_activate_sym = 1e7 # todo to fix this unatural setting
self.affine_net.set_step(self.affine_refine_step)
model_path = self.affine_init_path
if self.load_trained_affine_net and self.is_train:
checkpoint = torch.load(model_path, map_location='cpu')
self.affine_net.load_state_dict(checkpoint['state_dict'])
self.affine_net.cuda()
print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
else:
print("The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
self.affine_net.eval()
def set_cur_epoch(self,epoch=-1):
"""
set current epoch
:param epoch:
:return:
"""
if self.epoch !=epoch+1:
self.print_every_epoch_flag=True
self.epoch = epoch+1
    def set_loss_fn(self, loss_fn):
        """
        Set an external loss function (disabled).

        Intentionally a no-op: the argument is ignored; the criterion created
        in init_mermaid_env is used for the loss instead.

        :param loss_fn: ignored
        :return: None
        """
        pass
def save_cur_mermaid_settings(self,params):
"""
save the mermaid settings into task record folder
:param params:
:return:
"""
saving_path = os.path.join(self.record_path,'nonp_setting.json')
params.write_JSON(saving_path, save_int=False)
params.write_JSON_comments(saving_path.replace('.json','_comment.json'))
    def init_mermaid_env(self):
        """
        setup the mermaid environment

        * saving the settings into record folder
        * initialize model from model, criterion and related variables

        Reads the mermaid settings JSON ('mermaid_net_json_pth'), overrides its
        similarity-measure type with 'self.loss_type', builds the registration
        model and criterion via the mermaid ModelFactory (optionally at a
        low resolution), and creates the identity maps used by the map-based
        implementation.
        """
        spacing = self.spacing
        params = pars.ParameterDict()
        params.load_JSON( self.mermaid_net_json_pth) #''../easyreg/cur_settings_svf.json')
        print(" The mermaid setting from {} included:".format(self.mermaid_net_json_pth))
        print(params)
        model_name = params['model']['registration_model']['type']
        use_map = params['model']['deformation']['use_map']
        compute_similarity_measure_at_low_res = params['model']['deformation'][
            ('compute_similarity_measure_at_low_res', False, 'to compute Sim at lower resolution')]
        # Force the similarity measure configured for this task.
        params['model']['registration_model']['similarity_measure']['type'] =self.loss_type
        params.print_settings_off()
        self.mermaid_low_res_factor = self.low_res_factor
        smoother_type = params['model']['registration_model']['forward_model']['smoother']['type']
        self.use_adaptive_smoother =smoother_type=='learned_multiGaussianCombination'
        lowResSize = None
        lowResSpacing = None
        ##
        # A factor of 1.0 means "no low-res computation": normalize it to None.
        if self.mermaid_low_res_factor == 1.0 or self.mermaid_low_res_factor == [1., 1., 1.]:
            self.mermaid_low_res_factor = None
            self.lowResSize = self.img_sz
            self.lowResSpacing = spacing
        ##
        if self.mermaid_low_res_factor is not None:
            lowResSize = get_res_size_from_size(self.img_sz, self.mermaid_low_res_factor)
            lowResSpacing = get_res_spacing_from_spacing(spacing, self.img_sz, lowResSize)
            self.lowResSize = lowResSize
            self.lowResSpacing = lowResSpacing
        if self.mermaid_low_res_factor is not None:
            # computes model at a lower resolution than the image similarity
            if compute_similarity_measure_at_low_res:
                mf = py_mf.ModelFactory(lowResSize, lowResSpacing, lowResSize, lowResSpacing)
            else:
                mf = py_mf.ModelFactory(self.img_sz, spacing, lowResSize, lowResSpacing)
        else:
            # computes model and similarity at the same resolution
            mf = py_mf.ModelFactory(self.img_sz, spacing, self.img_sz, spacing)
        model, criterion = mf.create_registration_model(model_name, params['model'], compute_inverse_map=self.compute_inverse_map)
        if use_map:
            # create the identity map [0,1]^d, since we will use a map-based implementation
            _id = py_utils.identity_map_multiN(self.img_sz, spacing)
            self.identityMap = torch.from_numpy(_id).cuda()
            if self.mermaid_low_res_factor is not None:
                # create a lower resolution map for the computations
                lowres_id = py_utils.identity_map_multiN(lowResSize, lowResSpacing)
                self.lowResIdentityMap = torch.from_numpy(lowres_id).cuda()
            # Affine input is resized only when a full (no -1) resolution is given.
            resize_affine_input = all([sz != -1 for sz in self.affine_resoltuion[2:]])
            if resize_affine_input:
                self.affine_spacing = get_res_spacing_from_spacing(spacing, self.img_sz, self.affine_resoltuion)
                affine_id = py_utils.identity_map_multiN(self.affine_resoltuion, self.affine_spacing)
                self.affineIdentityMap = torch.from_numpy(affine_id).cuda()
        # NOTE(review): this references 'self.lowResIdentityMap', which is only
        # assigned when use_map and a low-res factor are active -- confirm those
        # settings are always on for supported configurations.
        self.lowRes_fn = partial(get_resampled_image, spacing=spacing, desiredSize=lowResSize, zero_boundary=False,identity_map=self.lowResIdentityMap)
        self.mermaid_unit_st = model.cuda()
        self.criterion = criterion
        self.mermaid_unit_st.associate_parameters_with_module()
        # Persist the effective settings into the task record folder.
        self.save_cur_mermaid_settings(params)
    def get_loss(self):
        """
        Return the overall loss of the most recent computation.

        This is a plain accessor: 'self.overall_loss' is set elsewhere in this
        network; nothing is computed here.

        :return: the overall loss
        """
        return self.overall_loss
def __cal_sym_loss(self,rec_phiWarped):
"""
compute the symmetric loss,
:math: `loss_{sym} = \|(\varphi^{s t})^{-1} \circ(\varphi^{t s})^{-1}-i d\|_{2}^{2}`
:param rec_phiWarped:the transformation map, including two | |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
import math
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
class MrpProduction(models.Model):
    """ Manufacturing Orders """
    _name = 'mrp.production'
    _description = 'Manufacturing Order'
    # Field used as the default date in calendar/gantt views.
    _date_name = 'date_planned_start'
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    # Earliest planned start first; id as tie-breaker.
    _order = 'date_planned_start asc,id'
@api.model
def _get_default_picking_type(self):
return self.env['stock.picking.type'].search([
('code', '=', 'mrp_operation'),
('warehouse_id.company_id', 'in', [self.env.context.get('company_id', self.env.user.company_id.id), False])],
limit=1).id
@api.model
def _get_default_location_src_id(self):
    """Default raw-material source location.

    Prefers the default source location of the picking type passed through
    the context; falls back to the main stock location when unset.
    """
    location = False
    if self._context.get('default_picking_type_id'):
        location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_src_id
    if not location:
        # raise_if_not_found=False: tolerate a missing demo/base record
        location = self.env.ref('stock.stock_location_stock', raise_if_not_found=False)
    return location and location.id or False

@api.model
def _get_default_location_dest_id(self):
    """Default finished-products destination location (same fallback logic
    as ``_get_default_location_src_id``, using the destination field)."""
    location = False
    if self._context.get('default_picking_type_id'):
        location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_dest_id
    if not location:
        location = self.env.ref('stock.stock_location_stock', raise_if_not_found=False)
    return location and location.id or False
# ---- Identification -------------------------------------------------------
name = fields.Char(
    'Reference', copy=False, readonly=True, default=lambda x: _('New'))
origin = fields.Char(
    'Source', copy=False,
    help="Reference of the document that generated this production order request.")
# ---- Product & quantity ---------------------------------------------------
product_id = fields.Many2one(
    'product.product', 'Product',
    domain=[('type', 'in', ['product', 'consu'])],
    readonly=True, required=True,
    states={'confirmed': [('readonly', False)]})
product_tmpl_id = fields.Many2one('product.template', 'Product Template', related='product_id.product_tmpl_id')
product_qty = fields.Float(
    'Quantity To Produce',
    default=1.0, digits=dp.get_precision('Product Unit of Measure'),
    readonly=True, required=True,
    states={'confirmed': [('readonly', False)]})
product_uom_id = fields.Many2one(
    'product.uom', 'Product Unit of Measure',
    oldname='product_uom', readonly=True, required=True,
    states={'confirmed': [('readonly', False)]})
# ---- Logistics ------------------------------------------------------------
picking_type_id = fields.Many2one(
    'stock.picking.type', 'Picking Type',
    default=_get_default_picking_type, required=True)
location_src_id = fields.Many2one(
    'stock.location', 'Raw Materials Location',
    default=_get_default_location_src_id,
    readonly=True, required=True,
    states={'confirmed': [('readonly', False)]},
    help="Location where the system will look for components.")
location_dest_id = fields.Many2one(
    'stock.location', 'Finished Products Location',
    default=_get_default_location_dest_id,
    readonly=True, required=True,
    states={'confirmed': [('readonly', False)]},
    help="Location where the system will stock the finished products.")
# ---- Scheduling -----------------------------------------------------------
date_planned_start = fields.Datetime(
    'Deadline Start', copy=False, default=fields.Datetime.now,
    index=True, required=True,
    states={'confirmed': [('readonly', False)]}, oldname="date_planned")
date_planned_finished = fields.Datetime(
    'Deadline End', copy=False, default=fields.Datetime.now,
    index=True,
    states={'confirmed': [('readonly', False)]})
date_start = fields.Datetime('Start Date', copy=False, index=True, readonly=True)
date_finished = fields.Datetime('End Date', copy=False, index=True, readonly=True)
# ---- BoM / routing --------------------------------------------------------
bom_id = fields.Many2one(
    'mrp.bom', 'Bill of Material',
    readonly=True, states={'confirmed': [('readonly', False)]},
    help="Bill of Materials allow you to define the list of required raw materials to make a finished product.")
routing_id = fields.Many2one(
    'mrp.routing', 'Routing',
    readonly=True, compute='_compute_routing', store=True,
    help="The list of operations (list of work centers) to produce the finished product. The routing "
         "is mainly used to compute work center costs during operations and to plan future loads on "
         "work centers based on production planning.")
# ---- Stock moves & work orders -------------------------------------------
move_raw_ids = fields.One2many(
    'stock.move', 'raw_material_production_id', 'Raw Materials', oldname='move_lines',
    copy=False, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
    domain=[('scrapped', '=', False)])
move_finished_ids = fields.One2many(
    'stock.move', 'production_id', 'Finished Products',
    copy=False, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
    domain=[('scrapped', '=', False)])
workorder_ids = fields.One2many(
    'mrp.workorder', 'production_id', 'Work Orders',
    copy=False, oldname='workcenter_lines', readonly=True)
workorder_count = fields.Integer('# Work Orders', compute='_compute_workorder_count')
workorder_done_count = fields.Integer('# Done Work Orders', compute='_compute_workorder_done_count')
# ---- State & availability -------------------------------------------------
state = fields.Selection([
    ('confirmed', 'Confirmed'),
    ('planned', 'Planned'),
    ('progress', 'In Progress'),
    ('done', 'Done'),
    ('cancel', 'Cancelled')], string='State',
    copy=False, default='confirmed', track_visibility='onchange')
availability = fields.Selection([
    ('assigned', 'Available'),
    ('partially_available', 'Partially Available'),
    ('waiting', 'Waiting'),
    ('none', 'None')], string='Availability',
    compute='_compute_availability', store=True)
unreserve_visible = fields.Boolean(
    'Inventory Unreserve Visible', compute='_compute_unreserve_visible',
    help='Technical field to check when we can unreserve')
post_visible = fields.Boolean(
    'Inventory Post Visible', compute='_compute_post_visible',
    help='Technical field to check when we can post')
# ---- Misc -----------------------------------------------------------------
user_id = fields.Many2one('res.users', 'Responsible', default=lambda self: self._uid)
company_id = fields.Many2one(
    'res.company', 'Company',
    default=lambda self: self.env['res.company']._company_default_get('mrp.production'),
    required=True)
check_to_done = fields.Boolean(compute="_get_produced_qty", string="Check Produced Qty",
    help="Technical Field to see if we can show 'Mark as Done' button")
qty_produced = fields.Float(compute="_get_produced_qty", string="Quantity Produced")
procurement_group_id = fields.Many2one(
    'procurement.group', 'Procurement Group',
    copy=False)
procurement_ids = fields.One2many('procurement.order', 'production_id', 'Related Procurements')
propagate = fields.Boolean(
    'Propagate cancel and split',
    help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too')
has_moves = fields.Boolean(compute='_has_moves')
scrap_ids = fields.One2many('stock.scrap', 'production_id', 'Scraps')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
priority = fields.Selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
    readonly=True, states={'confirmed': [('readonly', False)]}, default='1')
@api.multi
@api.depends('bom_id.routing_id', 'bom_id.routing_id.operation_ids')
def _compute_routing(self):
    """Propagate the BoM routing onto the order, but only when that routing
    actually defines operations; otherwise clear the field."""
    for production in self:
        routing = production.bom_id.routing_id
        production.routing_id = routing.id if routing.operation_ids else False
@api.multi
@api.depends('workorder_ids')
def _compute_workorder_count(self):
    """Count all work orders per production in a single read_group query."""
    data = self.env['mrp.workorder'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
    count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
    for production in self:
        production.workorder_count = count_data.get(production.id, 0)

@api.multi
@api.depends('workorder_ids.state')
def _compute_workorder_done_count(self):
    """Count work orders in state 'done' per production (single query)."""
    data = self.env['mrp.workorder'].read_group([
        ('production_id', 'in', self.ids),
        ('state', '=', 'done')], ['production_id'], ['production_id'])
    count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
    for production in self:
        production.workorder_done_count = count_data.get(production.id, 0)
@api.multi
@api.depends('move_raw_ids.state', 'move_raw_ids.partially_available', 'workorder_ids.move_raw_ids', 'bom_id.ready_to_produce')
def _compute_availability(self):
    """Derive raw-material availability: 'none' (no raw moves), 'assigned',
    'partially_available' or 'waiting', honoring the BoM readiness policy."""
    for order in self:
        if not order.move_raw_ids:
            order.availability = 'none'
            continue
        if order.bom_id.ready_to_produce == 'all_available':
            # Strict policy: a single unreserved open move makes it 'waiting'.
            # (`and/or` chain: truthy any() selects 'waiting', else 'assigned')
            order.availability = any(move.state not in ('assigned', 'done', 'cancel') for move in order.move_raw_ids) and 'waiting' or 'assigned'
        else:
            partial_list = [x.partially_available and x.state in ('waiting', 'confirmed', 'assigned') for x in order.move_raw_ids]
            assigned_list = [x.state in ('assigned', 'done', 'cancel') for x in order.move_raw_ids]
            # Priority: fully assigned > partially available > waiting.
            order.availability = (all(assigned_list) and 'assigned') or (any(partial_list) and 'partially_available') or 'waiting'

@api.depends('state', 'move_raw_ids.reserved_quant_ids')
def _compute_unreserve_visible(self):
    """Unreserve is offered only on open orders that hold reserved quants."""
    for order in self:
        if order.state in ['done', 'cancel'] or not order.move_raw_ids.mapped('reserved_quant_ids'):
            order.unreserve_visible = False
        else:
            order.unreserve_visible = True
@api.multi
@api.depends('move_raw_ids.quantity_done', 'move_finished_ids.quantity_done')
def _compute_post_visible(self):
    """Compute whether the 'Post Inventory' button should be visible.

    A move counts toward posting when some quantity was done and it is still
    open (state neither 'done' nor 'cancel').  For standard-costed products
    both raw-material and finished moves are considered; otherwise only the
    finished moves are.

    Bug fix: the original filtered with ``['done' 'cancel']`` — implicit
    string concatenation producing ``['donecancel']`` — so the state check
    on finished moves never matched and already done/cancelled moves were
    wrongly counted as postable.
    """
    def _has_postable_move(moves):
        # Some quantity done on a move that is neither done nor cancelled.
        return any(move.quantity_done > 0 and move.state not in ('done', 'cancel') for move in moves)

    for order in self:
        if order.product_tmpl_id._is_cost_method_standard():
            order.post_visible = _has_postable_move(order.move_raw_ids) or _has_postable_move(order.move_finished_ids)
        else:
            order.post_visible = _has_postable_move(order.move_finished_ids)
@api.multi
@api.depends('workorder_ids.state', 'move_finished_ids')
def _get_produced_qty(self):
    """Compute the produced quantity and whether 'Mark as Done' can show.

    The button is shown when finished moves exist, the produced quantity
    reached the planned quantity, the order is still open, and every work
    order is done or cancelled.
    """
    for production in self:
        # Only finished moves of the main product (not by-products).
        done_moves = production.move_finished_ids.filtered(lambda x: x.state != 'cancel' and x.product_id.id == production.product_id.id)
        qty_produced = sum(done_moves.mapped('quantity_done'))
        wo_done = True
        if any([x.state not in ('done', 'cancel') for x in production.workorder_ids]):
            wo_done = False
        production.check_to_done = done_moves and (qty_produced >= production.product_qty) and (production.state not in ('done', 'cancel')) and wo_done
        production.qty_produced = qty_produced
    return True

@api.multi
@api.depends('move_raw_ids')
def _has_moves(self):
    # True as soon as at least one raw-material move exists.
    for mo in self:
        mo.has_moves = any(mo.move_raw_ids)

@api.multi
def _compute_scrap_move_count(self):
    """Count scrap records per production in a single read_group query."""
    data = self.env['stock.scrap'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
    count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
    for production in self:
        production.scrap_count = count_data.get(production.id, 0)
# Database-level integrity constraints (enforced by PostgreSQL).
_sql_constraints = [
    ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
    ('qty_positive', 'check (product_qty > 0)', 'The quantity to produce must be positive!'),
]
@api.onchange('product_id', 'picking_type_id', 'company_id')
def onchange_product_id(self):
    """ Finds UoM of changed product. """
    if not self.product_id:
        self.bom_id = False
    else:
        # Pick the product's normal BoM for this picking type/company.
        bom = self.env['mrp.bom']._bom_find(product=self.product_id, picking_type=self.picking_type_id, company_id=self.company_id.id)
        if bom.type == 'normal':
            self.bom_id = bom.id
        else:
            self.bom_id = False
        self.product_uom_id = self.product_id.uom_id.id
        # Restrict selectable UoMs to the product's UoM category.
        return {'domain': {'product_uom_id': [('category_id', '=', self.product_id.uom_id.category_id.id)]}}

@api.onchange('picking_type_id')
def onchange_picking_type(self):
    """Align source/destination locations with the chosen picking type,
    defaulting to the main stock location."""
    location = self.env.ref('stock.stock_location_stock')
    self.location_src_id = self.picking_type_id.default_location_src_id.id or location.id
    self.location_dest_id = self.picking_type_id.default_location_dest_id.id or location.id
@api.model
def create(self, values):
    """Assign a sequence reference and a procurement group if missing, then
    create the order and generate its stock moves."""
    name = values.get('name', False)
    if not name or name == _('New'):
        values['name'] = self.env['ir.sequence'].next_by_code('mrp.production') or _('New')
    if not values.get('procurement_group_id'):
        group = self.env["procurement.group"].create({'name': values['name']})
        values['procurement_group_id'] = group.id
    production = super(MrpProduction, self).create(values)
    production._generate_moves()
    return production
@api.multi
def unlink(self):
    """Refuse to delete any manufacturing order that is not cancelled."""
    for production in self:
        if production.state != 'cancel':
            raise UserError(_('Cannot delete a manufacturing order not in cancel state'))
    return super(MrpProduction, self).unlink()
@api.multi
def _generate_moves(self):
    """Create the finished and raw stock moves for every order (exploding
    the BoM), adjust procure methods, and confirm the raw moves."""
    for production in self:
        production._generate_finished_moves()
        # Ratio between the MO quantity (in BoM UoM) and the BoM quantity.
        factor = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id) / production.bom_id.product_qty
        boms, lines = production.bom_id.explode(production.product_id, factor, picking_type=production.bom_id.picking_type_id)
        production._generate_raw_moves(lines)
        # Check for all draft moves whether they are mto or not
        production._adjust_procure_method()
        production.move_raw_ids.action_confirm()
    return True

def _generate_finished_moves(self):
    """Create and confirm the move that will produce the finished product
    (from the virtual production location to the destination)."""
    move = self.env['stock.move'].create({
        'name': self.name,
        'date': self.date_planned_start,
        'date_expected': self.date_planned_start,
        'product_id': self.product_id.id,
        'product_uom': self.product_uom_id.id,
        'product_uom_qty': self.product_qty,
        'location_id': self.product_id.property_stock_production.id,
        'location_dest_id': self.location_dest_id.id,
        # Chain to the move/procurement that requested this production, if any.
        'move_dest_id': self.procurement_ids and self.procurement_ids[0].move_dest_id.id or False,
        'procurement_id': self.procurement_ids and self.procurement_ids[0].id or False,
        'company_id': self.company_id.id,
        'production_id': self.id,
        'origin': self.name,
        'group_id': self.procurement_group_id.id,
        'propagate': self.propagate,
    })
    move.action_confirm()
    return move

def _generate_raw_moves(self, exploded_lines):
    """Create one raw-material move per exploded BoM line; return them all."""
    self.ensure_one()
    moves = self.env['stock.move']
    for bom_line, line_data in exploded_lines:
        moves += self._generate_raw_move(bom_line, line_data)
    return moves
def _generate_raw_move(self, bom_line, line_data):
quantity = line_data['qty']
# alt_op needed for the case when you explode phantom bom and all the lines will be consumed in the operation given by the parent bom line
alt_op = line_data['parent_line'] and line_data['parent_line'].operation_id.id or False
if bom_line.child_bom_id and bom_line.child_bom_id.type == 'phantom':
return self.env['stock.move']
if bom_line.product_id.type not in ['product', 'consu']:
return self.env['stock.move']
if self.routing_id:
routing = self.routing_id
else:
routing = self.bom_id.routing_id
if routing and routing.location_id:
source_location = routing.location_id
else:
source_location = self.location_src_id
original_quantity = self.product_qty - self.qty_produced
data = {
'name': self.name,
'date': self.date_planned_start,
'date_expected': self.date_planned_start,
'bom_line_id': bom_line.id,
'product_id': bom_line.product_id.id,
'product_uom_qty': quantity,
'product_uom': bom_line.product_uom_id.id,
'location_id': source_location.id,
'location_dest_id': self.product_id.property_stock_production.id,
'raw_material_production_id': self.id,
'company_id': self.company_id.id,
'operation_id': bom_line.operation_id.id or | |
dataset's fact table
i += 1
outfile.write(f"*TOTAL*,{self.totalCOR},{self.totalINC},{self.totalPAR},{self.totalSPU},{self.totalMIS},{self.totalIGN}")
self._possible = self.totalCOR + self.totalINC + self.totalPAR + self.totalMIS
self._actual = self.totalCOR + self.totalINC + self.totalPAR + self.totalSPU
self._wrong = self.totalINC + (self.totalPAR/2) + self.totalMIS + self.totalSPU
self._total = self.totalCOR + self.totalINC + self.totalPAR + self.totalMIS + self.totalSPU
print(f"Done. ({i+1} lines)")
# ==================================================================================================
# Manages the set of outputs.
class OutputSet:
    """Manage the set of DMC outputs for one dataset and generate the
    per-output CSVs plus the entities and MUC-5-metrics fact tables."""
    __slots__ = ('dataset', 'folder', 'DMC_List', 'tuples')

    def __init__(self, dataset):
        Utils.setDS(dataset)  # sets the usage for the current dataset
        self.dataset = dataset
        self.folder = Utils.getFolder("outputs") + Utils._config["CSVs"]  # for the current dataset: "../_post-processing/"
        self.DMC_List = []  # one DMC per (dataset, model, category)
        self.tuples = 0  # number of tuples added so far

    def newOutput(self, model, category):
        """Create, register, and return a new DMC output."""
        o = DMC(self.dataset, model, category)
        self.DMC_List.append(o)
        return o

    def getOutput(self, model, category):
        """Return the DMC for (model, category), creating it when missing."""
        x = None
        for o in self.DMC_List:
            if o.hasSameID(self.dataset, model, category):
                x = o
                break
        if (x is None):  # new output
            x = self.newOutput(model, category)
        return x

    def getNumberOfOutputs(self) -> int:
        return len(self.DMC_List)

    def getNumberOfAddedTuples(self) -> int:
        return self.tuples

    def addTuple(self, model, category, entity, metric):
        """Record *entity* under (model, category) with one-hot MUC counters."""
        o = self.getOutput(model, category)
        COR = int(metric == Metric.COR)
        INC = int(metric == Metric.INC)
        PAR = int(metric == Metric.PAR)
        SPU = int(metric == Metric.SPU)
        MIS = int(metric == Metric.MIS)
        IGN = int(metric == Metric.IGN)
        o.add(entity, COR, INC, PAR, SPU, MIS, IGN)
        self.tuples += 1

    # We assume that all metrics have been computed, including the last one: MIS
    def generateCSVs(self):
        """Generate the per-output CSVs and both fact tables (entities and
        MUC-5 metrics) for this dataset.  -1 encodes a #DIV/0! error."""
        def F(B, P, R):  # F-measure computation
            B_sqr = B**2
            num = ((B_sqr + 1.0) * P * R)
            den = ((B_sqr * P) + R)
            return (num / den) if (den > 0) else -1.0
        factTableFile = Utils.getDSname(self.dataset) + "-RESULTS" + Utils._config["fact-table-file-suffix"]
        metricsFTFile = Utils.getDSname(self.dataset) + "-RESULTS-MUC5-Metrics" + Utils._config["fact-table-file-suffix"]
        with open(self.folder + factTableFile, 'w') as ofFT_entities:
            ofFT_entities.write(f"{Utils.getDMCstructure()},{Utils.getCSVtupleStructure()}\n")
            with open(self.folder + metricsFTFile, 'w') as ofFT_metrics:
                ofFT_metrics.write(Utils.getResultFileHeading())
                for o in self.DMC_List:  # for each output set
                    o.csv(self.folder, ofFT_entities)  # generate the CSVs
                    # Calculate the MUC-5 metrics
                    COR = o.getTotalCORscore()  # Correct
                    PAR = o.getTotalPARscore()  # Partially correct
                    INC = o.getTotalINCscore()  # Incorrect
                    MIS = o.getTotalMISscore()  # Missing: keys (tags) that were not matched.
                    SPU = o.getTotalSPUscore()  # Spurious: results that were not matched with key (tags).
                    POS = o.getPossibleScore()  # ** Possible
                    ACT = o.getActualScore()  # ** Actual
                    WRG = o.getWrongScore()  # ** Wrong
                    TOT = o.getTotalScore()  # ** Total
                    MAT = (COR + (PAR * 0.5))  # Matches: correct and partial
                    CPI = (COR + PAR + INC)  # (correct + partial + incorrect)
                    # -1 indicates #DIV/0! error
                    # Primary Metrics:
                    ERR = (WRG / TOT) if (TOT > 0) else -1  # Error: % of wrong answers.
                    PRE = (MAT / ACT) if (ACT > 0) else -1  # Precision: % of actual answers given which were correct.
                    REC = (MAT / POS) if (POS > 0) else -1  # Recall: % of possible answers which were correct.
                    # Secondary Metrics:
                    UND = (MIS / POS) if (POS > 0) else -1  # Undergeneration
                    OVG = (SPU / ACT) if (ACT > 0) else -1  # Overgeneration
                    SUB = (MAT / CPI) if (CPI > 0) else -1  # Substitution
                    # F-measures:
                    Fm__PR = F(1.0, PRE, REC)  # For recall and precision are equally important.
                    Fm_2PR = F(0.5, PRE, REC)  # For recall half as important as precision.
                    Fm_P2R = F(2.0, PRE, REC)  # For recall twice as important as precision.
                    ofFT_metrics.write(\
                        f"{o.getIDtuple()}," +\
                        f"{POS},{ACT},{WRG:.2f},{TOT}," +\
                        f"{ERR:.6f},{PRE:.6f},{REC:.6f}," +\
                        f"{Fm__PR:.6f},{Fm_2PR:.6f},{Fm_P2R:.6f}," +\
                        f"{UND:.6f},{OVG:.6f},{SUB:.6f}\n")
        print(f"-> {metricsFTFile} ... Done.")
        print(f"-> {factTableFile} ... Done.")
# ==================================================================================================
# Manages the tags as part of the post-processing input.
class Tags:
    """Manages the tags as part of the post-processing input."""
    # Auxiliary JSON file names, taken from the global configuration.
    FILE_MAPPINGS = Utils._config["mappings-file"]
    FILE_MERGED_TAGS = Utils._config["merged-tags-file"]
    FILE_MERGED_TAGS_BY_CATEGORY = Utils._config["merged-tags-by-category-file"]
    __slots__ = ('dataset', 'folder', 'files', 'tags', 'mergedTags', 'mergedTagsByCategory', 'mappings', 'matchedTags', 'models')
def __init__(self, folder, dataset):
    """Load the mappings, merged tags, and every tag file found in *folder*,
    creating an initial (empty) mappings file when none exists yet."""
    self.folder = folder
    self.dataset = dataset
    self.tags = []  # list of tags obtained from the files
    self.mergedTags = {}  # dict of merged tags from the original compiled list
    self.mergedTagsByCategory = {}  # merged tags by category (for validation of the mappings)
    self.mappings = {}  # mappings: tagged categories -> TNNT categories
    self.matchedTags = {}  # matched tags per model
    self.models = {}  # per processed model: set of analysed categories
    mappings_file , self.mappings = self.loadMappings()
    mergedTags_file, self.mergedTags = self.loadMergedTags()
    mergedTagsByCategory_file, self.mergedTagsByCategory = self.loadMergedTagsByCategory()
    self.files = [ f for f in Path(self.folder).glob("*.json") \
        if (f.is_file() and \
            (str(f) != str(mappings_file)) and \
            (str(f) != str(mergedTags_file)) and \
            (str(f) != str(mergedTagsByCategory_file)) ) ]  # all the tag files excluding the mappings, mergedTags, and mergedTagsByCategory files
    self.loadTags()
    if not(self.mappings):  # the mappings are empty
        self.createInitialMappings()
    print("")
def loadFromFile(self, struct, filename, desc):
    """Load a JSON structure named *filename* from the tag folder.

    Returns a ``(path, data)`` pair; *data* is ``{}`` when the file does
    not exist.  (*struct* is kept for signature compatibility — it is
    always replaced by the loaded content, never read.)
    """
    path = Path(self.folder + filename)
    data = {}
    if path.exists():
        print(f"Loading *{desc}* from [{str(path)}]... ", end="")
        with path.open() as fh:  # mode="rt" (default)
            data = json.loads(fh.read())
        print(f"{len(data):n} {desc} were loaded.")
    else:
        print(f"The *{desc}* file was not found.")
    return path, data
def loadMappings(self):
    """Load the category-mappings JSON; returns (path, dict)."""
    return self.loadFromFile(self.mappings, Tags.FILE_MAPPINGS, "mappings")

def loadMergedTags(self):
    """Load the merged-tags JSON; returns (path, dict)."""
    return self.loadFromFile(self.mergedTags, Tags.FILE_MERGED_TAGS, "merged tags")

def loadMergedTagsByCategory(self):
    """Load the merged-tags-by-category JSON; returns (path, dict)."""
    return self.loadFromFile(self.mergedTagsByCategory, Tags.FILE_MERGED_TAGS_BY_CATEGORY, "merged tags by category")
def createInitialMappings(self):
    """Write a skeleton mappings file: one empty list per tag category,
    to be filled in by hand afterwards."""
    mappings_file = Path(self.folder + Tags.FILE_MAPPINGS)
    print(f"Generating initial mappings file ({str(mappings_file)})...")
    for pair in self.tags:
        category = str(pair[1])[2:]  # "B-PER" --> "PER"
        self.mappings[category] = []  # array of categories to be edited
    with open(str(mappings_file), 'w') as outfile:
        outfile.write(json.dumps(self.mappings, indent=4))
    print(f"{self.howManyMappings():n} mapping elements were created.")
def loadTags(self):
    """Read every tag file into ``self.tags`` and (re)build the merged-tag
    structures when they were not loaded from disk."""
    print(f"Loading the *tags* from {self.folder}...")
    self.tags = []
    for file in self.files:  # from the list of files
        print(f"\t+ Adding {str(file)}...")
        with file.open() as f:  # mode="rt" (default)
            tagFile = json.loads(f.read())
            for pair in tagFile:
                self.tags.append(pair)
    print(f"All *tags* have been loaded. Total number of *tags*: {self.howManyTags():n}")
    if not(self.mergedTags):  # no merged tags were found
        self.merge()
    if not(self.mergedTagsByCategory):  # no merged tags by category were found
        self.mergeByCategory()
# ---- Debug printers -------------------------------------------------------
def printTags(self):
    print(f"Loaded *Tags*:\n{json.dumps(self.tags, indent=4)}\n")

def printMappings(self):
    print(f"Loaded *Mappings*:\n{json.dumps(self.mappings, indent=4)}\n")

def printMergedTags(self):
    print(f"Loaded *Merged Tags*:\n{json.dumps(self.mergedTags, indent=4)}\n")

def printMergedTagsByCategory(self):
    print(f"Loaded *Merged Tags By Category*:\n{json.dumps(self.mergedTagsByCategory, indent=4)}\n")

def printMatchedTags(self):
    print(f"Loaded *Matched Tags*:\n{json.dumps(self.matchedTags, indent=4)}\n")

# ---- Simple accessors -----------------------------------------------------
def getTags(self):
    return self.tags

def getMergedTags(self):
    return self.mergedTags

def getMergedTagsByCategory(self):
    return self.mergedTagsByCategory

def getMappings(self):
    return self.mappings

def howManyTags(self) -> int:
    return len(self.tags)

def howManyMergedTags(self) -> int:
    return len(self.mergedTags)

def howManyMatchedTagsIn(self, model) -> int:
    return len(self.matchedTags[model])

def howManyMappings(self) -> int:
    return len(self.mappings)
def merge(self):
def inrange(i) -> bool:
return (i < self.howManyTags())
def Next(i):
return self.tags[i] if inrange(i) else []
def add(entity, tag) -> None:
# print(f"\t+ [tag #{i}]: {{{entity.encode('ascii', 'ignore')} : {tag}}}")
if (entity not in self.mergedTags):
self.mergedTags[entity] = [tag] # a list of tags
else:
(self.mergedTags[entity]).append(tag) # append the tag in the list
print(f"Merging the *tags*...")
self.mergedTags = {}
i = 0
while inrange(i): # index the list's range
p1, p2 = Next(i), Next(i + 1)
# For all the datasets, we have the same expected format of the tag files: { if (self.dataset == Dataset.CONLL2003): }
entity = p1[0] # default value: from the first pair
tag = p1[1][2:] # "B-PER" --> "PER"
if p1 and p2: # we have the two pairs
if ( (p1[1].startswith("B-")) and (p2[1].startswith("I-")) and (tag == p2[1][2:]) ): # ("B-LOC", "I-LOC")
entity = p1[0] + " " + p2[0] # merge p1 and p2
j = 2
p = Next(i + j)
while p: # loop the following pairs --> "I-LOC", "I-LOC"...
if ( (p[1].startswith("I-")) and (tag == p[1][2:]) ):
entity = entity + " " + p[0] # merge pj...
j += 1
p = Next(i + j)
else:
break # exit loop
i += j # shift by *j*
else: # two separate entities: just add the first one
i += 1 # shift by 1
add(entity, tag) # adds either p1 or (merged pairs)
if (i == self.howManyTags()-1) and not p2: # (i is the previous to the last position) and (p2 is empty)
break # exit main loop
for (k,v) in self.mergedTags.items():
self.mergedTags[k] = list(set(v)) # remove duplicate tags
mergedTags_file = Path(self.folder + Tags.FILE_MERGED_TAGS)
print(f"Generating merged | |
self._minval, self._maxval
for idx in range(len(created_vars), size):
assert idx == len(created_vars)
smtval = z3.Int(self._varname + "@" + str(idx))
space.add(smtval >= minval)
space.add(smtval <= maxval)
created_vars.append(SymbolicInt(smtval))
def __len__(self):
    # The length is itself symbolic (a SymbolicInt), not a realized int.
    return self._len
def __bool__(self) -> bool:
    """Truthiness of the symbolic tuple: True iff it is non-empty.

    Bug fix: the original compared ``self._len.var == 0``, which made an
    *empty* tuple truthy and a non-empty one falsy — the inverse of
    Python's sequence truthiness contract (``bool(()) is False``).
    """
    return SymbolicBool(self._len.var != 0).__bool__()
def __eq__(self, other):
    """Symbolic equality: cheap structural checks first, then a conjunction
    of per-element SMT constraints."""
    if self is other:
        return True
    if not is_iterable(other):
        return False
    if len(self) != len(other):
        return False
    otherlen = realize(len(other))
    with NoTracing():
        # Materialize enough symbolic elements to pair with `other`.
        self._create_up_to(otherlen)
        constraints = []
        for (int1, int2) in zip(self._created_vars, tracing_iter(other)):
            smtint2 = force_to_smt_sort(int2, SymbolicInt)
            constraints.append(int1.var == smtint2)
        return SymbolicBool(z3.And(*constraints))

def __repr__(self):
    # NOTE(review): tuple(self) realizes the full contents for display.
    return str(tuple(self))

def __iter__(self):
    with NoTracing():
        my_smt_len = self._len.var
        created_vars = self._created_vars
        space = context_statespace()
    idx = -1
    while True:
        with NoTracing():
            idx += 1
            # Fork on whether another element exists at this index.
            if not space.smt_fork(idx < my_smt_len):
                return
            self._create_up_to(idx + 1)
        yield created_vars[idx]

def __add__(self, other: object):
    # Concatenation is lazy: no elements are materialized here.
    if isinstance(other, collections.abc.Sequence):
        return SequenceConcatenation(self, other)
    return NotImplemented

def __radd__(self, other: object):
    if isinstance(other, collections.abc.Sequence):
        return SequenceConcatenation(other, self)
    return NotImplemented
def __getitem__(self, argument):
    """Index or slice the symbolic tuple, materializing as few elements as
    possible (a full-range slice and some prefix slices stay lazy)."""
    with NoTracing():
        space = context_statespace()
        if isinstance(argument, slice):
            start, stop, step = argument.start, argument.stop, argument.step
            if start is None and stop is None and step is None:
                return self  # [:] — no copy needed (tuple is immutable)
            start, stop, step = realize(start), realize(stop), realize(step)
            mylen = self._len
            if stop and stop > 0 and space.smt_fork(mylen.var >= stop):
                # Bounded positive stop: materialize just the prefix.
                self._create_up_to(stop)
            elif (
                stop is None
                and 0 <= start
                and space.smt_fork(start <= mylen.var)
                and step is None
            ):
                # [start:] with start within bounds: lazy suffix view.
                # NOTE(review): `0 <= start` assumes start is not None here —
                # a None start with a None stop was already handled above.
                return SliceView(self, start, mylen)
            else:
                # Fallback (negative bounds, steps, ...): realize everything.
                self._create_up_to(realize(mylen))
            return self._created_vars[start:stop:step]
        else:
            argument = realize(argument)
            if argument >= 0 and space.smt_fork(self._len.var > argument):
                self._create_up_to(realize(argument) + 1)
            else:
                # Negative (or out-of-range) index: realize the whole tuple
                # and let the list indexing raise IndexError if needed.
                self._create_up_to(realize(self._len))
            return self._created_vars[argument]
def index(
    self, value: object, start: int = 0, stop: int = 9223372036854775807
) -> int:
    """Return the first index of *value* in [start, stop), mirroring
    ``tuple.index`` (including its TypeError/ValueError behavior)."""
    try:
        start, stop = start.__index__(), stop.__index__()
    except AttributeError:
        # Re-create the error that list.index would give on bad start/stop values:
        raise TypeError(
            "slice indices must be integers or have an __index__ method"
        )
    mylen = self._len
    # Negative bounds count from the (symbolic) end, as in CPython.
    if start < 0:
        start += mylen
    if stop < 0:
        stop += mylen
    for idx in range(max(start, 0), min(stop, mylen)):  # type: ignore
        if self[idx] == value:
            return idx
    raise ValueError
# Fast-path pattern for ASCII-only identifiers (used by isidentifier()).
_ASCII_IDENTIFIER_RE = re.compile("[a-zA-Z_][a-zA-Z0-9_]*")
class AnySymbolicStr(AbcString):
    """Common base for symbolic string implementations; methods mirror the
    ``str`` API while deferring decisions to the SMT solver."""

    def __ch_is_deeply_immutable__(self) -> bool:
        # Strings are immutable all the way down.
        return True

    def __ch_pytype__(self):
        # The concrete Python type this symbolic stands in for.
        return str

    def __ch_realize__(self):
        # Subclasses must provide realization into a concrete str.
        raise NotImplementedError

    def __str__(self):
        with NoTracing():
            return self.__ch_realize__()

    def __repr__(self):
        # Realizes the value; repr of the concrete string.
        return repr(self.__str__())
def _cmp_op(self, other, op):
    """Lexicographic comparison backing __lt__/__le__/__gt__/__ge__.

    Walks both strings in lockstep; the first differing position (or the
    shorter string running out, via zip_longest's None padding) decides.
    """
    assert op in (ops.lt, ops.le, ops.gt, ops.ge)
    if not isinstance(other, str):
        raise TypeError
    if self == other:
        # Equal strings: only the non-strict comparisons hold.
        return True if op in (ops.le, ops.ge) else False
    for (mych, otherch) in zip_longest(iter(self), iter(other)):
        if mych == otherch:
            continue
        if mych is None:
            lessthan = True  # self is a proper prefix of other
        elif otherch is None:
            lessthan = False  # other is a proper prefix of self
        else:
            lessthan = ord(mych) < ord(otherch)
        return lessthan if op in (ops.lt, ops.le) else not lessthan
    assert False  # unreachable: unequal strings must differ somewhere

def __lt__(self, other):
    return self._cmp_op(other, ops.lt)

def __le__(self, other):
    return self._cmp_op(other, ops.le)

def __gt__(self, other):
    return self._cmp_op(other, ops.gt)

def __ge__(self, other):
    return self._cmp_op(other, ops.ge)

def __bool__(self):
    # Truthy iff non-empty; forces a concrete decision.
    return realize(self.__len__() > 0)
def capitalize(self):
    """Return a copy with the first character title-cased and the rest
    lower-cased, matching ``str.capitalize`` (CPython >= 3.8 title-cases
    the first character).

    Bug fix: the original returned ``self[0].title() + self[1:]``, leaving
    the tail unchanged, so ``"aBC".capitalize()`` yielded ``"ABC"``
    instead of the correct ``"Abc"``.
    """
    if self.__len__() == 0:
        return ""
    return self[0].title() + self[1:].lower()
def casefold(self):
    """Unicode casefold; single characters use precomputed SMT mapping
    tables (a codepoint can fold to up to three codepoints)."""
    if len(self) != 1:
        # Reduce to the single-character case, char by char.
        return "".join([ch.casefold() for ch in self])
    char = self[0]
    codepoint = ord(char)
    with NoTracing():
        space = context_statespace()
        smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
        cache = space.extra(UnicodeMaskCache)
        if not space.smt_fork(cache.casefold_exists()(smt_codepoint)):
            return char  # no casefold mapping: unchanged
        smt_1st = cache.casefold_1st()(smt_codepoint)
        if not space.smt_fork(cache.casefold_2nd_exists()(smt_codepoint)):
            return LazyIntSymbolicStr([SymbolicInt(smt_1st)])
        smt_2nd = cache.casefold_2nd()(smt_codepoint)
        if not space.smt_fork(cache.casefold_3rd_exists()(smt_codepoint)):
            return LazyIntSymbolicStr([SymbolicInt(smt_1st), SymbolicInt(smt_2nd)])
        smt_3rd = cache.casefold_3rd()(smt_codepoint)
        return LazyIntSymbolicStr(
            [SymbolicInt(smt_1st), SymbolicInt(smt_2nd), SymbolicInt(smt_3rd)]
        )

def center(self, width, fillchar=" "):
    """Center within *width* using *fillchar*, matching CPython's choice of
    which side gets the extra fill character when the padding is odd."""
    if not isinstance(width, int):
        raise TypeError
    if (not isinstance(fillchar, str)) or len(fillchar) != 1:
        raise TypeError
    mylen = self.__len__()
    if mylen >= width:
        return self
    remainder = width - mylen
    smaller_half = remainder // 2
    larger_half = remainder - smaller_half
    # CPython puts the extra fill char on the left only when both the
    # remainder and the target width are odd; these two branches reproduce
    # that (the halves are equal whenever remainder is even).
    if width % 2 == 1:
        return (fillchar * larger_half) + self + (fillchar * smaller_half)
    else:
        return (fillchar * smaller_half) + self + (fillchar * larger_half)

def count(self, substr, start=None, end=None):
    """Count non-overlapping occurrences via split(); the empty substring
    counts len+1 times, as in CPython."""
    sliced = self[start:end]
    if substr == "":
        return len(sliced) + 1
    return len(sliced.split(substr)) - 1

def encode(self, encoding="utf-8", errors="strict"):
    # Delegate to the codecs machinery (realizes as needed).
    return codecs.encode(self, encoding, errors)
def expandtabs(self, tabsize=8):
    """Replace tabs honoring column-based tab stops, like ``str.expandtabs``.

    Bug fix: the original used ``self.replace("\t", " " * tabsize)``, which
    always inserts ``tabsize`` spaces regardless of the current column —
    but ``"a\tb".expandtabs(8)`` must insert 7 spaces, not 8.  Per CPython,
    a newline or carriage return resets the column, and a non-positive
    tabsize removes tabs entirely.
    """
    if not isinstance(tabsize, int):
        raise TypeError
    parts = []
    column = 0
    for ch in self:
        if ch == "\t":
            if tabsize > 0:
                pad = tabsize - (column % tabsize)
                parts.append(" " * pad)
                column += pad
        else:
            parts.append(ch)
            if ch == "\n" or ch == "\r":
                column = 0
            else:
                column += 1
    return "".join(parts)
def index(self, substr, start=None, end=None):
    """Like find(), but raises ValueError instead of returning -1."""
    idx = self.find(substr, start, end)
    if idx == -1:
        raise ValueError
    return idx

def _chars_in_maskfn(self, maskfn: z3.ExprRef, ret_if_empty=False):
    # Holds common logic behind the str.is* methods:
    # True iff every character's codepoint satisfies the SMT mask function;
    # the empty string returns `ret_if_empty` (False for most is* methods).
    space = context_statespace()
    with ResumedTracing():
        if self.__len__() == 0:
            return ret_if_empty
        for char in self:
            codepoint = ord(char)
            with NoTracing():
                smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
                if not space.smt_fork(maskfn(smt_codepoint)):
                    return False
        return True
def isalnum(self):
    # All chars alphanumeric (empty string -> False).
    with NoTracing():
        maskfn = context_statespace().extra(UnicodeMaskCache).alnum()
        return self._chars_in_maskfn(maskfn)

def isalpha(self):
    with NoTracing():
        maskfn = context_statespace().extra(UnicodeMaskCache).alpha()
        return self._chars_in_maskfn(maskfn)

def isascii(self):
    # Empty string is considered ASCII, matching str.isascii().
    with NoTracing():
        maskfn = context_statespace().extra(UnicodeMaskCache).ascii()
        return self._chars_in_maskfn(maskfn, ret_if_empty=True)

def isdecimal(self):
    with NoTracing():
        maskfn = context_statespace().extra(UnicodeMaskCache).decimal()
        return self._chars_in_maskfn(maskfn)

def isdigit(self):
    with NoTracing():
        maskfn = context_statespace().extra(UnicodeMaskCache).digit()
        return self._chars_in_maskfn(maskfn)

def isidentifier(self):
    """ASCII identifiers are decided symbolically; non-ASCII input falls
    back to realizing the string."""
    if _ASCII_IDENTIFIER_RE.fullmatch(self):
        return True
    elif self.isascii():
        return False
    # The full unicode rules are complex! Resort to realization.
    # (see https://docs.python.org/3.3/reference/lexical_analysis.html#identifiers)
    with NoTracing():
        return realize(self).isidentifier()
def islower(self):
with NoTracing():
space = context_statespace()
lowerfn = space.extra(UnicodeMaskCache).lower()
upperfn = space.extra(UnicodeMaskCache).title() # (covers title and upper)
if self.__len__() == 0:
return False
found_one = False
for char in self:
codepoint = ord(char)
with NoTracing():
smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
if space.smt_fork(upperfn(smt_codepoint)):
return False
if space.smt_fork(lowerfn(smt_codepoint)):
found_one = True
return found_one
def isnumeric(self):
with NoTracing():
maskfn = context_statespace().extra(UnicodeMaskCache).numeric()
return self._chars_in_maskfn(maskfn)
def isprintable(self):
with NoTracing():
maskfn = context_statespace().extra(UnicodeMaskCache).printable()
return self._chars_in_maskfn(maskfn, ret_if_empty=True)
def isspace(self):
with NoTracing():
maskfn = context_statespace().extra(UnicodeMaskCache).space()
return self._chars_in_maskfn(maskfn)
    def istitle(self):
        """Return True if the string is title-cased, like str.istitle().

        Walks the characters as a small state machine: after an uncased
        character the next cased one must be upper/title-case, and a cased
        character may only be lowercase when it follows another cased one.
        """
        with NoTracing():
            space = context_statespace()
            lowerfn = space.extra(UnicodeMaskCache).lower()
            titlefn = space.extra(UnicodeMaskCache).title()
        expect_upper = True
        found_char = False
        for char in self:
            codepoint = ord(char)
            with NoTracing():
                smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
            if space.smt_fork(titlefn(smt_codepoint)):
                # Upper/title-case is only legal at the start of a cased run.
                if not expect_upper:
                    return False
                expect_upper = False
                found_char = True
            elif space.smt_fork(lowerfn(smt_codepoint)):
                # Lowercase may not begin a cased run.
                if expect_upper:
                    return False
            else: # (uncased)
                expect_upper = True
        return found_char
    def isupper(self):
        """Return True if at least one cased character exists and none is lowercase.

        Forks the symbolic state per character instead of realizing the string.
        """
        with NoTracing():
            space = context_statespace()
            lowerfn = space.extra(UnicodeMaskCache).lower()
            upperfn = space.extra(UnicodeMaskCache).upper()
        if self.__len__() == 0:
            return False
        found_one = False
        for char in self:
            codepoint = ord(char)
            with NoTracing():
                smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
            # A single lowercase character disqualifies the string.
            if space.smt_fork(lowerfn(smt_codepoint)):
                return False
            if space.smt_fork(upperfn(smt_codepoint)):
                found_one = True
        return found_one
    def join(self, itr):
        """Concatenate the strings in *itr* separated by this string (str.join)."""
        return _join(self, itr, self_type=str, item_type=str)
def ljust(self, width, fillchar=" "):
if not isinstance(fillchar, str):
raise TypeError
if not isinstance(width, int):
raise TypeError
if len(fillchar) != 1:
raise TypeError
return self + fillchar * max(0, width - len(self))
    def lower(self):
        """Return a lowercased copy, like str.lower().

        Multi-character strings recurse per character; a single character is
        mapped through the UnicodeMaskCache lowercase tables, which may expand
        one codepoint into two.
        """
        if len(self) != 1:
            return "".join([ch.lower() for ch in self])
        char = self[0]
        codepoint = ord(char)
        with NoTracing():
            space = context_statespace()
            smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
            cache = space.extra(UnicodeMaskCache)
            # Characters with no lowercase mapping are returned unchanged.
            if not space.smt_fork(cache.tolower_exists()(smt_codepoint)):
                return char
            smt_1st = cache.tolower_1st()(smt_codepoint)
            # Some codepoints lowercase to two characters (e.g. special casing).
            if not space.smt_fork(cache.tolower_2nd_exists()(smt_codepoint)):
                return LazyIntSymbolicStr([SymbolicInt(smt_1st)])
            smt_2nd = cache.tolower_2nd()(smt_codepoint)
            return LazyIntSymbolicStr([SymbolicInt(smt_1st), SymbolicInt(smt_2nd)])
def lstrip(self, chars=None):
if chars is None:
filter = lambda ch: ch.isspace()
elif isinstance(chars, str):
filter = lambda ch: ch in chars
else:
raise TypeError
for (idx, ch) in enumerate(self):
if not filter(ch):
return self[idx:]
return ""
    def splitlines(self, keepends=False):
        """Split on Unicode line boundaries, like str.splitlines().

        A "\\r\\n" pair counts as a single boundary; *keepends* retains the
        terminators in the returned parts. Recurses on the remainder after
        each boundary found.
        """
        if not isinstance(keepends, int):
            raise TypeError
        mylen = self.__len__()
        if mylen == 0:
            return []
        for (idx, ch) in enumerate(self):
            codepoint = ord(ch)
            with NoTracing():
                space = context_statespace()
                smt_isnewline = space.extra(UnicodeMaskCache).newline()
                smt_codepoint = SymbolicInt._coerce_to_smt_sort(codepoint)
            if not space.smt_fork(smt_isnewline(smt_codepoint)):
                continue
            # "\r\n" is consumed as a single line boundary.
            if codepoint == ord("\r"):
                if idx + 1 < mylen and self[idx + 1] == "\n":
                    token = self[: idx + 2] if keepends else self[:idx]
                    return [token] + self[idx + 2 :].splitlines(keepends)
            token = self[: idx + 1] if keepends else self[:idx]
            return [token] + self[idx + 1 :].splitlines(keepends)
        # No boundary found: the whole string is one line.
        return [self]
def removeprefix(self, prefix):
if not isinstance(prefix, str):
raise TypeError
if self.startswith(prefix):
return self[len(prefix) :]
return self
def removesuffix(self, suffix):
if not isinstance(suffix, str):
raise TypeError
if len(suffix) > 0 and self.endswith(suffix):
return self[: -len(suffix)]
return self
def replace(self, old, new, count=-1):
if not isinstance(old, str) or not isinstance(new, str):
raise TypeError
if count == 0:
return self
if self == "":
return new if old == "" else self
elif old == "":
return new + self[:1] + self[1:].replace(old, new, count - 1)
(prefix, match, suffix) = self.partition(old)
if not match:
return self
return prefix + new + suffix.replace(old, new, count - 1)
def rindex(self, | |
import argparse
import time
import torch
from Models import get_model
from Process import *
from Utils import *
import torch.nn.functional as F
from Optim import CosineWithRestarts
from Batch import create_masks
from Beam import beam_search
from torch.autograd import Variable
import dill as pickle
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
def simple_em(pred, gold):
    """Exact-match score: 1 if *pred* equals *gold* after whitespace normalization, else 0."""
    normalized_pred = remove_whitespace(pred)
    normalized_gold = remove_whitespace(gold)
    return 1 if normalized_pred.strip() == normalized_gold.strip() else 0
def difficulty_em(src, pred, gold):
    """Return ``(difficulty, exact_match)`` for one example.

    Difficulty is the number of composition steps: for multi-step inputs
    (containing '@@SEP@@') it is the separated-segment count; otherwise the
    polish-notation step count of *src*.
    """
    if '@@SEP@@' in src:
        step_count = src.count('@@SEP@@') + 1
        return step_count, simple_em(pred, gold)
    else:
        # BUG FIX: previously called simple_em((pred, gold)) — a single tuple
        # argument — which raised TypeError (missing positional arg 'gold').
        return polish_n_steps(src), simple_em(pred, gold)
def train_model(model, opt, SRC, TRG):
    """Run the full training loop.

    For each epoch: train on opt.train (with progress printing and optional
    timed checkpointing), optionally compute validation loss, every
    opt.val_check_every_n epochs decode the validation set to measure
    exact-match accuracy (which drives the plateau LR scheduler), and after
    the final epoch optionally decode the test set and write generations to
    opt.output_dir.

    :param model: transformer to train; modified in place.
    :param opt: namespace carrying data iterators, optimizer, schedulers and
        all hyperparameters (built in main()).
    :param SRC: source-side field (vocabulary + preprocessing).
    :param TRG: target-side field.
    """
    print("training model...")
    model.train()
    start = time.time()
    if opt.checkpoint > 0:
        cptime = time.time()
    for epoch in range(opt.epochs):
        model.train()
        total_loss = 0
        errors_per_epoch = 0
        if opt.floyd is False:
            print("   %dm: epoch %d [%s]  %d%%  loss = %s" %\
            ((time.time() - start)//60, epoch + 1, "".join(' '*20), 0, '...'), end='\r')
        if opt.checkpoint > 0:
            torch.save(model.state_dict(), 'weights/model_weights')
        for i, batch in enumerate(opt.train):
            src = batch.src.transpose(0, 1).cuda()
            trg = batch.trg.transpose(0, 1).cuda()
            # Teacher forcing: decoder input is the target shifted right.
            trg_input = trg[:, :-1]
            src_mask, trg_mask = create_masks(src, trg_input, opt)
            preds = model(src, trg_input, src_mask, trg_mask)
            # Gold labels are the target shifted left (next-token prediction).
            ys = trg[:, 1:].contiguous().view(-1)
            opt.optimizer.zero_grad()
            loss = F.cross_entropy(preds.view(-1, preds.size(-1)), ys, ignore_index=opt.trg_pad)
            loss.backward()
            opt.optimizer.step()
            if opt.SGDR:
                opt.sched.step()
            total_loss += loss.item()
            if (i + 1) % opt.printevery == 0:
                p = int(100 * (i + 1) / opt.train_len)
                avg_loss = total_loss/opt.printevery
                if opt.floyd is False:
                    print("   %dm: epoch %d [%s%s]  %d%%  loss = %.3f" %\
                    ((time.time() - start)//60, epoch + 1, "".join('#'*(p//5)), "".join(' '*(20-(p//5))), p, avg_loss), end='\r')
                else:
                    print("   %dm: epoch %d [%s%s]  %d%%  loss = %.3f" %\
                    ((time.time() - start)//60, epoch + 1, "".join('#'*(p//5)), "".join(' '*(20-(p//5))), p, avg_loss))
                total_loss = 0
            # Timed checkpoint: save whenever opt.checkpoint minutes elapsed.
            if opt.checkpoint > 0 and ((time.time()-cptime)//60) // opt.checkpoint >= 1:
                torch.save(model.state_dict(), 'weights/model_weights')
                cptime = time.time()
        # NOTE(review): avg_loss is only assigned inside the printevery branch;
        # if an epoch has fewer than opt.printevery batches this print raises
        # NameError — confirm against expected dataset sizes.
        print("%dm: epoch %d [%s%s]  %d%%  loss = %.3f\nepoch %d complete, loss = %.03f" %\
        ((time.time() - start)//60, epoch + 1, "".join('#'*(100//5)), "".join(' '*(20-(100//5))), 100, avg_loss, epoch + 1, avg_loss))
        print('errors per epoch:', errors_per_epoch)
        if opt.calculate_val_loss:
            model.eval()
            val_losses = []
            # NOTE(review): no torch.no_grad() here, so autograd state is kept;
            # the zero_grad() below has no matching backward/step and appears
            # unnecessary — confirm.
            for i, batch in enumerate(opt.val):
                src = batch.src.transpose(0, 1).cuda()
                trg = batch.trg.transpose(0, 1).cuda()
                trg_input = trg[:, :-1]
                src_mask, trg_mask = create_masks(src, trg_input, opt)
                preds = model(src, trg_input, src_mask, trg_mask)
                ys = trg[:, 1:].contiguous().view(-1)
                opt.optimizer.zero_grad()
                loss = F.cross_entropy(preds.view(-1, preds.size(-1)), ys, ignore_index=opt.trg_pad)
                val_losses.append(loss.item())
            print('validation loss:', sum(val_losses)/len(val_losses), '\n')
        # Periodic decoded validation: exact-match accuracy via beam search.
        if (epoch + 1) % opt.val_check_every_n == 0:
            model.eval()
            val_acc, val_success = 0, 0
            val_data = zip_io_data(opt.data_path + '/val')
            for j, e in enumerate(val_data[:opt.n_val]):
                e_src, e_tgt = e[0], e[1]
                if opt.compositional_eval:
                    # Compositional mode: split the input into controller steps
                    # and feed earlier step outputs into later step inputs.
                    controller = eval_split_input(e_src)
                    intermediates = []
                    comp_failure = False
                    for controller_input in controller:
                        if len(controller_input) == 1:
                            controller_src = controller_input[0]
                        else:
                            controller_src = ''
                            for src_index in range(len(controller_input) - 1):
                                controller_src += intermediates[controller_input[src_index]] + ' @@SEP@@ '
                            controller_src += controller_input[-1]
                            controller_src = remove_whitespace(controller_src)
                        indexed = []
                        sentence = SRC.preprocess(controller_src)
                        for tok in sentence:
                            if SRC.vocab.stoi[tok] != 0:
                                indexed.append(SRC.vocab.stoi[tok])
                            else:
                                # Out-of-vocabulary token: abandon this example.
                                comp_failure = True
                                break
                        if comp_failure:
                            break
                        sentence = Variable(torch.LongTensor([indexed]))
                        if opt.device == 0:
                            sentence = sentence.cuda()
                        try:
                            sentence = beam_search(sentence, model, SRC, TRG, opt)
                            intermediates.append(sentence)
                        except Exception as e:
                            comp_failure = True
                            break
                    if not comp_failure:
                        try:
                            val_acc += simple_em(intermediates[-1], e_tgt)
                            val_success += 1
                        except Exception as e:
                            continue
                else:
                    sentence = SRC.preprocess(e_src)
                    indexed = [SRC.vocab.stoi[tok] for tok in sentence]
                    sentence = Variable(torch.LongTensor([indexed]))
                    if opt.device == 0:
                        sentence = sentence.cuda()
                    try:
                        sentence = beam_search(sentence, model, SRC, TRG, opt)
                    except Exception as e:
                        continue
                    try:
                        val_acc += simple_em(sentence, e_tgt)
                        val_success += 1
                    except Exception as e:
                        continue
            # Avoid division by zero when every example failed.
            if val_success == 0:
                val_success = 1
            val_acc = val_acc / val_success
            print('epoch', epoch, '- val accuracy:', round(val_acc * 100, 2))
            print()
            # Plateau scheduler keyed on validation accuracy.
            opt.scheduler.step(val_acc)
        # Final-epoch test pass, mirroring the validation decoding above.
        if epoch == opt.epochs - 1 and opt.do_test:
            model.eval()
            test_data = zip_io_data(opt.data_path + '/test')
            test_predictions = ''
            test_acc, test_success = 0, 0
            for j, e in enumerate(test_data[:opt.n_test]):
                if (j + 1) % 10000 == 0:
                    print(round(j/len(test_data) * 100, 2), '% complete with testing')
                e_src, e_tgt = e[0], e[1]
                if opt.compositional_eval:
                    controller = eval_split_input(e_src)
                    intermediates = []
                    comp_failure = False
                    for controller_input in controller:
                        if len(controller_input) == 1:
                            controller_src = controller_input[0]
                        else:
                            controller_src = ''
                            for src_index in range(len(controller_input) - 1):
                                controller_src += intermediates[controller_input[src_index]] + ' @@SEP@@ '
                            controller_src += controller_input[-1]
                            controller_src = remove_whitespace(controller_src)
                        indexed = []
                        sentence = SRC.preprocess(controller_src)
                        for tok in sentence:
                            if SRC.vocab.stoi[tok] != 0:
                                indexed.append(SRC.vocab.stoi[tok])
                            else:
                                comp_failure = True
                                break
                        if comp_failure:
                            break
                        sentence = Variable(torch.LongTensor([indexed]))
                        if opt.device == 0:
                            sentence = sentence.cuda()
                        try:
                            sentence = beam_search(sentence, model, SRC, TRG, opt)
                            intermediates.append(sentence)
                        except Exception as e:
                            comp_failure = True
                            break
                    if not comp_failure:
                        try:
                            # NOTE(review): scores `sentence` (the last beam
                            # output) where the validation branch uses
                            # intermediates[-1]; these coincide when no failure
                            # occurred — confirm intent.
                            test_acc += simple_em(sentence, e_tgt)
                            test_success += 1
                            test_predictions += sentence + '\n'
                        except Exception as e:
                            test_predictions += '\n'
                            continue
                    else:
                        test_predictions += '\n'
                else:
                    indexed = []
                    sentence = SRC.preprocess(e_src)
                    pass_bool = False
                    for tok in sentence:
                        if SRC.vocab.stoi[tok] != 0:
                            indexed.append(SRC.vocab.stoi[tok])
                        else:
                            pass_bool = True
                            break
                    if pass_bool:
                        continue
                    sentence = Variable(torch.LongTensor([indexed]))
                    if opt.device == 0:
                        sentence = sentence.cuda()
                    try:
                        sentence = beam_search(sentence, model, SRC, TRG, opt)
                    except Exception as e:
                        continue
                    try:
                        test_acc += simple_em(sentence, e_tgt)
                        test_success += 1
                        test_predictions += sentence + '\n'
                    except Exception as e:
                        test_predictions += '\n'
                        continue
            if test_success == 0:
                test_success = 1
            test_acc = test_acc / test_success
            print('test accuracy:', round(test_acc * 100, 2))
            print()
            # NOTE(review): `os` is not imported directly here; presumably it
            # arrives via the star imports above — verify.
            if not os.path.exists(opt.output_dir):
                os.makedirs(opt.output_dir)
            with open(opt.output_dir + '/test_generations.txt', 'w', encoding='utf-8') as f:
                f.write(test_predictions)
def main():
    """Parse CLI options, prepare data/model/optimizer, and run training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-data_path', required=True)
    parser.add_argument('-output_dir', required=True)
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-SGDR', action='store_true')
    parser.add_argument('-val_check_every_n', type=int, default=3)
    parser.add_argument('-calculate_val_loss', action='store_true')
    parser.add_argument('-tensorboard_graph', action='store_true')
    parser.add_argument('-compositional_eval', action='store_true')
    parser.add_argument('-char_tokenization', action='store_true')
    parser.add_argument('-alex', action='store_true')
    parser.add_argument('-n_val', type=int, default=1000)
    parser.add_argument('-n_test', type=int, default=1000)
    parser.add_argument('-do_test', action='store_true')
    parser.add_argument('-epochs', type=int, default=50)
    parser.add_argument('-d_model', type=int, default=512)
    parser.add_argument('-n_layers', type=int, default=6)
    parser.add_argument('-heads', type=int, default=8)
    # BUG FIX: dropout and lr are floats; with type=int any user-supplied
    # value was rejected (int('0.1') raises ValueError in argparse).
    parser.add_argument('-dropout', type=float, default=0.1)
    parser.add_argument('-batchsize', type=int, default=3000)
    parser.add_argument('-printevery', type=int, default=100)
    parser.add_argument('-lr', type=float, default=0.00001)
    parser.add_argument('-load_weights')
    parser.add_argument('-create_valset', action='store_true')
    parser.add_argument('-max_strlen', type=int, default=512)
    parser.add_argument('-floyd', action='store_true')
    parser.add_argument('-checkpoint', type=int, default=0)
    opt = parser.parse_args()
    # device 0 means "use CUDA"; -1 means CPU.
    opt.device = 0 if opt.no_cuda is False else -1
    if opt.device == 0:
        assert torch.cuda.is_available()
    if opt.alex:
        torch.cuda.set_device(1)
    read_data(opt)
    SRC, TRG = create_fields(opt)
    opt.train, opt.val = create_dataset(opt, SRC, TRG)
    model = get_model(opt, len(SRC.vocab), len(TRG.vocab), SRC)
    if opt.tensorboard_graph:
        # Trace a single batch through the model for the TensorBoard graph.
        writer = SummaryWriter('runs')
        for i, batch in enumerate(opt.train):
            src = batch.src.transpose(0, 1).cuda()
            trg = batch.trg.transpose(0, 1).cuda()
            trg_input = trg[:, :-1]
            src_mask, trg_mask = create_masks(src, trg_input, opt)
            writer.add_graph(model, (src, trg_input, src_mask, trg_mask))
            break
        writer.close()
    # beam search parameters
    opt.k = 1
    opt.max_len = opt.max_strlen
    opt.optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.98), eps=1e-9)
    opt.scheduler = ReduceLROnPlateau(opt.optimizer, factor=0.2, patience=5, verbose=True)
    if opt.SGDR:
        opt.sched = CosineWithRestarts(opt.optimizer, T_max=opt.train_len)
    if opt.checkpoint > 0:
        print("model weights will be saved every %d minutes and at end of epoch to directory weights/"%(opt.checkpoint))
    train_model(model, opt, SRC, TRG)
    if opt.floyd is False:
        promptNextAction(model, opt, SRC, TRG)
def yesno(response):
    """Keep prompting until *response* is 'y' or 'n'; return the valid answer."""
    while response != 'y' and response != 'n':
        response = input('command not recognised, enter y or n : ')
    return response
def promptNextAction(model, opt, SRC, TRG):
saved_once = 1 if opt.load_weights is not None or opt.checkpoint > 0 else 0
if opt.load_weights is not None:
dst = opt.load_weights
if opt.checkpoint > 0:
dst = 'weights'
while True:
save = yesno(input('training complete, save results? [y/n] : '))
if save == 'y':
while True:
if saved_once != 0:
res = yesno("save to same folder? [y/n] : ")
if res == 'y':
break
dst = input('enter folder name to create for weights (no spaces) : ')
if ' ' in dst or len(dst) < 1 or len(dst) > 30:
dst = input("name must not contain spaces and be between 1 and 30 characters length, enter again : ")
else:
try:
os.mkdir(dst)
except:
res= yesno(input(dst + " already exists, use anyway? [y/n] : "))
if res == 'n':
continue
break
print("saving weights to " + dst + "/...")
torch.save(model.state_dict(), f'{dst}/model_weights')
if saved_once == 0:
pickle.dump(SRC, open(f'{dst}/SRC.pkl', 'wb'))
pickle.dump(TRG, open(f'{dst}/TRG.pkl', 'wb'))
saved_once = 1
print("weights and field pickles saved to " + dst)
res = yesno(input("train for more epochs? [y/n] : "))
if res == 'y':
while True:
epochs = input("type number of epochs to train for : ")
try:
epochs = int(epochs)
except:
print("input not a number")
continue
if epochs < 1:
print("epochs must | |
<reponame>bvbohnen/X4_Customizer
from time import time
from itertools import chain
from collections import OrderedDict
from Framework import Settings
from PyQt5 import QtWidgets
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from ..Shared.Misc import Set_Icon, Set_Foreground_Color
#-Removed; all file system access moved elsewhere.
#from Framework import File_System
class VFS_Item:
    '''
    Represents a VFS folder or file.
    To be a little lighter weight, nested files will sometimes be tracked
    by name, while only nested folders are consistently tracked as VFS_Items.
    Attributes:
    * virtual_path
      - String, path to this folder.
    * parent_path
      - String, virtual_path of the parent folder.
    * parent
      - VFS_Item representing the parent folder.
      - None for the top level.
    * name
      - String, name of this folder or file.
      - At the top level, this is just 'root'.
    * folders
      - List of VFS_Item objects that are folders under this one.
      - Empty if this is a file.
    * files
      - List of VFS_Item objects that are files under this one.
      - Empty if this is a file.
    * file_paths
      - List of strings, virtual_paths of files that are at this folder level.
      - Empty if this is a file.
    * is_folder
      - Bool, True if this is a folder, else it is a file.
    * shared_file_info_dict
      - Dict present in the parent window, keyed by virtual_paths,
        holding some parsed file information.
      - This should be the same for all vfs items, and will be passed
        to generated children, to be used in coloring and similar checks.
      - This dict link is ensured never to change, though the contents
        may be swapped around (eg. nested dicts could be removed and
        remade).
    * window
      - The gui window holding this item. Used for Print lookup.
    '''
    def __init__(
            self,
            virtual_path,
            is_folder,
            shared_file_info_dict,
            window,
        ):
        self.virtual_path = virtual_path
        self.is_folder = is_folder
        self.shared_file_info_dict = shared_file_info_dict
        self.parent = None
        self.window = window
        # Split on the last '/', though it may not be present.
        *parent, self.name = virtual_path.rsplit('/',1)
        if parent:
            self.parent_path = parent[0]
        else:
            self.parent_path = ''
        if not self.name:
            self.name = 'root'
        # To reduce weight, only make these lists for folders.
        if is_folder:
            self.folders = []
            self.files = []
            self.file_paths = []
        return

    def Add_Item(self, vfs_item):
        '''
        Record the child item under this folder.
        It will be added to folders or files based on its is_folder flag.
        '''
        if vfs_item.is_folder:
            self.folders.append(vfs_item)
        else:
            self.files.append(vfs_item)
        vfs_item.parent = self
        return

    def Get_Folders(self):
        '''
        Returns all child folders.
        '''
        return self.folders

    def Get_Files(self):
        '''
        Returns all child files.
        '''
        return self.files

    def Build_Files(self):
        '''
        From file_paths, construct VFS_Items and fill in the files list.
        This should be called only when needed, since there is some
        delay on the creation that is significant if all folders do
        it at once.
        Does nothing if files are already present.
        '''
        # TODO: maybe set this up to only fill in missing files, so
        # that some files can be precreated, or files can be added
        # after a first pass.
        if self.files:
            return
        for virtual_path in self.file_paths:
            # Create the item, passing along the file info.
            self.Add_Item(VFS_Item(
                virtual_path,
                is_folder = False,
                shared_file_info_dict = self.shared_file_info_dict,
                window = self.window,
                ))
        return

    #-Removed; this should never be needed, and isn't entirely safe.
    #def Get_Game_File(self):
    #    '''
    #    If this is a file, returns the Game_File object for it,
    #    or None if there is a loading error.
    #    '''
    #    if self.is_folder:
    #        return
    #    return File_System.Load_File(self.virtual_path)

    def Get_Q_Item(self):
        '''
        Returns a new QStandardItem representing this item, annotated with
        'vfs_item' to link back here.
        '''
        q_item = QStandardItem(self.name)
        q_item.vfs_item = self
        # Give it a nice icon.
        if self.is_folder:
            Set_Icon(q_item, 'SP_DirIcon')
        else:
            Set_Icon(q_item, 'SP_FileIcon')
        self.Color_Q_Item(q_item)
        # -Removed; regulate when children are expanded.
        ## Add any children q items.
        #for child_q_item in self.Get_Child_Q_Items(
        #    include_folders = include_folders,
        #    include_files = include_files,
        #    recursive = recursive,
        #    top_call = False):
        #    q_item.appendRow(child_q_item)
        return q_item

    def Color_Q_Item(self, q_item):
        '''
        Apply coloring to the given QStandardItem. To be used on q_item
        creation, or to be given a q_item from a higher level when updating
        colors.
        '''
        # Color it based on file status.
        # Special color if both patched and modified.
        if self.Is_Patched_Modified():
            color = 'darkviolet'
        elif self.Is_Modified():
            color = 'crimson'
        elif self.Is_Patched():
            color = 'blue'
        elif self.Is_Loaded():
            # Go with something interesting, maybe green?
            color = 'green'
        else:
            # To make viewing nicer, default to black.
            color = 'black'
        Set_Foreground_Color(q_item, color)
        return

    def Get_Child_Q_Items(
            self,
            include_folders = False,
            include_files = False,
            base_q_item = None,
        ):
        '''
        Returns a list of QStandardItems for each of its children.
        If given a parent q_item (should have been generated previously
        by Get_Q_Item on this vfs_item), the children will be appended
        to it as subrows.
        If neither flag is set, returns an empty list.
        * include_folders
          - Bool, include child folders.
        * include_files
          - Bool, include child files.
        * base_q_item
          - Optional, QStandardItem linked to this vfs_item which will
            attach to the child q_items as a parent.
        '''
        if not self.is_folder:
            return []
        if Settings.profile:
            start = time()
        ret_list = []
        if include_folders:
            for subitem in sorted(self.folders, key = lambda x : x.name):
                ret_list.append( subitem.Get_Q_Item())
        if include_files:
            # Make sure files are built.
            self.Build_Files()
            for subitem in sorted(
                self.files,
                # Sort these such that modified/patched/etc. show up first.
                # Want it to be somewhat exclusively categorized, eg. all
                # modified files are grouped together and sorted by name
                # and not based on which are patched.
                # Flip the flags so that a 'True' sorts first.
                key = lambda x : ( not x.Is_Patched_Modified(),
                                   not x.Is_Modified(),
                                   not x.Is_Patched(),
                                   not x.Is_Loaded(),
                                   x.name)
                ):
                ret_list.append( subitem.Get_Q_Item())
        # Add the children to the parent q_item, if provided.
        # (PEP 8: compare to None with `is not`, not `!=`.)
        if base_q_item is not None:
            # Verify the base_q_item links back to this vfs_item.
            assert base_q_item.vfs_item is self
            # Add children as subrows.
            for child_q_item in ret_list:
                base_q_item.appendRow(child_q_item)
        if Settings.profile:
            self.window.Print('VFS_Item.Get_Child_Q_Items time: {:.3f} s'.format(
                time() - start
                ))
        return ret_list

    def Get_Parent_Q_Item(self):
        '''
        Returns a new QStandardItem for this item's parent.
        It will have no file/folder children.
        If there is no parent, returns None.
        '''
        if self.parent is None:
            return
        return self.parent.Get_Q_Item()

    def Lookup_File_Info(self, field):
        '''
        Looks up the given field in the shared_file_info_dict, returning
        its value or None if a matching entry not found.
        Folders will check all children and attempt to join their
        values together: bools get OR'd, lists get joined.
        '''
        ret_var = None
        if not self.is_folder:
            if self.virtual_path in self.shared_file_info_dict:
                ret_var = self.shared_file_info_dict[self.virtual_path].get(field, None)
        else:
            # First pass collects the values; second pass joins them.
            values = []
            # Try file names (the files may not be created yet).
            # NOTE(review): only file_paths and subfolders are consulted;
            # VFS_Items added directly to self.files without a file_paths
            # entry are not aggregated — confirm intended.
            for path in self.file_paths:
                if path in self.shared_file_info_dict:
                    values.append(self.shared_file_info_dict[path].get(field, None))
            # Also get subfolders.
            for folder in self.folders:
                values.append(folder.Lookup_File_Info(field))
            # Join all values together.
            # This is a little clumsy, skipping None and checking for
            # a bool or list.
            for value in values:
                if value is None:
                    continue
                if isinstance(value, bool):
                    if ret_var is None:
                        ret_var = value
                    else:
                        ret_var |= value
                elif isinstance(value, list):
                    if ret_var is None:
                        ret_var = []
                    ret_var += value
        return ret_var

    def Is_Loaded(self):
        '''
        For files, returns True if the File_System has a copy of
        the file loaded, else False.
        '''
        # This will convert None to False.
        if self.Lookup_File_Info('loaded'):
            return True
        return False

    def Is_Patched(self):
        '''
        For files, returns True if the file is partly or wholly
        provided by an extension. This ignores customizer modifications.
        '''
        if self.Lookup_File_Info('patched'):
            return True
        return False

    def Is_Modified(self):
        '''
        For files, returns True if the file has been modified by
        the customizer script.
        '''
        if self.Lookup_File_Info('modified'):
            return True
        return False

    def Is_Patched_Modified(self):
        '''
        For files, returns True if the file has been modified
        by the customizer script and was originally sourced from
        an extension.
        '''
        return self.Is_Modified() and self.Is_Patched()
| |
<reponame>kirillyat/klever
#
# Copyright (c) 2021 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pytest
import logging
from klever.core.vtg.emg.common.c import Function
from klever.core.vtg.emg.common.c.source import Source
from klever.core.vtg.emg.common.process import ProcessCollection
from klever.core.vtg.emg.common.process.serialization import CollectionDecoder
from klever.core.vtg.emg.decomposition.separation import SeparationStrategy
from klever.core.vtg.emg.decomposition.separation.linear import LinearStrategy
from klever.core.vtg.emg.decomposition.modelfactory.selective import SelectiveFactory
# Process specifications (plain dicts mirroring the EMG JSON schema) used to
# assemble the "base" model fixture below.

# Entry-point process containing a single empty "root" action.
MAIN = {
    "comment": "Main process.",
    "labels": {},
    "process": "<root>",
    "actions": {
        "root": {
            "comment": "Some action",
            "statements": []
        }
    }
}
# Model for function f1: emits register_p1 with a container parameter.
REGISTER = {
    "comment": "",
    "labels": {"container": {"declaration": "struct validation *var"}},
    "process": "[register_p1]",
    "actions": {
        "register_p1": {"parameters": ["%container%"]}
    }
}
# Model for function f2: emits deregister_p1 with a container parameter.
DEREGISTER = {
    "comment": "",
    "labels": {"container": {"declaration": "struct validation *var"}},
    "process": "[deregister_p1]",
    "actions": {
        "deregister_p1": {"parameters": ["%container%"]}
    }
}
# Environment process c/p1: probe/success/fail loop with savepoint s1.
B1 = {
    "comment": "",
    "labels": {
        "container": {"declaration": "struct validation *var"},
        "ret": {"declaration": "int x", "value": "0"}
    },
    "process": "(!register_p1).{main}",
    "actions": {
        "main": {
            "comment": "",
            "process": "<probe>.(<success>.[register_p2] | <fail>.<remove>).{main} | (deregister_p1)"
        },
        "register_p1": {
            "condition": ["$ARG1 != 0"],
            "parameters": ['%container%'],
            "savepoints": {'s1': {"statements": []}}
        },
        "probe": {
            "comment": "Do probing.",
            "statements": ["%ret% = f4(%container%);"]
        },
        "success": {
            "comment": "Successful probing.",
            "condition": ["%ret% == 0"]
        },
        "fail": {
            "comment": "Failed probing.",
            "condition": ["%ret% != 0"]
        },
        "deregister_p1": {
            "parameters": ['%container%']
        },
        "remove": {
            "comment": "Removing.",
            "statements": ["$FREE(%container%);"]
        },
        "register_p2": {
            "parameters": ['%container%']
        }
    }
}
# Environment process c/p2: depends on c/p1's probe/success via "require".
B2 = {
    "comment": "",
    "labels": {
        "container": {"declaration": "struct validation *var"}
    },
    "process": "(!register_p2).([read] | [write])",
    "actions": {
        "register_p2": {
            "parameters": ['%container%'],
            "savepoints": {'s2': {"statements": []}},
            "require": {"c/p1": {"include": ["probe", "success"]}}
        },
        "read": {"comment": "", "statements": []},
        "write": {"comment": "Do write.", "statements": []}
    }
}
@pytest.fixture()
def model():
    """Baseline process collection parsed from the 'base' specification."""
    source_files = ['test.c']
    declarations = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    source = Source(source_files, [], dict())
    for func_name, declaration in declarations.items():
        func = Function(func_name, declaration)
        func.definition_file = source_files[0]
        source.set_source_function(func, source_files[0])
    spec = {
        "name": 'base',
        "functions models": {
            "f1": REGISTER,
            "f2": DEREGISTER,
        },
        "environment processes": {
            "c/p1": B1,
            "c/p2": B2
        },
        "main process": MAIN
    }
    decoder = CollectionDecoder(logging, dict())
    return decoder.parse_event_specification(
        source, json.loads(json.dumps(spec)), ProcessCollection())
# Process specifications for the "advanced" fixtures below.

# c/p1: init process offering three savepoints on its registration action.
P1 = {
    "comment": "",
    "labels": {},
    "process": "(!register_p1).<init>.(<exit> | <init_failed>)",
    "actions": {
        "register_p1": {
            "parameters": [],
            "savepoints": {
                'sp_init_first': {"statements": []},
                'sp_init_second': {"statements": []},
                'sp_init_third': {"statements": []}
            }
        },
        "init": {"comment": ""},
        "exit": {"comment": ""},
        "init_failed": {"comment": ""}
    }
}
# Model for f1: emits register_p2.
REGISTER_P2 = {
    "comment": "",
    "labels": {},
    "process": "[register_p2]",
    "actions": {"register_p2": {}}
}
# Model for f2: emits deregister_p2.
DEREGISTER_P2 = {
    "comment": "",
    "labels": {},
    "process": "[deregister_p2]",
    "actions": {"deregister_p2": {}}
}
# c/p2: probe loop that requires c/p1's init/exit actions.
P2 = {
    "comment": "",
    "labels": {"ret": {"declaration": "int x"}},
    "process": "(!register_p2).{main}",
    "actions": {
        "main": {
            "comment": "Test initialization.",
            "process": "<probe>.(<success>.[register_p3].[deregister_p3] | <fail>.<remove>).{main} | (deregister_p2)"
        },
        "register_p2": {
            "parameters": [],
            "require": {
                "c/p1": {"include": ["init", "exit"]}
            }
        },
        "deregister_p2": {"parameters": []},
        "probe": {"comment": ""},
        "success": {"comment": "", "condition": ["%ret% == 0"]},
        "fail": {"comment": "Failed probing.", "condition": ["%ret% != 0"]},
        "remove": {"comment": ""},
        "register_p3": {"parameters": []},
        "deregister_p3": {"parameters": []}
    }
}
# c/p3: nested-subprocess scenarios; requires c/p2's (de)register_p3 actions.
P3 = {
    "comment": "",
    "labels": {},
    "process": "(!register_p3).<init>.{scenario1}",
    "actions": {
        "register_p3": {
            "parameters": [],
            "savepoints": {
                'sp_init_p3': {"statements": [], "comment": "test comment"}
            },
            "require": {
                "c/p2": {"include": ["register_p3", "deregister_p3"]}
            }
        },
        "deregister_p3": {"parameters": []},
        "free": {"comment": ""},
        "terminate": {"comment": "", "process": "<free>.(deregister_p3)"},
        "init": {"comment": ""},
        "create": {"comment": ""},
        "create_fail": {"comment": ""},
        "create2": {"comment": ""},
        "create2_fail": {"comment": ""},
        "success": {"comment": ""},
        "work1": {"comment": ""},
        "work2": {"comment": ""},
        "register_p4": {"parameters": []},
        "deregister_p4": {"parameters": []},
        "create_scenario": {
            "comment": "",
            "process": "<create>.(<success>.({work_scenario} | {p4_scenario}) | <create_fail>.{terminate})"
        },
        "create2_scenario": {"comment": "", "process": "<create2>.(<create2_fail> | <success>).{terminate}"},
        "work_scenario": {"comment": "", "process": "(<work1> | <work2>).{terminate}"},
        "p4_scenario": {"comment": "", "process": "[register_p4].[deregister_p4].{terminate}"},
        "scenario1": {"comment": "", "process": "{create_scenario} | {create2_scenario}"}
    }
}
# c/p4: leaf process requiring c/p3's register_p4.
P4 = {
    "comment": "",
    "labels": {},
    "process": "(!register_p4).<write>.(deregister_p4)",
    "actions": {
        "register_p4": {
            "parameters": [],
            "require": {
                "c/p3": {"include": ["register_p4"]}
            }
        },
        "deregister_p4": {"parameters": []},
        "write": {"comment": ""}
    }
}
# c/p5: independent alternative on register_p2 with its own savepoint.
P5 = {
    "comment": "",
    "labels": {},
    "process": "(!register_p2).(<w1> | <w2>).(deregister_p2)",
    "actions": {
        "register_p2": {
            "parameters": [],
            "savepoints": {
                'sp_p5': {"statements": []}
            }
        },
        "deregister_p2": {"parameters": []},
        "w1": {"comment": ""},
        "w2": {"comment": ""}
    }
}
# c/p6: fully independent process with two savepoints.
P6 = {
    "comment": "The process that does not rely on any other.",
    "labels": {},
    "process": "(!register_unique).(<w1> | <w2>)",
    "actions": {
        "register_unique": {
            "parameters": [],
            "savepoints": {
                'sp_unique_1': {"statements": []},
                'sp_unique_2': {"statements": []}
            }
        },
        "w1": {"comment": ""},
        "w2": {"comment": ""}
    }
}
@pytest.fixture()
def double_init_model():
    """Collection with two category-1 init processes and dependent category-2 processes."""
    source_files = ['test.c']
    declarations = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    source = Source(source_files, [], dict())
    for func_name, declaration in declarations.items():
        func = Function(func_name, declaration)
        func.definition_file = source_files[0]
        source.set_source_function(func, source_files[0])
    c1p1 = {
        "comment": "Category 1, process 1.",
        "process": "(!register_c1p1).<init>.(<ok>.[register_c2p2].[deregister_c2p2] | <fail>)",
        "actions": {
            "register_c1p1": {
                "parameters": [],
                "savepoints": {
                    "s1": {"statements": []}
                }
            },
            "register_c2p2": {"parameters": []},
            "deregister_c2p2": {"parameters": []},
            "init": {"coment": ""},
            "ok": {"coment": ""},
            "fail": {"coment": ""}
        }
    }
    c1p2 = {
        "comment": "Category 1, process 1.",
        "process": "(!register_c1p2).<init>.(<ok> | <fail>)",
        "actions": {
            "register_c1p2": {
                "parameters": [],
                "savepoints": {
                    "basic": {"statements": []}
                }
            },
            "init": {"coment": ""},
            "ok": {"coment": ""},
            "fail": {"coment": ""}
        }
    }
    c2p1 = {
        "comment": "Category 2, process 1.",
        "process": "(!register_p1).<probe>.(deregister_p1)",
        "labels": {"container": {"declaration": "struct validation *var"}},
        "actions": {
            "register_p1": {
                "parameters": ["%container%"],
                "require": {
                    "c1/p1": {"include": ["ok"]},
                    "c1/p2": {"include": ["ok"]}
                }
            },
            "deregister_p1": {"parameters": ["%container%"]},
            "probe": {"comment": ""},
        }
    }
    c2p2 = {
        "comment": "Category 2, process 2.",
        "process": "(!register_c2p2).(<v1> | <v2>).(deregister_c2p2)",
        "actions": {
            "register_c2p2": {
                "parameters": [], "require": {"c2/p1": {"include": ["probe"]}}
            },
            "deregister_c2p2": {"parameters": []},
            "v1": {"comment": ""},
            "v2": {"comment": ""}
        }
    }
    spec = {
        "name": 'test_model',
        "functions models": {
            "f1": REGISTER,
            "f2": DEREGISTER
        },
        "environment processes": {
            "c1/p1": c1p1,
            "c1/p2": c1p2,
            "c2/p1": c2p1,
            "c2/p2": c2p2
        }
    }
    decoder = CollectionDecoder(logging, dict())
    return decoder.parse_event_specification(
        source, json.loads(json.dumps(spec)), ProcessCollection())
@pytest.fixture()
def advanced_model():
    """Collection with four environment processes (c/p1..c/p4)."""
    # One source file exposing the register/deregister entry points.
    source = Source(['test.c'], [], dict())
    declarations = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    for func_name, decl in declarations.items():
        func = Function(func_name, decl)
        func.definition_file = 'test.c'
        source.set_source_function(func, 'test.c')
    spec = {
        "functions models": {"f1": REGISTER_P2, "f2": DEREGISTER_P2},
        "environment processes": {"c/p1": P1, "c/p2": P2, "c/p3": P3, "c/p4": P4}
    }
    # JSON round-trip mimics reading the specification from disk.
    decoder = CollectionDecoder(logging, dict())
    return decoder.parse_event_specification(source, json.loads(json.dumps(spec)),
                                             ProcessCollection())
@pytest.fixture()
def advanced_model_with_unique():
    """Same as advanced_model, plus an extra process c/p6."""
    source = Source(['test.c'], [], dict())
    declarations = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    for func_name, decl in declarations.items():
        func = Function(func_name, decl)
        func.definition_file = 'test.c'
        source.set_source_function(func, 'test.c')
    spec = {
        "functions models": {"f1": REGISTER_P2, "f2": DEREGISTER_P2},
        "environment processes": {
            "c/p1": P1, "c/p2": P2, "c/p3": P3, "c/p4": P4, "c/p6": P6
        }
    }
    # JSON round-trip mimics reading the specification from disk.
    decoder = CollectionDecoder(logging, dict())
    return decoder.parse_event_specification(source, json.loads(json.dumps(spec)),
                                             ProcessCollection())
@pytest.fixture()
def model_with_independent_process():
    """Collection with an explicit main process and processes c/p1, c/p2, c/p5."""
    source = Source(['test.c'], [], dict())
    declarations = {
        'f1': "static int f1(struct test *)",
        'f2': "static void f2(struct test *)"
    }
    for func_name, decl in declarations.items():
        func = Function(func_name, decl)
        func.definition_file = 'test.c'
        source.set_source_function(func, 'test.c')
    spec = {
        "functions models": {"f1": REGISTER_P2, "f2": DEREGISTER_P2},
        "environment processes": {"c/p1": P1, "c/p2": P2, "c/p5": P5},
        "main process": MAIN
    }
    # JSON round-trip mimics reading the specification from disk.
    decoder = CollectionDecoder(logging, dict())
    return decoder.parse_event_specification(source, json.loads(json.dumps(spec)),
                                             ProcessCollection())
@pytest.fixture()
def logger():
    """Module logger for the tests; verbose output is currently disabled."""
    # todo: Uncomment when you will need a log or implement ini file
    # logger.setLevel(logging.DEBUG)
    # handler = logging.StreamHandler(sys.stdout)
    # handler.setLevel(logging.DEBUG)
    # logger.addHandler(handler)
    return logging.getLogger(__name__)
def _obtain_model(logger, model, specification):
    """Generate scenarios per environment process and run SelectiveFactory.

    Returns (mapping of process name -> scenarios, list of derived models).
    """
    factory = SelectiveFactory(logger, specification)
    generator = SeparationStrategy(logger, dict())
    scenarios_by_process = {}
    for process in model.environment.values():
        scenarios_by_process[str(process)] = list(generator(process))
    return scenarios_by_process, list(factory(scenarios_by_process, model))
def _obtain_linear_model(logger, model, specification, separate_dispatches=False):
    """Like _obtain_model, but uses the linear scenario extraction strategy.

    separate_dispatches additionally emits scenarios without dispatches.
    """
    factory = SelectiveFactory(logger, specification)
    options = {'add scenarios without dispatches': True} if separate_dispatches else dict()
    generator = LinearStrategy(logger, options)
    scenarios_by_process = {}
    for process in model.environment.values():
        scenarios_by_process[str(process)] = list(generator(process))
    return scenarios_by_process, list(factory(scenarios_by_process, model))
def _to_sorted_attr_str(attrs):
return ", ".join(f"{k}: {attrs[k]}" for k in sorted(attrs.keys()))
def _expect_models_with_attrs(models, attributes):
model_attrs = {_to_sorted_attr_str(m.attributes) for m in models}
attrs = {_to_sorted_attr_str(attrs) for attrs in attributes}
unexpected = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
: Project - Comparison experiments
: standalone segmentation using torch in-built models - fcn, deeplabv3, lr-aspp
: Author - <NAME>
: Institute - University of Kansas
: Date - 5/13/2021 last updated 5/15/2021
: Model Reference:
https://pytorch.org/vision/stable/models.html#semantic-segmentation
: HowTo:
    0) This script adopts functions from the RGANet code and can be executed independently.
       Requirements: pytorch >= 1.0.0, python >= 3.6, numpy
1) To specify parameters, refer to CONFIG for details.
2) You can copy and rename this script to run several different models at a time,
to do this, you must specify correct gpu using '-gpu' parameter (default: 0),
    3) You may change the optimizer and its parameters in the training part.
4) Don't forget to move 'evaluation.txt' before running another evaluation.
"""
import torch
import time
import argparse
import math
import numpy as np
import torch.nn.functional as ops
from pathlib import Path
from torchvision import transforms
from torchvision.utils import save_image
from torch.utils.data import TensorDataset
from PIL import Image
# Command-line interface: mode flags (-train / -test / -v) plus dataset,
# checkpoint and output paths.  Fixes typos in the user-facing help strings
# ("trainig", "lattest", "tesing"); flags and defaults are unchanged.
parser = argparse.ArgumentParser(description="Arguments for training, validation and testing")
parser.add_argument("-gpu", type = int, default = 0,
                    help = "Designate GPU # for training and testing")
# Training
parser.add_argument("-train", action = "store_true",
                    help = "Train network only")
parser.add_argument("-r", "--restore", action = "store_true",
                    help = "Restore training by loading specified checkpoint or latest checkpoint")
# Accommodation to suction dataset
parser.add_argument("-i", "--image", type = Path,
                    default = r"../dataset/suction-based-grasping-dataset/data/color-input",
                    help = "Directory to training images")
parser.add_argument("-l", "--label", type = Path,
                    default = r"../dataset/suction-based-grasping-dataset/data/label",
                    help = "Directory to training annotations")
parser.add_argument("-c", "--checkpoint", type = Path, default = r"checkpoint",
                    help = "Checkpoint file path specified by users")
parser.add_argument("-d", "--dir", type = Path, default = r"results",
                    help = r"valid for test mode, specify the folder to save results and labels, or "
                           r"valid for validate mode, specify the source to load images/save results")
# Testing
parser.add_argument("-test", action = "store_true",
                    help = "Test and visualize only")
parser.add_argument("-v", "--validate", action = "store_true",
                    help = "Validate testing results using metrics")
# Central configuration for model selection, training, testing, validation and
# augmentation.  Edit values here instead of scattering magic numbers below.
CONFIG = {
    "MODEL": "deeplab",     # choose between "fcn", "deeplab", "lr-aspp"
    "BACKBONE": "mobilenet",    # backbone "resnet50", "resnet101" for fcn
                                # backbone "resnet50", "resnet101", "mobilenet" for "deeplab"
                                # backbone "mobilenet" for "lr-aspp" (this will ignore backbone
                                # setting)
    # Training
    "DOUBLE": False,    # double the size of training set, turn off when "AUGMENT" is False
    "AUGMENT": False,   # switch to enable augmentation, valid regardless of "DOUBLE"
    "AUX_LOSS": False,  # whether to apply auxiliary loss during training
    "PRETRAIN": False,  # use pre-trained weights from COCO train2017 (21 classes as Pascal VOC)
    "SHOW_PROG": True,  # displays a progress bar of the download if True
    "BATCHSIZE": 18,    # batchsize for training
    "EPOCHS": 300,      # epochs for training
    "SHOW_LOSS": 10,    # number of minibatchs processed to print training info
    "SAVE_MODEL": 2,    # epoch interval to save, start counting from epoch 2
    "SHUFFLE": True,    # random shuffle
    "NUM_WORKERS": 0,   # set to 0 if memory error occurs
    "PIN_MEMORY": True, # set to false if memory is insufficient
    "DROP_LAST": False,
    "NUM_CLS": 3,       # number of classes
    "INT_CLS": (255, 0, 128),   # raw label intensity levels to differentiate classes
    # Testing
    "DENORM": False,     # set to True to disable testing normalization
    "TEST_BATCH": 20,   # batchsize for testing
    "TEST_MUL": 5,      # set a multiplier for testing
    "TEST_TIME": 1,     # show runtime stats after running certain number of testing batches
    "TEST_WORKERS": 0,  # set number of workers to run testing batches
    "TEST_PIN": True,   # set to True if memory is pinned for testing batches
    "TEST_SAVE": False, # if tests are done, save results and their labels to disk
    "TEST_RUNTIME": True,   # False to disable runtime test
    "TEST_BGR": True,   # Test: True - target class save as bright color, otherwise cold color
                        # Validation: True - evaluation cls NUM_CLS-1, otherwise cls 0
    # Validation
    "MGD_INTV": (12, 12),   # set intervals (dH, dW) for metric MGRD
    "MGD_BETA": 6,      # set beta for metric MGRD
    "LAB_CLS": 2,       # interested label class to evaluate, range from 0 to NUM_CLS-1
    # Augmentation
    "SIZE": (480, 640), # (H, W)
    "HAS_NORM": True,   # normalization, for samples only
    "PAR_NORM": {"mean": (0.485, 0.456, 0.406),     # dictionary format with tuples
                  "std": (0.229, 0.224, 0.225)},    # ImageNet statistics; standalone option
    "HOR_FLIP": True,   # random horizontal flip
    "PH_FLIP": 0.5,     # must be a number in [0, 1]
    "VER_FLIP": True,   # random vertical flip
    "PV_FLIP": 0.5,     # must be a number in [0, 1]
    "SHIFT": True,      # random affine transform, will not affect the label
    "PAR_SFT": (0.2, 0.2),  # must be a tuple if set "SHIFT" to True
    "P_SFT": 0.6,       # probability to shift
    "ROTATE": True,     # rotate image
    "ROT_DEG": math.pi, # rotation degree — NOTE(review): the value is pi (~3.14);
                        # if the consumer expects degrees this rotates by at most
                        # ~3 degrees, not 180 — confirm intent where it is used
    "P_ROT": 0.4,       # probability to rotate
    "COLOR_JITTER": True,   # random color random/fixed jitter
    "P_JITTER": 0.2,    # probability to jitter
    "BRIGHTNESS": 0.5,  # random brightness adjustment, float or (float, float)
    "CONTRAST": 0.5,    # random contrast adjustment, float or (float, float)
    "SATURATION": 0.5,  # random saturation adjustment, float or (float, float)
    "HUE": 0.25,        # random hue adjustment, float or (float, float)
    "BLUR": True,       # random gaussian blur
    "P_BLUR": 0.3,      # probability to blur image
    "PAR_BLUR":
        {"kernel": 15,          # kernel size, can be either one int or [int, int]
         "sigma": (0.5, 3.0)}   # sigma, can be single one float
}
class SuctionDataset(torch.utils.data.Dataset):
def __init__(self, imgDir, labelDir, splitDir=None, mode="test", applyTrans=False, sameTrans=True):
super(SuctionDataset).__init__()
assert len(CONFIG["INT_CLS"]) > 1, "Must be more than 1 class"
assert len(CONFIG["INT_CLS"]) == CONFIG["NUM_CLS"], "Number of class does not match intensity levels"
assert len(CONFIG["SIZE"]) == 2, "Invalid SIZE format"
assert type(CONFIG["PAR_SFT"]) == tuple and len(CONFIG["PAR_SFT"]) == 2, "Invalid SHIFT parameters"
assert type(CONFIG["PAR_NORM"]) == dict, "Mean and std must be presented in a dict"
self.applyTran = applyTrans
self.sameTrans = sameTrans
self.mode = mode
# prepare for FCN training set
if mode in ["train", "test"]:
if splitDir and labelDir:
self.img = self.read_split_images(imgDir, splitDir, ".png", 1)
self.imgLen = len(self.img)
assert self.imgLen, "Empty dataset, please check directory"
self.nameList = list(self.img.keys())
self.W, self.H = self.img[self.nameList[0]].size
self.label = self.read_split_images(labelDir, splitDir, ".png", 0)
else:
raise IOError("Must specify training split file and annotation directory")
# prepare for validation. NOTE: network ONLY supports color samples and greyscale labels
if mode == "validate":
self.img = self.read_image_from_disk(imgDir, colorMode = 1)
self.imgLen = len(self.img)
assert self.imgLen, "Empty dataset, please check directory"
self.nameList = list(self.img.keys())
self.W, self.H = self.img[self.nameList[0]].size
self.label = self.read_image_from_disk(labelDir, colorMode = 0)
# get one pair of samples
    def __getitem__(self, idx):
        """Return the transformed (image, label) pair at index *idx*."""
        imgName = self.nameList[idx]
        img, label = self.img[imgName], self.label[imgName]
        # necessary transformation: PIL -> tensor, then pad to CONFIG["SIZE"]
        operate = transforms.Compose([transforms.ToTensor(), self._transform_pad_image()])
        img = operate(img)
        label = self._convert_img_to_uint8_tensor(label)
        # optical transformation / augmentation; these helpers are defined
        # further down the class (outside this view) — presumably no-ops
        # unless the matching CONFIG switches are enabled; confirm there.
        img = self.img_normalize(img)
        img = self.img_random_color_jitter(img)
        img = self.img_random_blur(img)
        img, label = self.img_random_flip(img, label)
        img, label = self.img_random_shift_rotate(img, label)
        return img, label
# get length of total smaples
def __len__(self):
return self.imgLen
# read names/directories from text files
@classmethod
def read_image_id(cls, filePath: Path, postFix: str) -> [str]:
assert filePath.is_file(), f"Invalid file path:\n{filePath.absolute()}"
with open(filePath, 'r') as f:
imgNames = f.readlines()
return [] if not imgNames else [ _.strip()+postFix for _ in imgNames]
# directly read image from directory
@classmethod
def read_image_from_disk(cls, folderPath: Path, colorMode=1) -> {str: Image.Image}:
imgList = folderPath.glob("*")
return cls.read_image_data(imgList, colorMode)
# read a bunch of images from a list of image paths
@classmethod
def read_image_data(cls, imgList: [Path], colorMode=1) -> {str: Image.Image}:
dump = {}
for imgPath in imgList:
assert imgPath.is_file(), f"Invalid image path: \n{imgPath.absolute()}"
img = Image.open(imgPath)
if not colorMode: img = img.convert('L')
dump[imgPath.stem] = img
return dump
# read images according to split lists
@classmethod
def read_split_images(cls, imgRootDir: Path, filePath: Path, postFix=".png", colorMode=1) -> {str: Path}:
imgList = cls.read_image_id(filePath, postFix)
imgList = [imgRootDir.joinpath(_) for _ in imgList]
return cls.read_image_data(imgList, colorMode)
# PIL label to resized tensor
    def _convert_img_to_uint8_tensor(self, label: Image) -> torch.Tensor:
        """Map raw label intensities to class indices and pad to CONFIG["SIZE"]."""
        dummy = np.array(label, dtype = np.uint8)
        assert dummy.ndim == 2, "Only for grayscale labelling images"
        save = []
        # NOTE(review): "INTCL_GCRF" does not exist in the CONFIG dict visible
        # here, so the GCRF modes would raise KeyError — confirm where it is
        # added.
        if self.mode in ["trainGCRF", "testGCRF"]:
            intLevels = CONFIG["INTCL_GCRF"]
        else:
            intLevels = CONFIG["INT_CLS"]
        # Two passes: collect the index masks first, so a value rewritten to a
        # small class index cannot be matched again by a later intensity level.
        for idx, val in enumerate(intLevels):
            save.append(np.where(dummy == val))
        for idx, val in enumerate(save):
            dummy[val] = idx
        dummy = torch.tensor(dummy, dtype = torch.uint8)
        dummy = self._transform_pad_image()(dummy)
        return dummy
# one-hot encoder for int64 label tensor
@staticmethod
def one_hot_encoder(label: torch.Tensor) -> torch.Tensor:
assert len(label.shape) == 3, r"Length of the tensor | |
# src/functions.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
from scipy import stats
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import textdistance
from collections import Counter
import os
import pandas as pd
import numpy as np
import pickle
from datetime import datetime
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, classification_report
def timer(start_time=None):
    """Start or stop a simple wall-clock timer.

    Called with no argument: returns the current time (the timer handle).
    Called with that handle: prints the elapsed time and returns None.
    """
    # `is None` instead of a truthiness test: it states the intent explicitly
    # and cannot misfire on a falsy sentinel value.
    if start_time is None:
        return datetime.now()
    thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
    tmin, tsec = divmod(temp_sec, 60)
    print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
def evaluation(y, y_hat, title = 'Confusion Matrix'):
    '''Print a classification report plus weighted precision/recall and
    accuracy for true values *y* and predictions *y_hat*, and draw the
    confusion matrix as a seaborn heatmap.'''
    cm = confusion_matrix(y, y_hat)
    precision = precision_score(y, y_hat, average = 'weighted')
    recall = recall_score(y, y_hat, average = 'weighted')
    accuracy = accuracy_score(y,y_hat)
    print(classification_report(y, y_hat))
    # Fixes: 'Accurancy' typo, and precision/recall were computed but never
    # reported anywhere.
    print('Accuracy: ', accuracy)
    print('Precision: ', precision)
    print('Recall: ', recall)
    sns.heatmap(cm, cmap= 'Greens', annot=True)
    plt.xlabel('predicted')
    plt.ylabel('actual')
    plt.title(title)
    plt.show()
def load_train_add_target_df():
    '''Load the training data and target CSVs from the data folder and
    inner-join them on 'id', returning the complete labelled training
    DataFrame.'''
    data = pd.read_csv('../data/train_data.csv')
    targets = pd.read_csv('../data/train_targets.csv')
    return data.merge(targets, on='id', how='inner')
def load_test_df():
    '''Load the test-set values CSV as a DataFrame.'''
    return pd.read_csv('../data/test_set_values.csv')
def numeric_status_group():
    '''Mapping from waterpoint status label to its ordinal code (2 = best).'''
    return {
        'functional': 2,
        'functional needs repair': 1,
        'non functional': 0,
    }
def encode_region(df, num_of_bins=7):
    '''
    Takes in Tanzania Water Point Data, groups by region,
    sorts regions by proportion of non-functional wells,
    and bins them into num_of_bins equal-width intervals
    (pd.cut) over that proportion

    returns: DataFrame with 'region_bins' column added
    and with 'region' and 'region_code' columns dropped
    '''
    #group DataFrame by region and count each type of waterpoint
    reg = df.groupby('region')['status_group'].value_counts().unstack()
    #calculate proportion of non-functional waterpoints in each region
    # NOTE(review): a region with no 'non functional' rows yields NaN here and
    # therefore a NaN bin below — confirm the data always has some per region.
    reg['total'] = reg.sum(axis=1)
    reg['non'] = reg['non functional'] / reg['total']
    #sort by that proportion
    reg = reg.sort_values('non')
    #cut regions into the specified number of equally wide bins, labelled 0..n-1
    bin_labels = list(range(num_of_bins))
    reg['region_bins'] = pd.cut(reg.non, bins=num_of_bins,
                                labels=bin_labels)
    codes = reg.region_bins
    #return bin numbers attached to dataframe (joined back on region name)
    return df.join(codes, on='region').drop(['region','region_code'], axis=1)
def categorize_funder(train):
    '''Map a row's 'funder' value to one of the seven most frequent funders,
    or 'other' for anything else.  Intended for row-wise apply.'''
    top_funders = {
        'Government Of Tanzania': 'govt',
        'Danida': 'danida',
        'Hesawa': 'hesawa',
        'Rwssp': 'rwssp',
        'World Bank': 'world_bank',
        'Kkkt': 'kkkt',
        'World Vision': 'world_vision',
    }
    return top_funders.get(train['funder'], 'other')
def categorize_installer(train):
    '''Map a row's 'installer' value to one of the seven most frequent
    installers, or 'other' for anything else.  Intended for row-wise apply.'''
    top_installers = {
        'DWE': 'dwe',
        'Government': 'govt',
        'RWE': 'rwe',
        'Commu': 'commu',
        'DANIDA': 'danida',
        'KKKT': 'kkkt',
        'Hesawa': 'hesawa',
    }
    return top_installers.get(train['installer'], 'other')
def numeric_public(row):
    '''Encode the 'public_meeting' flag: 1 when it equals True, else 0.'''
    return int(row['public_meeting'] == True)
def categorize_scheme(row):
    '''Map a row's 'scheme_management' value to one of the seven most
    frequent schemes, or 'other' for anything else.  Intended for
    row-wise apply.'''
    top_schemes = {
        'VWC': 'vwc',
        'WUG': 'wug',
        'Water authority': 'water_authority',
        'WUA': 'wua',
        'Water Board': 'water_board',
        'Parastatal': 'parastatal',
        'Private operator': 'private_operator',
    }
    return top_schemes.get(row['scheme_management'], 'other')
def permit(row):
    '''Encode the 'permit' flag: 1 when it equals True, else 0.'''
    return int(row['permit'] == True)
def encode_lga(df):
    '''
    encodes the 'lga' column into the values
    'rural', 'urban', and 'other' based on the last word of each entry

    returns DataFrame with column 'lga_coded' added and 'lga' dropped
    '''
    def _code(entry):
        tail = entry.split()[-1]
        if tail == 'Rural':
            return 'rural'
        if tail == 'Urban':
            return 'urban'
        return 'other'

    df['lga_coded'] = [_code(entry) for entry in df.lga]
    return df.drop('lga', axis=1)
def load_processed_train_df():
    '''Load the merged training DataFrame and run the full cleaning pipeline:
    numeric target encoding, combining redundant column pairs, binning
    region/lga, imputing placeholders, and dropping columns that duplicate
    information held elsewhere.

    NOTE(review): combine_extraction, combine_installer_funder, clean_permit
    and create_decades are defined elsewhere in this module (not visible in
    this view).'''
    train = load_train_add_target_df()
    #Creating the status column for numerically transformed status_group
    train['status'] = train.status_group.replace(numeric_status_group())
    train = combine_extraction(train)
    train = combine_managements(train)
    train = combine_installer_funder(train)
    train = combine_waterpoint(train)
    train = clean_permit(train)
    train = create_decades(train)
    #Encode and bin region field and drop original region column
    train = encode_region(train)
    #Encode lga field
    train = encode_lga(train)
    #Filling public meeting
    train['public_meeting'].fillna('not_known', inplace=True)
    #Removing subvillage field
    train = train.drop(columns=['subvillage'], axis=1)
    #Removing scheme name field
    train = train.drop(columns=['scheme_name'], axis=1)
    #Removing wpt name field
    train = train.drop(columns=['wpt_name'], axis=1)
    #Removing recorded by field
    train = train.drop(columns=['recorded_by'], axis=1)
    #Dropping construction year (superseded by the decade column)
    train.drop(['construction_year'], axis=1, inplace=True)
    #Removing water quality field
    train = train.drop(columns=['water_quality'], axis=1)
    #Removing payment type field
    train = train.drop(columns=['payment_type'], axis=1)
    #Removing quantity group field
    train = train.drop(columns=['quantity_group'], axis=1)
    #Removing source class and source fields
    train = train.drop(columns=['source_class'], axis=1)
    train = train.drop(columns=['source'], axis=1)
    #train = train.drop(columns=['source_type'], axis=1)
    #Removing waterpoint type group field
    #train = train.drop(columns=['waterpoint_type_group'], axis=1)
    return train
"""
==================================================================================================================
Max's cleaning Functions
==================================================================================================================
"""
def combiner(row, col_1, col_2):
    '''Return row[col_1] when the two columns agree; otherwise the two
    values joined as "first/second".'''
    first, second = row[col_1], row[col_2]
    if first == second:
        return first
    return f'{first}/{second}'
def fill_unknown(row, col_1, col_2, unknown):
    '''Backfill placeholder values between two columns of *row*.

    If both columns hold a value from *unknown*, both become 'unknown';
    if only one does, it is replaced by the other column's value.
    Mutates and returns *row*.
    '''
    first_unknown = row[col_1] in unknown
    second_unknown = row[col_2] in unknown
    if first_unknown and second_unknown:
        row[col_1] = 'unknown'
        row[col_2] = 'unknown'
    elif first_unknown:
        row[col_1] = row[col_2]
    elif second_unknown:
        row[col_2] = row[col_1]
    return row
def combine_managements(df):
    """Merge 'scheme_management' and 'management' into one combined column.

    Placeholder values ('na'/'other'/'none'/'unknown') in either column are
    backfilled from the other via fill_unknown, the pair is joined with
    combiner, and rare combinations (<=100 occurrences) are collapsed into a
    single 'binned' bucket.  Drops the two source columns.
    """
    col_1 = 'scheme_management'
    col_2 = 'management'
    df[col_1] = df[col_1].fillna('na')
    df[col_2] = df[col_2].fillna('na')
    df[col_2] = df[col_2].map(lambda x: x.lower())
    df[col_1] = df[col_1].map(lambda x: x.lower())
    # Treat the placeholder spellings as interchangeable between the columns.
    df = df.apply(lambda row: fill_unknown(row, col_1, col_2, ['na', 'other', 'none', 'unknown']), axis=1)
    df['scheme_management/management'] = df.apply(lambda row: combiner(row, col_1, col_2), axis=1)
    # Keep only combinations seen more than 100 times; bin the long tail.
    top = df['scheme_management/management'].value_counts()[df['scheme_management/management'].value_counts()>100]
    df['scheme_management/management'] = df['scheme_management/management'].map(lambda x: x if x in top.index else 'binned')
    df.drop([col_1, col_2], axis=1, inplace=True)
    return df
def combine_waterpoint(df):
    """Merge 'waterpoint_type' and 'waterpoint_type_group' into a single
    'waterpoint_type/group' column (via combiner) and drop the originals."""
    df['waterpoint_type/group'] = df.apply(lambda row: combiner(row, 'waterpoint_type', 'waterpoint_type_group'), axis=1)
    # Fix: removed a bare `df['waterpoint_type/group'].value_counts()` whose
    # result was computed and immediately discarded.
    df.drop(['waterpoint_type', 'waterpoint_type_group'], axis=1, inplace=True)
    return df
misspellings = {'dwe&': 'dwe',
'dwe': 'dwe',
'dwe/': 'dwe',
'dwe}': 'dwe',
'dw#': 'dwe',
'dw$': 'dwe',
'dw': 'dwe',
'dw e': 'dwe',
'dawe': 'dwe',
'dweb': 'dwe',
'government': 'central government',
'government of tanzania': 'central government',
'gove': 'central government',
'tanzanian government': 'central government',
'governme': 'central government',
'goverm': 'central government',
'tanzania government': 'central government',
'cental government': 'central government',
'gover': 'central government',
'centra government': 'central government',
'go': 'central government',
'centr': 'central government',
'central govt': 'central government',
'cebtral government': 'central government',
'governmen': 'central government',
'govern': 'central government',
'central government': 'central government',
'olgilai village community': 'community',
'maseka community': 'community',
'kitiangare village community': 'community',
'sekei village community': 'community',
'igolola community': 'community',
'comunity': 'community',
'mtuwasa and community': 'community',
'village community members': 'community',
'district community j': 'community',
'marumbo community': 'community',
'ngiresi village community': 'community',
'community': 'community',
'village community': 'community',
'commu': 'community',
'ilwilo community': 'community',
'communit': 'community',
'taboma/community': 'community',
'oldadai village community': 'community',
'villagers': 'community',
'kkkt': 'kkkt',
'kkkt dme': 'kkkt',
'kkkt-dioces ya pare': 'kkkt',
'kkkt katiti juu': 'kkkt',
'kkkt leguruki': 'kkkt',
'kkkt mareu': 'kkkt',
'kkkt ndrumangeni': 'kkkt',
'kk': 'kkkt',
'kkkt church': 'kkkt',
'kkkt kilinga': 'kkkt',
'kkkt canal': 'kkkt',
'kkt': 'kkkt',
'lutheran church': 'kkkt',
'luthe': 'kkkt',
'haidomu lutheran church': 'kkkt',
'world vision': 'world vision',
'world vission': 'world vision',
'world visiin': 'world vision',
'world division': 'world vision',
'world': 'world vision',
'world nk': 'world vision',
'district council': 'district council',
'district counci': 'district council',
'district council': 'district council',
'mbozi district council': 'district council',
'wb / district council': 'district council',
'mbulu district council': 'district council',
'serengeti district concil': 'district council',
'district water department': 'district council',
'tabora municipal council': 'district council',
'hesawa': 'hesawa',
'esawa': 'hesawa',
'hesaw': 'hesawa',
'unknown installer': 'unknown'}
def bin_installer(df):
"""
input: dataframe
output: returns a new dataframe with a new column, installer_binned, that | |
<reponame>raspberrypieman/brython
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
# Brython-specific
from _io_classes import *
import _io_classes
_IOBase = _io_classes._IOBase
_RawIOBase = _io_classes._RawIOBase
_BufferedIOBase = _io_classes._BufferedIOBase
_TextIOBase = _io_classes._TextIOBase
# Seek "whence" values, mirroring os.SEEK_SET/SEEK_CUR/SEEK_END.
SEEK_SET=0
SEEK_CUR=1
SEEK_END=2
valid_seek_flags = {0, 1, 2}  # Hardwired values
# SEEK_HOLE / SEEK_DATA exist only on some platforms; accept them as valid
# whence values only where the os module exposes them.
if hasattr(os, 'SEEK_HOLE') :
    valid_seek_flags.add(os.SEEK_HOLE)
    valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
# Rebind for compatibility: keep the builtin BlockingIOError available under
# this module's namespace.
BlockingIOError = BlockingIOError
def __open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises an `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closedfd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Flow direction data
# The `FlwdirRaster` object is at the core of the pyflwdir package.
# It contains gridded flow direction data, parsed to an actionable common format
# which describes the linear index of the next downstream cell.
# Currently `pyflwdir` supports two local flow direction (D8) data types
# according to the arcgis **D8** convention and pcraster **LDD** convention one
# global flow direction type according to the CaMa-Flood **NEXTXY** convention.
# Local flow direction data types describe the next downstream cell based on a
# local direction towards one of its neighboring cells, while global flow
# direction types describe the next downstream cell based on a global index.
import os
import geopandas as gpd
import pyflwdir
import rasterio
from rasterio import features
from rasterio.transform import Affine
import numpy as np
import netCDF4
import xarray as xr
# local libraries
from pyflwdir.gis_utils import affine_to_coords
from gislib.utils import xy_to_subidx, clip_bbox_global, build_vrt, vrt_props
# Default fill vals for netCDF
F8_FILLVAL = netCDF4.default_fillvals['f8']
F4_FILLVAL = netCDF4.default_fillvals['f4']
I4_FILLVAL = netCDF4.default_fillvals['i4']
I8_FILLVAL = netCDF4.default_fillvals['i8']
# Required environment variables. os.environ[...] raises KeyError if any of
# them is unset, so the script fails fast at import time rather than mid-run.
AUX_DATADIR = str(os.environ['AUX_DATADIR'])
OUTPUT_DIRECTORY = str(os.environ['OUTPUT_DIRECTORY'])
MEAN_ANNUAL_RUNOFF = str(os.environ['MEAN_ANNUAL_RUNOFF'])
# We read the flow direction and elevation raster data, including meta-data,
# using, [rasterio](https://rasterio.readthedocs.io/en/latest/)
dir_file = os.path.join(AUX_DATADIR, 'merit_rgn_dir.tif')
# wth_file = os.path.join(AUX_DATADIR, 'merit_rgn_wth.tif')
# elv_file = os.path.join(AUX_DATADIR, 'merit_rgn_elv.tif')
# elv_lr_file = os.path.join(AUX_DATADIR, 'merit_rgn_elv_lr.tif')
# frc_lr_file = os.path.join(AUX_DATADIR, 'merit_rgn_land_fraction_lr.tif')
# # slp_lr_file = os.path.join(AUX_DATADIR, 'merit_basin_slope_lr.tif')
# Read band 1 of the flow direction raster plus the georeferencing needed
# later (affine transform and CRS).
with rasterio.open(dir_file, 'r') as src:
    flwdir = src.read(1)
    transform = src.transform
    crs = src.crs
# EPSG:4326 = geographic lon/lat; pyflwdir uses this flag to compute
# distances/areas in geographic rather than projected units.
latlon = crs.to_epsg() == 4326
# with rasterio.open(wth_file, 'r') as src:
#     rivwth = src.read(1)
# with rasterio.open(elv_file, 'r') as src:
#     elevtn = src.read(1)
# with rasterio.open(elv_lr_file, 'r') as src:
#     elevtn_lr = src.read(1)
# with rasterio.open(frc_lr_file, 'r') as src:
#     frac_lr = src.read(1)
# # with rasterio.open(slp_lr_file, 'r') as src:
# #     slp_lr = src.read(1)
# This is a requirement: D8 codes are parsed from unsigned 8-bit values
# (matches the ftype='d8' argument below).
flwdir = flwdir.astype('uint8')
# Parse the raw D8 grid into an actionable FlwdirRaster object.
flw = pyflwdir.from_array(
    flwdir, ftype='d8', transform=transform, latlon=latlon, cache=True
)
# Upscaling factor in cells, e.g. 120: 0.0008333 deg -> 0.1 deg (see note
# below); read from the environment, raises KeyError/ValueError if bad.
SCALE_FACTOR = int(os.environ['SCALE_FACTOR'])
# # SCALE_FACTOR = 120 # 0.0008333 -> 0.1 [TODO: user-specified]
# OUTLET_X = float(os.environ['OUTLET_X'])
# OUTLET_Y = float(os.environ['OUTLET_Y'])
# # OUTLET_X=-72.6271
# # OUTLET_Y=-13.0045
# OUTLET_COORDS = (OUTLET_X, OUTLET_Y)
def main():
# ================================== #
# 1 - Upscale flow direction
# ================================== #
# Apply scale factor to upscale flow direction map
# uparea = flw.upstream_area(unit='km2')
flwdir_lr, idxs_out = flw.upscale(
scale_factor=SCALE_FACTOR,
# uparea=uparea,
method='ihu'
)
# Retrieve validity flags
valid = flw.upscale_error(flwdir_lr, idxs_out)
# Write output
ftype = flw.ftype
shape_lr = flwdir_lr.shape
dims=('y','x')
transform_lr = Affine(
transform[0] * SCALE_FACTOR, transform[1], transform[2],
transform[3], transform[4] * SCALE_FACTOR, transform[5]
)
lon_vals, lat_vals = affine_to_coords(transform_lr, flwdir_lr.shape)
coords={'y': lat_vals, 'x': lon_vals}
# Flow direction map
da_flwdir = xr.DataArray(
name='flwdir',
data=flwdir_lr.to_array(ftype),
coords=coords,
dims=dims,
attrs=dict(
long_name=f'{ftype} flow direction',
_FillValue=flw._core._mv)
)
da_flwdir.rio.set_crs(4326)
da_flwdir.rio.to_raster(
os.path.join(
AUX_DATADIR,
"merit_flow_direction.tif"
)
)
# Downstream IDs
da_outidx = xr.DataArray(
name='outidx',
data=idxs_out,
coords=coords,
dims=dims,
attrs=dict(
long_name = 'subgrid outlet index',
# _FillValue = flw._core._mv
_FillValue = I4_FILLVAL
)
)
da_outidx.rio.set_crs(4326)
da_outidx.rio.to_raster(
os.path.join(
AUX_DATADIR,
"merit_subgrid_outlet_index.tif"
)
)
# # NOT USED:
# # flw = pyflwdir.from_array(
# # flwdir_lr.to_array(ftype), ftype='d8', transform=transform_lr, latlon=latlon, cache=True
# # )
# # # Translate outlet indices to global x,y coordinates
# # # NB `dir_file` is arbitrary - the routine uses it to
# # # extract the coordinates of each point
# # with xr.open_rasterio(elv_file) as template:
# # x_out, y_out = template.rio.idx_to_xy(
# # idxs_out,
# # mask = idxs_out != flw._core._mv
# # )
# # # Extract river basin at coarse resolution, then use as a mask
# # basins_lr = flwdir_lr.basins(xy=OUTLET_COORDS)
# # Recompute upstream area for the coarse resolution
# uparea_lr = flwdir_lr.upstream_area(unit='m2')
# # Get global cell id and downstream id
# ids = np.array([i for i in range(flwdir_lr.size)]).reshape(flwdir_lr.shape)
# ids = np.array(
# [i for i in range(flwdir_lr.size)], dtype=np.int32
# ).reshape(flwdir_lr.shape)
# # Start from 1, not zero
# ids += 1
# # Flip the array so that the index starts from the bottom left,
# # increasing from left to right fastest.
# ids = np.flipud(ids)
# # Get the flow direction map in terms of the NEXTXY global format
# nextxy = flwdir_lr.to_array('nextxy')
# nextx = nextxy[0,...]
# nexty = nextxy[1,...]
# ny, nx = flwdir_lr.shape
# # Preallocate output
# dn_id = np.zeros((flwdir_lr.shape))
# for i in range(ny):
# for j in range(nx):
# # account for zero indexing
# yi = nexty[i,j] - 1
# xi = nextx[i,j] - 1
# if (yi >= 0) & (xi >= 0):
# idx = ids[yi,xi]
# else:
# idx = -9999
# dn_id[i,j] = idx
# # Write output
# # # ================================== #
# # # 2 - Area
# # # ================================== #
# # # Get longitude/latitude coordinates
# # transform_lr = Affine(
# # transform[0] * SCALE_FACTOR, transform[1], transform[2],
# # transform[3], transform[4] * SCALE_FACTOR, transform[5]
# # )
# # lon_vals, lat_vals = affine_to_coords(transform_lr, flwdir_lr.shape)
# # # Compute grid area in radians
# # area_rad = np.zeros((ny, nx), dtype=np.float64)
# # area_m2 = np.zeros((ny, nx), dtype=np.float64)
# # R = 6371007.2 # Radius of the earth
# # for i in range(len(lon_vals)):
# # lon0 = (lon_vals[i] - transform_lr[0] / 2) * (np.pi / 180)
# # lon1 = (lon_vals[i] + transform_lr[0] / 2) * (np.pi / 180)
# # for j in range(len(lat_vals)):
# # lat0 = (lat_vals[j] + transform_lr[4] / 2) * (np.pi / 180)
# # lat1 = (lat_vals[j] - transform_lr[4] / 2) * (np.pi / 180)
# # area_rad[j,i] = (np.sin(lat1) - np.sin(lat0)) * (lon1 - lon0)
# # area_m2[j,i] = (np.sin(lat1) - np.sin(lat0)) * (lon1 - lon0) * R ** 2
# # # ================================== #
# # # 3 - Accumulate mean annual Q
# # # ================================== #
# # # We use this to estimate some channel parameters, based on
# # # empirical relationships
# # runoff_lr = xr.open_dataset(MEAN_ANNUAL_RUNOFF)['runoff']
# # # Convert m/y to m3/y
# # runoff_lr *= area_m2
# # # Convert m3/y to m3/s
# # runoff_lr /= (365 * 24 * 60 * 60)
# # Qmean_lr = flwdir_lr.accuflux(runoff_lr, direction="up")
# # Qmean_lr = Qmean_lr.astype(np.float64)
# # # ================================== #
# # # 3 - Compute channel bankfull depth/width
# # # ================================== #
# # # HyMAP (https://journals.ametsoc.org/view/journals/hydr/13/6/jhm-d-12-021_1.xml#bib8)
# # Beta = 18. # TODO: check this value
# # alpha = 3.73e-3
# # width_main = np.clip(Beta * Qmean_lr ** 0.5, 10., None).astype(np.float64)
# # depth_main = np.clip(alpha * width_main, 2., None).astype(np.float64)
# # width_main_floodplain = width_main * 3.
# # # Alternatives
# # # NB first two from https://ec-jrc.github.io/lisflood/pdfs/Dataset_hydro.pdf
# # # width_main = uparea ** 0.0032
# # # width_main = Qmean_lr ** 0.539
# # # width_main = flw.subgrid_rivavg(idxs_out, rivwth)
# # # Tributary bankfull depth/width (use gridbox runoff)
# # width_trib = np.clip(Beta * runoff_lr.values ** 0.5, 10., None).astype(np.float64)
# # depth_trib = np.clip(alpha * width_trib, 2., None).astype(np.float64)
# # # Manning channel/overland (LISFLOOD)
# # n_channel = (
# # 0.025 + 0.015
# # * np.clip(50. / (uparea_lr / 1000 / 1000), None, 1)
# # + 0.030 * np.clip(elevtn_lr / 2000., None, 1.)
# # ).astype(np.float64)
# # # Initial guess
# # n_overland = (np.ones_like(n_channel) * 0.03).astype(np.float64)
# # # ================================== #
# # # 4 - Compute river length/slope
# # # ================================== #
# # length_main = flw.subgrid_rivlen(idxs_out, direction="up").astype(np.float64)
# # slope_main = flw.subgrid_rivslp(idxs_out, elevtn).astype(np.float64)
# # # This is not implemented yet:
# # # rivslp2 = flw.subgrid_rivslp2(idxs_out, elevtn)
# # # MOSART makes tributary slope equal to main river slope
# # slope_trib = slope_main
# # # ================================== #
# # # 5 - Write output
# # # ================================== #
# # mask = ~(basins_lr.astype(bool))
# # # First output file defines the model grid
# # nco = netCDF4.Dataset(
# # os.path.join(OUTPUT_DIRECTORY, 'land.nc'), 'w', format='NETCDF4'
# # )
# # nco.createDimension('lat', len(lat_vals))
# # nco.createDimension('lon', len(lon_vals))
# # var | |
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces # noqa: E501
Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
    def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces # noqa: E501
        Checks tenants for matching variable set rule NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces_with_http_info(base_space_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str base_space_id: ID of the space (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Endpoint parameters plus the generic transport options every
        # generated method accepts.
        all_params = ['base_space_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the local names defined so far ('self',
        # 'base_space_id', 'kwargs', 'all_params'); renaming any local in
        # this method would change the contents of `params`.
        params = locals()
        # Reject unknown keyword arguments, then flatten accepted ones into
        # the params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'base_space_id' is set
        if ('base_space_id' not in params or
                params['base_space_id'] is None):
            raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variable_set_test_action_spaces`") # noqa: E501
        collection_formats = {}
        # Substitute the space id into the URL path template.
        path_params = {}
        if 'base_space_id' in params:
            path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared api_client; response_type=None means no body is deserialized.
        return self.api_client.call_api(
            '/api/{baseSpaceId}/tenants/variableset-test', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None, # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder(self, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, **kwargs) # noqa: E501
return data
    def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(self, id, **kwargs): # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder # noqa: E501
        Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: ID of the resource (required)
        :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
        """
        # Endpoint parameters plus the generic transport options every
        # generated method accepts.
        all_params = ['id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the local names defined so far ('self',
        # 'id', 'kwargs', 'all_params'); renaming any local in this method
        # would change the contents of `params`.
        params = locals()
        # Reject unknown keyword arguments, then flatten accepted ones into
        # the params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder`") # noqa: E501
        collection_formats = {}
        # Substitute the resource id into the URL path template.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared api_client; the body deserializes to TenantVariableResource.
        return self.api_client.call_api(
            '/api/tenants/{id}/variables', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TenantVariableResource', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces(self, base_space_id, id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces # noqa: E501
Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the resource (required)
:return: TenantVariableResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
    def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
        """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces # noqa: E501
        Gets all the available variables associated with the tenant. NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces_with_http_info(base_space_id, id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str base_space_id: ID of the space (required)
        :param str id: ID of the resource (required)
        :return: TenantVariableResource
        If the method is called asynchronously,
        returns the request thread.
        """
        # Endpoint parameters plus the generic transport options every
        # generated method accepts.
        all_params = ['base_space_id', 'id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the local names defined so far ('self',
        # 'base_space_id', 'id', 'kwargs', 'all_params'); renaming any local
        # in this method would change the contents of `params`.
        params = locals()
        # Reject unknown keyword arguments, then flatten accepted ones into
        # the params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'base_space_id' is set
        if ('base_space_id' not in params or
                params['base_space_id'] is None):
            raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces`") # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_get_responder_spaces`") # noqa: E501
        collection_formats = {}
        # Substitute the space id and resource id into the URL path template.
        path_params = {}
        if 'base_space_id' in params:
            path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
        if 'id' in params:
            path_params['id'] = params['id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
        # Delegate the actual HTTP call (and optional async dispatch) to the
        # shared api_client; the body deserializes to TenantVariableResource.
        return self.api_client.call_api(
            '/api/{baseSpaceId}/tenants/{id}/variables', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TenantVariableResource', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action # noqa: E501
Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(**kwargs) # noqa: E501
return data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(self, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action # noqa: E501
Returns list of tenants who are missing required variables NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_missing_action_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
| |
mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
    def __init__(
        self,
        **kwargs
    ):
        """Initialize the account; ``id`` is required and raises KeyError if absent."""
        super(StorageAccount, self).__init__(**kwargs)
        self.id = kwargs['id']
        self.identity = kwargs.get('identity', None)
        # Read-only on the service side (see _validation); never set by the client.
        self.status = None
class SystemData(msrest.serialization.Model):
    """Creation and last-modification metadata attached to a resource.

    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :type created_by_type: str or ~video_analyzer.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
    :param last_modified_at: The timestamp of resource last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    # Wire format: JSON key and msrest serialization type per attribute.
    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(SystemData, self).__init__(**kwargs)
        # Every attribute is optional; unset keyword arguments become None.
        for attr in ('created_by', 'created_by_type', 'created_at',
                     'last_modified_by', 'last_modified_by_type',
                     'last_modified_at'):
            setattr(self, attr, kwargs.get(attr))
class TimeSequenceBase(msrest.serialization.Model):
    """Base type for a sequence of datetime ranges expressed as a string.

    Use the sub-classes rather than this class directly. Known
    sub-classes are: VideoSequenceAbsoluteTimeMarkers.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {'type': {'required': True}}

    _attribute_map = {'type': {'key': '@type', 'type': 'str'}}

    # Maps discriminator values to the concrete subclass msrest instantiates.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TimeSequenceBase, self).__init__(**kwargs)
        # Discriminator is filled in by the server / concrete subclass.
        self.type = None  # type: Optional[str]
class TlsEndpoint(EndpointBase):
    """An endpoint the pipeline connects to over TLS (data encrypted in transit).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
     URL. This is an optional property, typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
     connection. A null list designates that Azure Video Analyzer's list of trusted authorities
     should be used.
    :type trusted_certificates: ~video_analyzer.models.CertificateSource
    :param validation_options: Validation options to use when authenticating a TLS connection. By
     default, strict validation is used.
    :type validation_options: ~video_analyzer.models.TlsValidationOptions
    """

    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    # Wire format: JSON key and msrest serialization type per attribute.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
        'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
        'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Base class consumes credentials/url/tunnel; this class adds the
        # TLS-specific optional settings and pins the discriminator.
        super(TlsEndpoint, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint'  # type: str
        for attr in ('trusted_certificates', 'validation_options'):
            setattr(self, attr, kwargs.get(attr))
class TlsValidationOptions(msrest.serialization.Model):
    """Options controlling how TLS endpoints are validated.

    :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be
     skipped. Default is 'false'.
    :type ignore_hostname: str
    :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be
     skipped. Default is 'false'.
    :type ignore_signature: str
    """

    _attribute_map = {
        'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
        'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TlsValidationOptions, self).__init__(**kwargs)
        # Both flags are optional string-valued toggles; default is unset.
        for attr in ('ignore_hostname', 'ignore_signature'):
            setattr(self, attr, kwargs.get(attr))
class TokenClaim(msrest.serialization.Model):
    """A claim that is expected to be present on a token.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the claim which must be present on the token.
    :type name: str
    :param value: Required. Expected value of the claim to be present on the token.
    :type value: str
    """

    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TokenClaim, self).__init__(**kwargs)
        # Both fields are required; a missing keyword raises KeyError.
        self.name, self.value = kwargs['name'], kwargs['value']
class TrackedResource(Resource):
    """An Azure Resource Manager tracked top-level resource, which carries 'tags' and a 'location'.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
    }

    # Wire format: JSON key and msrest serialization type per attribute.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # Base class handles the server-populated read-only attributes.
        super(TrackedResource, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
        # Required: a missing 'location' keyword raises KeyError.
        self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
    """An endpoint that the pipeline can connect to over clear transport (no encryption in transit).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Optional tunnel through which Video Analyzer can connect to the
     endpoint URL; typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    """

    # 'type' is the polymorphic discriminator; credentials and url are mandatory.
    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    # Wire-format keys/types used by msrest for (de)serialization.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
    }

    def __init__(self, **kwargs):
        """Initialize the endpoint; see the class docstring for keyword arguments."""
        super(UnsecuredEndpoint, self).__init__(**kwargs)
        # Fixed discriminator value identifying this concrete type on the wire.
        self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint'  # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
    """Details of a user assigned managed identity used by the Video Analyzer resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar client_id: The client ID.
    :vartype client_id: str
    :ivar principal_id: The principal ID.
    :vartype principal_id: str
    """

    # Both fields are server-populated and therefore read-only.
    _validation = {
        'client_id': {'readonly': True},
        'principal_id': {'readonly': True},
    }

    # Wire-format keys/types used by msrest for (de)serialization.
    _attribute_map = {
        'client_id': {'key': 'clientId', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; both attributes start unset (populated by the server)."""
        super(UserAssignedManagedIdentity, self).__init__(**kwargs)
        self.client_id = None
        self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
    """Username and password credentials.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types. Constant filled by server.
    :type type: str
    :param username: Required. Username to be presented as part of the credentials.
    :type username: str
    :param password: Required. Password to be presented as part of the credentials. It is
     recommended that this value is parameterized as a secret string in order to prevent
     this value to be returned as part of the resource on API requests.
    :type password: str
    """

    # All three fields are mandatory; 'type' is the polymorphic discriminator.
    _validation = {
        'type': {'required': True},
        'username': {'required': True},
        'password': {'required': True},
    }

    # Wire-format keys/types used by msrest for (de)serialization.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'username': {'key': 'username', 'type': 'str'},
        'password': {'key': 'password', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the credentials; requires 'username' and 'password' keywords."""
        super(UsernamePasswordCredentials, self).__init__(**kwargs)
        # Fixed discriminator value identifying this concrete type on the wire.
        self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials'  # type: str
        self.username = kwargs['username']  # required; KeyError if missing
        self.password = kwargs['password']  # required; KeyError if missing
class VideoAnalyzer(TrackedResource):
"""The Video | |
"""Models for tables not included in UCR, generated for our system
`models.py` is meant to simply reflect the tables as they exist in UCR
itself; `cdemodels.py` extends those model classes. *These* models, on
the other hand, must actually be generated in our system.
"""
from copy import deepcopy
import logging
from psycopg2 import ProgrammingError
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy import func, UniqueConstraint
from sqlalchemy.sql import sqltypes
from flask_restful import abort
from crime_data.common import models
from crime_data.common.base import ExplorerOffenseMapping
from crime_data.common.models import RefAgency, RefState, RefCounty
from crime_data.extensions import db
from sqlalchemy import or_,and_
# FIXME: hard-coded cap on the most recent complete year of agency data; bump when newer UCR data years are loaded.
MAX_AGENCY_YEAR = 2016
class FilterableModel:
    """Mixin adding string-aware query filtering to SQLAlchemy models."""

    @classmethod
    def column_is_string(cls, col_name):
        """Return True when the named table column is a string type."""
        column = getattr(cls.__table__.c, col_name)
        return isinstance(column.type, sqltypes.String)

    @classmethod
    def filtered(cls, filters, args=None):
        """Build a query applying `filters` plus optional 'fields'/'by' args.

        Each filter is either a ready-made BinaryExpression or a
        (column_name, comparator, values) triple whose values are OR-ed
        together; string columns are compared case-insensitively.
        """
        args = args or []
        qry = cls.query
        # Restrict the SELECT list when explicit fields were requested.
        # This could be generalized to other places in the future.
        if 'fields' in args:
            requested = args['fields'].split(',')
            qry = qry.with_entities(*requested).select_from(cls)
        for spec in filters:
            if isinstance(spec, BinaryExpression):
                qry = qry.filter(spec)
                continue
            (col_name, comparator, values) = spec
            col = getattr(cls, col_name)
            # Lower-case both sides for case-insensitive string comparison.
            if cls.column_is_string(col_name):
                col = func.lower(col)
            operation = getattr(col, comparator)
            qry = qry.filter(or_(operation(v) for v in values)).order_by(col)
        # Optional explicit ordering, applied after all filters.
        if 'by' in args:
            for order_col in args['by'].split(','):
                qry = qry.order_by(getattr(cls, order_col))
        return qry
class AgencyParticipation(db.Model, FilterableModel):
    """Represents agency participation for a single month.

    Backed by the ``agency_participation`` table; keyed by (year, agency_id).
    """
    __tablename__ = 'agency_participation'
    # Composite primary key: one row per (year, agency_id).
    year = db.Column(db.SmallInteger, nullable=False, primary_key=True)
    state_name = db.Column(db.String)
    state_abbr = db.Column(db.String)
    agency_id = db.Column(db.Integer, nullable=False, primary_key=True)
    agency_ori = db.Column(db.String)
    agency_name = db.Column(db.String)
    agency_population = db.Column(db.BigInteger)
    population_group_code = db.Column(db.String)
    population_group = db.Column(db.String)
    # Summary reporting counts.
    reported = db.Column(db.SmallInteger, nullable=False)
    months_reported = db.Column(db.SmallInteger, nullable=False)
    # NIBRS counterparts of the two columns above.
    nibrs_reported = db.Column(db.SmallInteger, nullable=False)
    nibrs_months_reported = db.Column(db.SmallInteger, nullable=False)
    # Participation flags (presumably 0/1 given SmallInteger -- confirm against loader).
    covered = db.Column(db.SmallInteger)
    participated = db.Column(db.SmallInteger)
    nibrs_participated = db.Column(db.SmallInteger)
class ArsonSummary(db.Model):
    """Arson counts summarized by year/state/agency/subcategory.

    NOTE(review): the grouping_bitmap column suggests rows exist at several
    aggregation levels (rollup-style) -- confirm against the table loader.
    """
    __tablename__ = 'arson_summary'
    arson_summary_id = db.Column(db.Integer, nullable=False, primary_key=True)
    grouping_bitmap = db.Column(db.Integer)
    year = db.Column(db.SmallInteger)
    state_id = db.Column(db.Integer)
    state_abbr = db.Column(db.Text)
    agency_id = db.Column(db.Integer)
    ori = db.Column(db.Text)
    subcategory_code = db.Column(db.Text)
    subcategory_name = db.Column(db.Text)
    # Offense counts.
    reported = db.Column(db.Integer)
    unfounded = db.Column(db.Integer)
    actual = db.Column(db.Integer)
    cleared = db.Column(db.Integer)
    juvenile_cleared = db.Column(db.Integer)
    uninhabited = db.Column(db.Integer)
    est_damage_value = db.Column(db.Integer)
class ParticipationRate(db.Model):
    """UCR/NIBRS participation rates for a state or county in a given year."""
    __tablename__ = 'participation_rates'
    participation_id = db.Column(db.Integer, nullable=False, primary_key=True)
    year = db.Column(db.SmallInteger, nullable=False)
    # Both FKs are nullable; presumably a row is either state-level or
    # county-level depending on which one is set -- confirm against loader.
    state_id = db.Column(db.Integer,
                         db.ForeignKey(RefState.state_id,
                                       deferrable=True,
                                       initially='DEFERRED'),
                         nullable=True)
    county_id = db.Column(db.Integer,
                          db.ForeignKey(RefCounty.county_id,
                                        deferrable=True,
                                        initially='DEFERRED'),
                          nullable=True)
    state_name = db.Column(db.String)
    county_name = db.Column(db.String)
    # Agency counts and derived rates.
    total_agencies = db.Column(db.Integer)
    participating_agencies = db.Column(db.Integer)
    participation_rate = db.Column(db.Float)
    nibrs_participating_agencies = db.Column(db.Integer)
    nibrs_participation_rate = db.Column(db.Float)
    covered_agencies = db.Column(db.Integer)
    covered_rate = db.Column(db.Float)
    # Population totals for the participating agencies.
    total_population = db.Column(db.BigInteger)
    participating_population = db.Column(db.BigInteger)
    nibrs_participating_population = db.Column(db.BigInteger)
class CreatableModel:
    """Mixin for models whose backing table can be created on demand."""
    @classmethod
    def create(cls):
        """Creates database table for the model, unless it already exists."""
        try:
            cls.__table__.create(db.session.bind)
        except ProgrammingError:
            # Table already exists -- creation is deliberately best-effort.
            pass
class NibrsIncidentRepresentation(db.Model, CreatableModel):
    """Caches a serialized JSON representation for each NIBRS incident."""
    __tablename__ = 'nibrs_incident_representation'
    incident_representation_id = db.Column(db.BigInteger, primary_key=True)
    incident_id = db.Column(db.BigInteger,
                            db.ForeignKey(models.NibrsIncident.incident_id))
    # Cached serialized form of the incident (see generate()).
    representation = db.Column(JSONB)
    # One-to-one link; exposes `incident.representation` via the backref.
    incident = db.relationship(models.NibrsIncident,
                               uselist=False,
                               backref=backref('representation',
                                               uselist=False))
    @classmethod
    def regenerate(cls):
        """Generates or replaces cached representations for all records."""
        for incident in models.NibrsIncident.query:
            # Create the row if missing, then (re)generate its cached JSON.
            if not incident.representation:
                incident.representation = cls(incident=incident)
            incident.representation.generate()
        models.NibrsIncident.query.session.commit()
    @classmethod
    def fill(cls, batch_size=None):
        """Generates cached representations for records that lack them.
        Using a `batch_size` helps for large operations that may fail."""
        finished = False
        batch_no = 0
        while not finished:
            finished = True
            # Each pass picks up the next `batch_size` incidents still lacking
            # a representation; the NULL filter shrinks as batches commit.
            qry = models.NibrsIncident.query.filter(
                models.NibrsIncident.representation == None).limit(batch_size)
            for incident in qry:
                finished = False  # until the query comes back empty
                incident.representation = cls(incident=incident)
                incident.representation.generate()
            # Commit per batch so progress survives a mid-run failure.
            models.NibrsIncident.query.session.commit()
            logging.warning(
                "Batch #{batch_no} of #{batch_size} complete".format(
                    batch_no=batch_no,
                    batch_size=batch_size))
            batch_no += 1
    def generate(self):
        """Generates and caches output for a single NibrsIncident."""
        # Imported here to avoid a circular import at module load time.
        from crime_data.common import marshmallow_schemas
        _schema = marshmallow_schemas.NibrsIncidentSchema()
        # NOTE(review): `.data` implies a marshmallow 2.x MarshalResult.
        self.representation = _schema.dump(self.incident).data
class RetaEstimated(db.Model):
    """
    Estimated data loaded from a CSV data file created from published data
    tables from the _Crime in the United States_ reports.

    One row per (year, state), enforced by the unique constraint below.
    """
    __tablename__ = 'reta_estimated'
    __table_args__ = (
        UniqueConstraint('year', 'state_id'), )
    estimate_id = db.Column(db.Integer, primary_key=True)
    year = db.Column(db.SmallInteger)
    state_id = db.Column(db.SmallInteger,
                         db.ForeignKey(RefState.state_id,
                                       deferrable=True,
                                       initially='DEFERRED'),
                         nullable=True)
    state_abbr = db.Column(db.String(2))
    population = db.Column(db.BigInteger)
    # Estimated offense counts; rape appears under both the legacy and the
    # revised UCR definition.
    violent_crime = db.Column(db.BigInteger)
    homicide = db.Column(db.BigInteger)
    rape_legacy = db.Column(db.BigInteger)
    rape_revised = db.Column(db.BigInteger)
    robbery = db.Column(db.BigInteger)
    aggravated_assault = db.Column(db.BigInteger)
    property_crime = db.Column(db.BigInteger)
    burglary = db.Column(db.BigInteger)
    larceny = db.Column(db.BigInteger)
    motor_vehicle_theft = db.Column(db.BigInteger)
    caveats = db.Column(db.Text)
    # ORM access to the associated state row.
    state = relationship(RefState)
class ArrestsNational(db.Model):
    """Estimated data about national arrest totals.

    Presumably one row per data year, with one column per arrest category.
    """
    __tablename__ = 'asr_national'
    id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
    year = db.Column(db.SmallInteger)
    population = db.Column(db.BigInteger)
    total_arrests = db.Column(db.BigInteger)
    # Part I offense arrest counts.
    homicide = db.Column(db.BigInteger)
    rape = db.Column(db.BigInteger)
    robbery = db.Column(db.BigInteger)
    aggravated_assault = db.Column(db.BigInteger)
    burglary = db.Column(db.BigInteger)
    larceny = db.Column(db.BigInteger)
    motor_vehicle_theft = db.Column(db.BigInteger)
    arson = db.Column(db.BigInteger)
    violent_crime = db.Column(db.BigInteger)
    property_crime = db.Column(db.BigInteger)
    # Other arrest categories.
    other_assault = db.Column(db.BigInteger)
    forgery = db.Column(db.BigInteger)
    fraud = db.Column(db.BigInteger)
    embezzlement = db.Column(db.BigInteger)
    stolen_property = db.Column(db.BigInteger)
    vandalism = db.Column(db.BigInteger)
    weapons = db.Column(db.BigInteger)
    prostitution = db.Column(db.BigInteger)
    other_sex_offenses = db.Column(db.BigInteger)
    drug_abuse = db.Column(db.BigInteger)
    gambling = db.Column(db.BigInteger)
    against_family = db.Column(db.BigInteger)
    dui = db.Column(db.BigInteger)
    liquor_laws = db.Column(db.BigInteger)
    drunkenness = db.Column(db.BigInteger)
    disorderly_conduct = db.Column(db.BigInteger)
    vagrancy = db.Column(db.BigInteger)
    other = db.Column(db.BigInteger)
    suspicion = db.Column(db.BigInteger)
    curfew_loitering = db.Column(db.BigInteger)
class AgencySums(db.Model):
    """Per-agency Return A offense sums backed by the agency_sums_view."""
    __tablename__ = 'agency_sums_view'
    id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
    year = db.Column(db.SmallInteger)
    agency_id = db.Column(db.BigInteger)
    state_postal_abbr = db.Column(db.Text)
    ori = db.Column(db.Text)
    pub_agency_name = db.Column(db.Text)
    offense_id = db.Column(db.BigInteger)  # reta_offense_subcat
    offense_subcat_id = db.Column(db.BigInteger)
    offense_code = db.Column(db.Text)  # reta_offense
    offense_subcat_code = db.Column(db.Text)
    offense_subcat_name = db.Column(db.Text)
    offense_name = db.Column(db.Text)
    # Offense counts.
    reported = db.Column(db.BigInteger)
    unfounded = db.Column(db.BigInteger)
    actual = db.Column(db.BigInteger)
    cleared = db.Column(db.BigInteger)
    juvenile_cleared = db.Column(db.BigInteger)
    def get(self, state = None, agency = None, year = None, county = None, explorer_offense = None):
        """Build a query for agency sums, optionally filtered by state postal
        abbreviation, agency ORI, year, county FIPS code, and/or explorer
        offense name.

        NOTE(review): written as an instance method but never uses `self`.
        """
        query = AgencySums.query
        if state:
            # Case-insensitive state comparison.
            query = query.filter(func.lower(AgencySums.state_postal_abbr) == state.lower())
        if county:
            # Restrict to agencies mapped to the given county FIPS code
            # (for the specific data year, when one was given).
            subq = (db.session.query(models.RefAgencyCounty.agency_id)
                    .select_from(models.RefAgencyCounty)
                    .join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
                    .filter(models.RefCounty.county_fips_code == county)
                    )
            if year:
                subq = subq.filter(models.RefAgencyCounty.data_year == year)
            query = query.filter(AgencySums.agency_id.in_(subq.subquery()))
        if agency:
            query = query.filter(AgencySums.ori == agency)
        if year:
            query = query.filter(AgencySums.year == year)
        if explorer_offense:
            # Translate the explorer offense name to its Return A offense code.
            offense = ExplorerOffenseMapping(explorer_offense).reta_offense_code
            query = query.filter(AgencySums.offense_code == offense)
        # Cap results at the most recent year with complete agency data.
        query = query.filter(AgencySums.year <= MAX_AGENCY_YEAR)
        query = query.filter(AgencyParticipation.year <= MAX_AGENCY_YEAR)
        # Only include agencies that reported all 12 months of the year.
        query = query.join(AgencyParticipation, and_(AgencyParticipation.agency_id == AgencySums.agency_id, AgencyParticipation.year == AgencySums.year)).filter(AgencyParticipation.months_reported == 12)
        query = query.order_by(AgencySums.year.desc())
        return query
class AgencyOffenseCounts(db.Model):
    """Per-agency offense counts backed by the agency_offenses_view."""
    __tablename__ = 'agency_offenses_view'
    id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
    year = db.Column(db.SmallInteger)
    agency_id = db.Column(db.BigInteger)
    state_postal_abbr = db.Column(db.Text)
    ori = db.Column(db.Text)
    pub_agency_name = db.Column(db.Text)
    offense_id = db.Column(db.BigInteger)  # reta_offense_subcat
    offense_code = db.Column(db.Text)  # reta_offense
    offense_name = db.Column(db.Text)
    # Offense counts.
    reported = db.Column(db.BigInteger)
    unfounded = db.Column(db.BigInteger)
    actual = db.Column(db.BigInteger)
    cleared = db.Column(db.BigInteger)
    juvenile_cleared = db.Column(db.BigInteger)
    def get(self, state = None, agency = None, year = None, county = None, explorer_offense = None):
        """Build a query for agency offense counts, optionally filtered by
        state postal abbreviation, agency ORI, year, county FIPS code, and/or
        explorer offense name.

        NOTE(review): written as an instance method but never uses `self`.
        """
        query = AgencyOffenseCounts.query
        if state:
            # Case-insensitive state comparison.
            query = query.filter(func.lower(AgencyOffenseCounts.state_postal_abbr) == state.lower())
        if county:
            # Restrict to agencies mapped to the given county FIPS code
            # (for the specific data year, when one was given).
            subq = (db.session.query(models.RefAgencyCounty.agency_id)
                    .select_from(models.RefAgencyCounty)
                    .join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
                    .filter(models.RefCounty.county_fips_code == county)
                    )
            if year:
                subq = subq.filter(models.RefAgencyCounty.data_year == year)
            query = query.filter(AgencyOffenseCounts.agency_id.in_(subq.subquery()))
        if agency:
            query = query.filter(AgencyOffenseCounts.ori == agency)
        if year:
            query = query.filter(AgencyOffenseCounts.year == year)
        if explorer_offense:
            # Translate the explorer offense name to its Return A offense code.
            offense = ExplorerOffenseMapping(explorer_offense).reta_offense_code
            query = query.filter(AgencyOffenseCounts.offense_code == offense)
        # Only include agencies that reported all 12 months of the year.
        query = query.join(AgencyParticipation,
                           and_(AgencyParticipation.agency_id == AgencyOffenseCounts.agency_id,
                                AgencyParticipation.year == AgencyOffenseCounts.year)).filter(AgencyParticipation.months_reported == 12)
        query = query.order_by(AgencyOffenseCounts.year.desc())
        # Cap results at the most recent year with complete agency data.
        query = query.filter(AgencyOffenseCounts.year <= MAX_AGENCY_YEAR)
        return query
class AgencyClassificationCounts(db.Model):
    """Per-agency offense counts grouped by classification, backed by the
    agency_classification_view."""
    __tablename__ = 'agency_classification_view'
    id = db.Column(db.BigInteger, autoincrement=True, primary_key=True)
    year = db.Column(db.SmallInteger)
    agency_id = db.Column(db.BigInteger)
    state_postal_abbr = db.Column(db.Text)
    ori = db.Column(db.Text)
    pub_agency_name = db.Column(db.Text)
    classification = db.Column(db.Text)
    # Offense counts.
    reported = db.Column(db.BigInteger)
    unfounded = db.Column(db.BigInteger)
    actual = db.Column(db.BigInteger)
    cleared = db.Column(db.BigInteger)
    juvenile_cleared = db.Column(db.BigInteger)
    def get(self, state = None, agency = None, year = None, county = None, classification = None):
        """Build a query for agency classification counts, optionally filtered
        by state postal abbreviation, agency ORI, year, county FIPS code,
        and/or classification (case-insensitive).

        NOTE(review): written as an instance method but never uses `self`.
        """
        query = AgencyClassificationCounts.query
        if state:
            # Case-insensitive state comparison.
            query = query.filter(func.lower(AgencyClassificationCounts.state_postal_abbr) == state.lower())
        if county:
            # Restrict to agencies mapped to the given county FIPS code
            # (for the specific data year, when one was given).
            subq = (db.session.query(models.RefAgencyCounty.agency_id)
                    .select_from(models.RefAgencyCounty)
                    .join(models.RefCounty, and_(models.RefAgencyCounty.county_id == models.RefCounty.county_id))
                    .filter(models.RefCounty.county_fips_code == county)
                    )
            if year:
                subq = subq.filter(models.RefAgencyCounty.data_year == year)
            query = query.filter(AgencyClassificationCounts.agency_id.in_(subq.subquery()))
        if agency:
            query = query.filter(AgencyClassificationCounts.ori == agency)
        if year:
            query = query.filter(AgencyClassificationCounts.year == year)
        if classification:
            query = query.filter(func.lower(AgencyClassificationCounts.classification) == func.lower(classification))
        # Only include agencies that reported all 12 months of the year.
        query = query.join(AgencyParticipation,
                           and_(AgencyParticipation.agency_id == AgencyClassificationCounts.agency_id,
                                AgencyParticipation.year == AgencyClassificationCounts.year)).filter(AgencyParticipation.months_reported == 12)
        query = query.order_by(AgencyClassificationCounts.year.desc())
        # Cap results at the most recent year with complete agency data.
        query = query.filter(AgencyClassificationCounts.year <= MAX_AGENCY_YEAR)
        return query
class CdeAgency(db.Model, FilterableModel):
"""A class for the denormalized cde_agencies table"""
__tablename__ = 'cde_agencies'
__table_args__ = (UniqueConstraint('agency_id'), )
agency_id = db.Column(db.BigInteger, primary_key=True)
ori = db.Column(db.String(9))
legacy_ori = db.Column(db.String(9))
agency_name = db.Column(db.Text)
short_name = db.Column(db.Text)
agency_type_id = db.Column(db.String(1))
agency_type_name = db.Column(db.String(100))
# FIXME: can add associations when we need them
tribe_id = db.Column(db.BigInteger)
campus_id = db.Column(db.BigInteger)
city_id = db.Column(db.BigInteger)
city_name = db.Column(db.Text)
state_id = db.Column(db.SmallInteger)
state_abbr = db.Column(db.String(2))
primary_county_id | |
"""
********************************************************************************
* Name: map_manager
* Author: nswain
* Created On: August 30, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import copy
import json
from abc import ABCMeta, abstractmethod
from tethys_gizmos.gizmo_options import MVView, MVLayer
from tethysext.atcore.services.color_ramps import COLOR_RAMPS
import collections
class MapManagerBase(object):
    """
    Base class for object that orchestrates the map layers and resources.
    """
    # NOTE(review): Python-2 style metaclass declaration; on Python 3 this
    # attribute has no effect, so @abstractmethod is not actually enforced.
    __metaclass__ = ABCMeta
    # Zoom limits and defaults for the map view.
    MAX_ZOOM = 28
    MIN_ZOOM = 0
    DEFAULT_ZOOM = 4
    DEFAULT_CENTER = [-98.583, 39.833]  # presumably [lon, lat], near the center of the CONUS
    # Zoom range used when seeding tile caches.
    SEED_ZOOM_START = 11
    SEED_ZOOM_END = 14
    LAYER_SOURCE_TYPE = 'TileWMS'
    # Re-exported so subclasses/templates can reach the ramps via the manager.
    COLOR_RAMPS = COLOR_RAMPS
    # Web-mercator style tile pyramid: resolutions are 156543.03390625 / 2**z
    # for z = 0..30, over the EPSG:3857 world extent.
    DEFAULT_TILE_GRID = {
        'resolutions': [
            156543.03390625,
            78271.516953125,
            39135.7584765625,
            19567.87923828125,
            9783.939619140625,
            4891.9698095703125,
            2445.9849047851562,
            1222.9924523925781,
            611.4962261962891,
            305.74811309814453,
            152.87405654907226,
            76.43702827453613,
            38.218514137268066,
            19.109257068634033,
            9.554628534317017,
            4.777314267158508,
            2.388657133579254,
            1.194328566789627,
            0.5971642833948135,
            0.2985821416974068,
            0.1492910708487034,
            0.0746455354243517,
            0.0373227677121758,
            0.0186613838560879,
            0.009330691928044,
            0.004665345964022,
            0.002332672982011,
            0.0011663364910055,
            0.0005831682455027,
            0.0002915841227514,
            0.0001457920613757
        ],
        'extent': [-20037508.34, -20037508.34, 20037508.34, 20037508.34],
        'origin': [0.0, 0.0],
        'tileSize': [256, 256]
    }
    # Feature properties hidden from popups by default.
    _DEFAULT_POPUP_EXCLUDED_PROPERTIES = ['id', 'type', 'layer_name']
def __init__(self, spatial_manager, model_db):
self.spatial_manager = spatial_manager
self.model_db = model_db
self._map_extent = None
self._default_view = None
@property
def map_extent(self):
if not self._map_extent:
view, extent = self.get_map_extent()
self._map_extent = extent
return self._map_extent
@property
def default_view(self):
if not self._default_view:
view, extent = self.get_map_extent()
self._default_view = view
return self._default_view
    @abstractmethod
    def compose_map(self, request, *args, **kwargs):
        """
        Compose the MapView object.

        Args:
            request(HttpRequest): A Django request object.

        Returns:
            MapView, 4-list<float>: The MapView and extent objects.
        """
        # Abstract: concrete map managers must implement this method.
def get_cesium_token(self):
"""
Get the cesium token for Cesium Views
Returns:
str: The cesium API token
"""
return ''
def build_param_string(self, **kwargs):
"""
Build a VIEWPARAMS or ENV string with given kwargs (e.g.: 'foo:1;bar:baz')
Args:
**kwargs: key-value pairs of paramaters.
Returns:
str: parameter string.
"""
if not kwargs:
return ''
joined_pairs = []
for k, v in kwargs.items():
joined_pairs.append(':'.join([k, str(v)]))
param_string = ';'.join(joined_pairs)
return param_string
def build_geojson_layer(self, geojson, layer_name, layer_title, layer_variable, layer_id='', visible=True,
public=True, selectable=False, plottable=False, has_action=False, extent=None,
popup_title=None, excluded_properties=None, show_download=False):
"""
Build an MVLayer object with supplied arguments.
Args:
geojson(dict): Python equivalent GeoJSON FeatureCollection.
layer_name(str): Name of GeoServer layer (e.g.: agwa:3a84ff62-aaaa-bbbb-cccc-1a2b3c4d5a6b7c8d-model_boundaries).
layer_title(str): Title of MVLayer (e.g.: Model Boundaries).
layer_variable(str): Variable type of the layer (e.g.: model_boundaries).
layer_id(UUID, int, str): layer_id for non geoserver layer where layer_name may not be unique.
visible(bool): Layer is visible when True. Defaults to True.
public(bool): Layer is publicly accessible when app is running in Open Portal Mode if True. Defaults to True.
selectable(bool): Enable feature selection. Defaults to False.
plottable(bool): Enable "Plot" button on pop-up properties. Defaults to False.
has_action(bool): Enable "Action" button on pop-up properties. Defaults to False.
extent(list): Extent for the layer. Optional.
popup_title(str): Title to display on feature popups. Defaults to layer title.
excluded_properties(list): List of properties to exclude from feature popups.
show_download(boolean): enable download geojson as shapefile. Default is False.
Returns:
MVLayer: the MVLayer object.
""" # noqa: E501
# Define default styles for layers
style_map = self.get_vector_style_map()
# Bind geometry features to layer via layer name
for feature in geojson['features']:
feature['properties']['layer_name'] = layer_name
mv_layer = self._build_mv_layer(
layer_source='GeoJSON',
layer_id=layer_id,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=geojson,
extent=extent,
visible=visible,
public=public,
selectable=selectable,
plottable=plottable,
has_action=has_action,
popup_title=popup_title,
excluded_properties=excluded_properties,
style_map=style_map,
show_download=show_download,
)
return mv_layer
def build_cesium_layer(self, cesium_type, cesium_json, layer_name, layer_title, layer_variable, layer_id='',
visible=True, public=True, selectable=False, plottable=False, has_action=False, extent=None,
popup_title=None, excluded_properties=None, show_download=False):
"""
Build an MVLayer object with supplied arguments.
Args:
cesium_type(enum): 'CesiumModel' or 'CesiumPrimitive'.
cesium_json(dict): Cesium dictionary to describe the layer.
layer_name(str): Name of GeoServer layer (e.g.: agwa:3a84ff62-aaaa-bbbb-cccc-1a2b3c4d5a6b7c8d-model_boundaries).
layer_title(str): Title of MVLayer (e.g.: Model Boundaries).
layer_variable(str): Variable type of the layer (e.g.: model_boundaries).
layer_id(UUID, int, str): layer_id for non geoserver layer where layer_name may not be unique.
visible(bool): Layer is visible when True. Defaults to True.
public(bool): Layer is publicly accessible when app is running in Open Portal Mode if True. Defaults to True.
selectable(bool): Enable feature selection. Defaults to False.
plottable(bool): Enable "Plot" button on pop-up properties. Defaults to False.
has_action(bool): Enable "Action" button on pop-up properties. Defaults to False.
extent(list): Extent for the layer. Optional.
popup_title(str): Title to display on feature popups. Defaults to layer title.
excluded_properties(list): List of properties to exclude from feature popups.
show_download(boolean): enable download geojson as shapefile. Default is False.
Returns:
MVLayer: the MVLayer object.
""" # noqa: E501
# Define default styles for layers
style_map = self.get_vector_style_map()
if cesium_type not in ['CesiumModel', 'CesiumPrimitive']:
raise ValueError('Invalid cesium_type. Must be "CesiumModel" or "CesiumPrimitive".')
mv_layer = self._build_mv_layer(
layer_source=cesium_type,
layer_id=layer_id,
layer_name=layer_name,
layer_title=layer_title,
layer_variable=layer_variable,
options=cesium_json,
extent=extent,
visible=visible,
public=public,
selectable=selectable,
plottable=plottable,
has_action=has_action,
popup_title=popup_title,
excluded_properties=excluded_properties,
style_map=style_map,
show_download=show_download,
)
return mv_layer
def build_wms_layer(self, endpoint, layer_name, layer_title, layer_variable, viewparams=None, env=None,
visible=True, tiled=True, selectable=False, plottable=False, has_action=False, extent=None,
public=True, geometry_attribute='geometry', layer_id='', excluded_properties=None,
popup_title=None, color_ramp_division_kwargs=None, times=None):
"""
Build an WMS MVLayer object with supplied arguments.
Args:
endpoint(str): URL to GeoServer WMS interface.
layer_name(str): Name of GeoServer layer (e.g.: agwa:3a84ff62-aaaa-bbbb-cccc-1a2b3c4d5a6b7c8d-model_boundaries).
layer_title(str): Title of MVLayer (e.g.: Model Boundaries).
layer_variable(str): Variable type of the layer (e.g.: model_boundaries).
layer_id(UUID, int, str): layer_id for non geoserver layer where layer_name may not be unique.
viewparams(str): VIEWPARAMS string.
env(str): ENV string.
visible(bool): Layer is visible when True. Defaults to True.
public(bool): Layer is publicly accessible when app is running in Open Portal Mode if True. Defaults to True.
tiled(bool): Configure as tiled layer if True. Defaults to True.
selectable(bool): Enable feature selection. Defaults to False.
plottable(bool): Enable "Plot" button on pop-up properties. Defaults to False.
has_action(bool): Enable "Action" button on pop-up properties. Defaults to False.
extent(list): Extent for the layer. Optional.
popup_title(str): Title to display on feature popups. Defaults to layer title.
excluded_properties(list): List of properties to exclude from feature popups.
geometry_attribute(str): Name of the geometry attribute. Defaults to "geometry".
color_ramp_division_kwargs(dict): arguments from map_manager.generate_custom_color_ramp_divisions
times (list): List of time steps if layer is time-enabled. Times should be represented as strings in ISO 8601 format (e.g.: ["20210322T112511Z", "20210322T122511Z", "20210322T132511Z"]). Currently only supported in CesiumMapView.
Returns:
MVLayer: the MVLayer object.
""" # noqa: E501
# Build params
params = {}
params['LAYERS'] = layer_name
if tiled:
params.update({
'TILED': True,
'TILESORIGIN': '0.0,0.0'
})
if viewparams:
params['VIEWPARAMS'] = viewparams
if env:
params['ENV'] = env
if times:
times = json.dumps(times),
# Build options
options = {
'url': endpoint,
'params': params,
'serverType': 'geoserver',
'crossOrigin': 'anonymous',
}
if color_ramp_division_kwargs:
# Create color ramp and add them to ENV
color_ramp_divisions = self.generate_custom_color_ramp_divisions(**color_ramp_division_kwargs)
if 'ENV' in params.keys():
if params['ENV']:
params['ENV'] += ";" + self.build_param_string(**color_ramp_divisions)
else:
params['ENV'] = self.build_param_string(**color_ramp_divisions)
else:
params['ENV'] = self.build_param_string(**color_ramp_divisions)
layer_source = 'TileWMS' if tiled else 'ImageWMS'
if tiled:
options['tileGrid'] = self.DEFAULT_TILE_GRID
mv_layer = self._build_mv_layer(
layer_id=layer_id,
layer_name=layer_name,
layer_source=layer_source,
layer_title=layer_title,
layer_variable=layer_variable,
options=options,
extent=extent,
visible=visible,
public=public,
selectable=selectable,
plottable=plottable,
has_action=has_action,
popup_title=popup_title,
excluded_properties=excluded_properties,
geometry_attribute=geometry_attribute,
times=times,
)
return mv_layer
    def build_arc_gis_layer(self, endpoint, layer_name, layer_title, layer_variable, viewparams=None, env=None,
                            visible=True, tiled=True, selectable=False, plottable=False, has_action=False, extent=None,
                            public=True, geometry_attribute='geometry', layer_id='', excluded_properties=None,
                            popup_title=None):
        """
        Build an ArcGIS Map Server MVLayer object with supplied arguments.

        NOTE(review): viewparams, env, and tiled are accepted (presumably for
        signature parity with build_wms_layer) but are not used to build the
        options; 'serverType': 'geoserver' also looks copy-pasted from the WMS
        builder -- confirm whether both are intentional.

        Args:
            endpoint(str): URL to the ArcGIS Map Server REST interface.
            layer_name(str): Name of the layer.
            layer_title(str): Title of MVLayer (e.g.: Model Boundaries).
            layer_variable(str): Variable type of the layer (e.g.: model_boundaries).
            layer_id(UUID, int, str): layer id for layers where layer_name may not be unique.
            viewparams(str): VIEWPARAMS string (currently unused).
            env(str): ENV string (currently unused).
            visible(bool): Layer is visible when True. Defaults to True.
            public(bool): Layer is publicly accessible in Open Portal Mode if True. Defaults to True.
            tiled(bool): Configure as tiled layer if True (currently unused).
            selectable(bool): Enable feature selection. Defaults to False.
            plottable(bool): Enable "Plot" button on pop-up properties. Defaults to False.
            has_action(bool): Enable "Action" button on pop-up properties. Defaults to False.
            extent(list): Extent for the layer. Optional.
            popup_title(str): Title to display on feature popups. Defaults to layer title.
            excluded_properties(list): List of properties to exclude from feature popups.
            geometry_attribute(str): Name of the geometry attribute. Defaults to "geometry".

        Returns:
            MVLayer: the MVLayer object.
        """  # noqa: E501
        # Build options
        options = {
            'url': endpoint,
            'serverType': 'geoserver',
            'crossOrigin': 'anonymous'
        }
        # OpenLayers source type for ArcGIS REST tile services.
        layer_source = 'TileArcGISRest'
        mv_layer = self._build_mv_layer(
            layer_id=layer_id,
            layer_name=layer_name,
            layer_source=layer_source,
            layer_title=layer_title,
            layer_variable=layer_variable,
            options=options,
            extent=extent,
            visible=visible,
            public=public,
            selectable=selectable,
            plottable=plottable,
            has_action=has_action,
            popup_title=popup_title,
            excluded_properties=excluded_properties,
            geometry_attribute=geometry_attribute
        )
        return mv_layer
def _build_mv_layer(self, layer_source, layer_name, layer_title, layer_variable, options, layer_id=None,
extent=None, visible=True, public=True, selectable=False, plottable=False, has_action=False,
excluded_properties=None, popup_title=None, geometry_attribute=None, style_map=None,
show_download=False, times=None):
"""
Build an MVLayer object with supplied arguments.
Args:
layer_source(str): OpenLayers Source to use | |
self._subscription_other_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
self._downloader_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
#
gallery_page_tt = 'Gallery page fetches are heavy requests with unusual fetch-time requirements. It is important they not wait too long, but it is also useful to throttle them:'
gallery_page_tt += os.linesep * 2
gallery_page_tt += '- So they do not compete with file downloads for bandwidth, leading to very unbalanced 20/4400-type queues.'
gallery_page_tt += os.linesep
gallery_page_tt += '- So you do not get 1000 items in your queue before realising you did not like that tag anyway.'
gallery_page_tt += os.linesep
gallery_page_tt += '- To give servers a break (some gallery pages can be CPU-expensive to generate).'
gallery_page_tt += os.linesep * 2
gallery_page_tt += 'These delays/lots are per-domain.'
gallery_page_tt += os.linesep * 2
gallery_page_tt += 'If you do not understand this stuff, you can just leave it alone.'
self._gallery_page_wait_period_pages.setValue( self._new_options.GetInteger( 'gallery_page_wait_period_pages' ) )
self._gallery_page_wait_period_pages.setToolTip( gallery_page_tt )
self._gallery_file_limit.SetValue( HC.options['gallery_file_limit'] )
self._highlight_new_query.setChecked( self._new_options.GetBoolean( 'highlight_new_query' ) )
self._gallery_page_wait_period_subscriptions.setValue( self._new_options.GetInteger( 'gallery_page_wait_period_subscriptions' ) )
self._gallery_page_wait_period_subscriptions.setToolTip( gallery_page_tt )
self._max_simultaneous_subscriptions.setValue( self._new_options.GetInteger( 'max_simultaneous_subscriptions' ) )
self._subscription_file_error_cancel_threshold.SetValue( self._new_options.GetNoneableInteger( 'subscription_file_error_cancel_threshold' ) )
self._process_subs_in_random_order.setChecked( self._new_options.GetBoolean( 'process_subs_in_random_order' ) )
self._pause_character.setText( self._new_options.GetString( 'pause_character' ) )
self._stop_character.setText( self._new_options.GetString( 'stop_character' ) )
self._show_new_on_file_seed_short_summary.setChecked( self._new_options.GetBoolean( 'show_new_on_file_seed_short_summary' ) )
self._show_deleted_on_file_seed_short_summary.setChecked( self._new_options.GetBoolean( 'show_deleted_on_file_seed_short_summary' ) )
self._watcher_page_wait_period.setValue( self._new_options.GetInteger( 'watcher_page_wait_period' ) )
self._watcher_page_wait_period.setToolTip( gallery_page_tt )
self._highlight_new_watcher.setChecked( self._new_options.GetBoolean( 'highlight_new_watcher' ) )
self._subscription_network_error_delay.SetValue( self._new_options.GetInteger( 'subscription_network_error_delay' ) )
self._subscription_other_error_delay.SetValue( self._new_options.GetInteger( 'subscription_other_error_delay' ) )
self._downloader_network_error_delay.SetValue( self._new_options.GetInteger( 'downloader_network_error_delay' ) )
#
rows = []
rows.append( ( 'Default download source:', self._default_gug ) )
rows.append( ( 'If new query entered and no current highlight, highlight the new query:', self._highlight_new_query ) )
rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_pages ) )
rows.append( ( 'By default, stop searching once this many files are found:', self._gallery_file_limit ) )
gridbox = ClientGUICommon.WrapInGrid( gallery_downloader, rows )
gallery_downloader.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
rows = []
rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_subscriptions ) )
rows.append( ( 'Maximum number of subscriptions that can sync simultaneously:', self._max_simultaneous_subscriptions ) )
rows.append( ( 'If a subscription has this many failed file imports, stop and continue later:', self._subscription_file_error_cancel_threshold ) )
rows.append( ( 'Sync subscriptions in random order:', self._process_subs_in_random_order ) )
gridbox = ClientGUICommon.WrapInGrid( subscriptions, rows )
subscriptions.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
subscriptions.Add( self._subscription_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Additional fixed time (in seconds) to wait between watcher checks:', self._watcher_page_wait_period ) )
rows.append( ( 'If new watcher entered and no current highlight, highlight the new watcher:', self._highlight_new_watcher ) )
gridbox = ClientGUICommon.WrapInGrid( watchers, rows )
watchers.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
watchers.Add( self._watcher_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Pause character:', self._pause_character ) )
rows.append( ( 'Stop character:', self._stop_character ) )
rows.append( ( 'Show a \'N\' (for \'new\') count on short file import summaries:', self._show_new_on_file_seed_short_summary ) )
rows.append( ( 'Show a \'D\' (for \'deleted\') count on short file import summaries:', self._show_deleted_on_file_seed_short_summary ) )
rows.append( ( 'Delay time on a gallery/watcher network error:', self._downloader_network_error_delay ) )
rows.append( ( 'Delay time on a subscription network error:', self._subscription_network_error_delay ) )
rows.append( ( 'Delay time on a subscription other error:', self._subscription_other_error_delay ) )
gridbox = ClientGUICommon.WrapInGrid( misc, rows )
misc.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, gallery_downloader, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, subscriptions, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, watchers, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, misc, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def UpdateOptions( self ):
    """Write the panel's current widget values back into the option stores."""
    # the default download source is owned by the domain manager, not the options object
    HG.client_controller.network_engine.domain_manager.SetDefaultGUGKeyAndName( self._default_gug.GetValue() )
    # this one still lives in the legacy options dict
    HC.options[ 'gallery_file_limit' ] = self._gallery_file_limit.GetValue()
    integer_settings = (
        ( 'gallery_page_wait_period_pages', self._gallery_page_wait_period_pages.value() ),
        ( 'gallery_page_wait_period_subscriptions', self._gallery_page_wait_period_subscriptions.value() ),
        ( 'max_simultaneous_subscriptions', self._max_simultaneous_subscriptions.value() ),
        ( 'watcher_page_wait_period', self._watcher_page_wait_period.value() ),
        ( 'subscription_network_error_delay', self._subscription_network_error_delay.GetValue() ),
        ( 'subscription_other_error_delay', self._subscription_other_error_delay.GetValue() ),
        ( 'downloader_network_error_delay', self._downloader_network_error_delay.GetValue() )
    )
    for ( name, value ) in integer_settings:
        self._new_options.SetInteger( name, value )
    boolean_settings = (
        ( 'highlight_new_query', self._highlight_new_query.isChecked() ),
        ( 'process_subs_in_random_order', self._process_subs_in_random_order.isChecked() ),
        ( 'highlight_new_watcher', self._highlight_new_watcher.isChecked() ),
        ( 'show_new_on_file_seed_short_summary', self._show_new_on_file_seed_short_summary.isChecked() ),
        ( 'show_deleted_on_file_seed_short_summary', self._show_deleted_on_file_seed_short_summary.isChecked() )
    )
    for ( name, value ) in boolean_settings:
        self._new_options.SetBoolean( name, value )
    self._new_options.SetNoneableInteger( 'subscription_file_error_cancel_threshold', self._subscription_file_error_cancel_threshold.GetValue() )
    self._new_options.SetDefaultWatcherCheckerOptions( self._watcher_checker_options.GetValue() )
    self._new_options.SetDefaultSubscriptionCheckerOptions( self._subscription_checker_options.GetValue() )
    for ( name, value ) in ( ( 'pause_character', self._pause_character.text() ), ( 'stop_character', self._stop_character.text() ) ):
        self._new_options.SetString( name, value )
class _DuplicatesPanel( QW.QWidget ):
    """Options page for the duplicate filter's comparison score weights and batch size."""
    # ( row label, option name ) for the eight 0-100 comparison score weights, in display order
    _SCORE_ROWS = (
        ( 'Score for jpeg with non-trivially higher jpeg quality:', 'duplicate_comparison_score_higher_jpeg_quality' ),
        ( 'Score for jpeg with significantly higher jpeg quality:', 'duplicate_comparison_score_much_higher_jpeg_quality' ),
        ( 'Score for file with non-trivially higher filesize:', 'duplicate_comparison_score_higher_filesize' ),
        ( 'Score for file with significantly higher filesize:', 'duplicate_comparison_score_much_higher_filesize' ),
        ( 'Score for file with higher resolution (as num pixels):', 'duplicate_comparison_score_higher_resolution' ),
        ( 'Score for file with significantly higher resolution (as num pixels):', 'duplicate_comparison_score_much_higher_resolution' ),
        ( 'Score for file with more tags:', 'duplicate_comparison_score_more_tags' ),
        ( 'Score for file with non-trivially earlier import time:', 'duplicate_comparison_score_older' )
    )
    def __init__( self, parent, new_options ):
        QW.QWidget.__init__( self, parent )
        self._new_options = new_options
        #
        weights_panel = ClientGUICommon.StaticBox( self, 'duplicate filter comparison score weights' )
        # one 0-100 spinbox per score weight, stored as self._<option name>
        for ( label, option_name ) in self._SCORE_ROWS:
            setattr( self, '_' + option_name, QP.MakeQSpinBox( weights_panel, min = 0, max = 100 ) )
        self._duplicate_filter_max_batch_size = QP.MakeQSpinBox( self, min = 10, max = 1024 )
        #
        for ( label, option_name ) in self._SCORE_ROWS:
            getattr( self, '_' + option_name ).setValue( self._new_options.GetInteger( option_name ) )
        self._duplicate_filter_max_batch_size.setValue( self._new_options.GetInteger( 'duplicate_filter_max_batch_size' ) )
        #
        rows = [ ( label, getattr( self, '_' + option_name ) ) for ( label, option_name ) in self._SCORE_ROWS ]
        gridbox = ClientGUICommon.WrapInGrid( weights_panel, rows )
        label = 'When processing potential duplicate pairs in the duplicate filter, the client tries to present the \'best\' file first. It judges the two files on a variety of potential differences, each with a score. The file with the greatest total score is presented first. Here you can tinker with these scores.'
        st = ClientGUICommon.BetterStaticText( weights_panel, label )
        st.setWordWrap( True )
        weights_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        weights_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        #
        vbox = QP.VBoxLayout()
        QP.AddToLayout( vbox, weights_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        rows = [ ( 'Max size of duplicate filter pair batches:', self._duplicate_filter_max_batch_size ) ]
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
        self.setLayout( vbox )
    def UpdateOptions( self ):
        """Write the panel's current widget values back into the options object."""
        for ( label, option_name ) in self._SCORE_ROWS:
            self._new_options.SetInteger( option_name, getattr( self, '_' + option_name ).value() )
        self._new_options.SetInteger( 'duplicate_filter_max_batch_size', self._duplicate_filter_max_batch_size.value() )
class _DefaultFileSystemPredicatesPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
self._always_show_system_everything = QW.QCheckBox( 'show system:everything even if total files is over 10,000', self )
self._always_show_system_everything.setChecked( self._new_options.GetBoolean( 'always_show_system_everything' ) )
self._filter_inbox_and_archive_predicates = QW.QCheckBox( 'hide inbox and archive system predicates if either has no files', self )
self._filter_inbox_and_archive_predicates.setChecked( self._new_options.GetBoolean( 'filter_inbox_and_archive_predicates' ) )
self._file_system_predicate_age = ClientGUISearch.PanelPredicateSystemAgeDelta( self )
self._file_system_predicate_duration = ClientGUISearch.PanelPredicateSystemDuration( self )
self._file_system_predicate_height = ClientGUISearch.PanelPredicateSystemHeight( self )
self._file_system_predicate_limit = ClientGUISearch.PanelPredicateSystemLimit( self )
self._file_system_predicate_mime = ClientGUISearch.PanelPredicateSystemMime( self )
self._file_system_predicate_num_pixels = ClientGUISearch.PanelPredicateSystemNumPixels( self )
self._file_system_predicate_num_tags = ClientGUISearch.PanelPredicateSystemNumTags( self )
self._file_system_predicate_num_words = ClientGUISearch.PanelPredicateSystemNumWords( self )
self._file_system_predicate_ratio = ClientGUISearch.PanelPredicateSystemRatio( self )
self._file_system_predicate_similar_to = ClientGUISearch.PanelPredicateSystemSimilarTo( self )
self._file_system_predicate_size = ClientGUISearch.PanelPredicateSystemSize( self )
self._file_system_predicate_width = ClientGUISearch.PanelPredicateSystemWidth( self )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._always_show_system_everything, | |
'Ødot;': '\uebcd',
'&oogondotbl;': '\ue608', '&Oogondotbl;': '\ue208', 'ö': 'ö',
'Ö': 'Ö', '&odiaguml;': '\ue8d7', 'öacute;': '\ue62c',
'ó': 'ó', 'Ó': 'Ó', 'øacute;': 'ǿ',
'Øacute;': 'Ǿ', 'ødblac;': '\uebc7',
'Ødblac;': '\uebc6', '&oogonacute;': '\ue60c',
'&Oogonacute;': '\ue20c', 'øogonacute;': '\ue657',
'Øogonacute;': '\ue257', 'ő': 'ő', 'Ő': 'Ő',
'&odotacute;': '\uebf9', '&Odotacute;': '\uebf8',
'&oogondotacute;': '\uebfb', '&Oogondotacute;': '\uebfa',
'ødotacute;': '\uebfd', 'Ødotacute;': '\uebfc',
'&oogondblac;': '\uebc5', '&Oogondblac;': '\uebc4', 'ò': 'ò',
'Ò': 'Ò', 'ô': 'ô', 'Ô': 'Ô',
'öcirc;': '\ue62d', 'Öcirc;': '\ue22d',
'&oogoncirc;': '\ue60e', '&ocar;': 'ǒ', '&Ocar;': 'Ǒ',
'õ': 'õ', 'Õ': 'Õ', '&oring;': '\ue637', '&ohook;': 'ỏ',
'&Ohook;': 'Ỏ', '&obreve;': 'ŏ', '&Obreve;': 'Ŏ',
'øbreve;': '\uebef', 'Øbreve;': '\uebee', 'ō': 'ō',
'Ō': 'Ō', 'ømacr;': '\ue652', 'Ømacr;': '\ue252',
'&omacrbreve;': '\ue61b', '&Omacrbreve;': '\ue21b',
'ømacrbreve;': '\ue653', 'Ømacrbreve;': '\ue253',
'&omacracute;': 'ṓ', '&Omacracute;': 'Ṓ',
'ømacracute;': '\uebed', 'Ømacracute;': '\uebec',
'ömacr;': 'ȫ', 'Ömacr;': 'Ȫ', '&oclig;': '\uefad',
'œ': 'œ', 'Œ': 'Œ', '&oeligscap;': 'ɶ',
'&oeligenl;': '\uefdd', '&Oloop;': 'Ꝍ', '&oloop;': 'ꝍ',
'&oeligacute;': '\ue659', '&OEligacute;': '\ue259',
'&oeligdblac;': '\uebc9', '&OEligdblac;': '\uebc8',
'&oeligmacr;': '\ue65d', '&OEligmacr;': '\ue25d',
'&oeligmacrbreve;': '\ue660', '&OEligmacrbreve;': '\ue260',
'&oolig;': 'ꝏ', '&OOlig;': 'Ꝏ', '&ooliguml;': '\uebe5',
'&OOliguml;': '\uebe4', '&ooligacute;': '\uefe9',
'&OOligacute;': '\uefe8', '&ooligdblac;': '\uefed',
'&OOligdblac;': '\uefec', '&ooligdotbl;': '\ueffd',
'&OOligdotbl;': '\ueffc', '&oasup;': '\ue643', '&oesup;': '\ue644',
'&Oesup;': '\ue244', '&oisup;': '\ue645', '&oosup;': '\ue8e9',
'&ousup;': '\ue646', '&Ousup;': '\ue246', '&ovsup;': '\ue647',
'&oopen;': 'ɔ', '&oopenmacr;': '\ue7cc', '&penl;': '\ueef1',
'&pscap;': 'ᴘ', '&pbardes;': 'ꝑ', '&Pbardes;': 'Ꝑ', '&pflour;': 'ꝓ',
'&Pflour;': 'Ꝓ', '&psquirrel;': 'ꝕ', '&Psquirrel;': 'Ꝕ',
'&pdotbl;': '\ue66d', '&Pdotbl;': '\ue26d', '&pdot;': 'ṗ',
'&Pdot;': 'Ṗ', '&pscapdot;': '\uebcf', '&pacute;': 'ṕ',
'&Pacute;': 'Ṕ', '&pmacr;': '\ue665', '&pplig;': '\ueed6',
'&PPlig;': '\ueedd', '&ppflourlig;': '\ueed7', '&ppliguml;': '\uebe7',
'&PPliguml;': '\uebe6', '&Prev;': 'ꟼ', '&qenl;': '\ueef2',
'&qscap;': '\uef0c', '&qslstrok;': 'ꝙ', '&Qslstrok;': 'Ꝙ',
'&qbardes;': 'ꝗ', '&Qbardes;': 'Ꝗ', '&qbardestilde;': '\ue68b',
'&q2app;': '\ue8b3', '&q3app;': '\ue8bf', '&qcentrslstrok;': '\ue8b4',
'&qdotbl;': '\ue688', '&Qdotbl;': '\ue288', '&qdot;': '\ue682',
'&Qdot;': '\ue282', '&qmacr;': '\ue681', '&qvinslig;': '\uead1',
'&Qstem;': '\uf22c', '&renl;': '\ueef3', '&rscap;': 'ʀ', '&YR;': 'Ʀ',
'&rdes;': 'ɼ', '&rdesstrok;': '\ue7e4', '&rtailstrok;': 'ꝵ',
'&rscaptailstrok;': 'ꝶ', '&Rtailstrok;': '℞', '&Rslstrok;': '℟',
'&rdotbl;': 'ṛ', '&Rdotbl;': 'Ṛ', '&rdot;': 'ṙ', '&Rdot;': 'Ṙ',
'&rscapdot;': '\uef22', 'ŕ': 'ŕ', 'Ŕ': 'Ŕ',
'&rringbl;': '\ue6a3', '&rscapdotbl;': '\uef2b', '&resup;': '\ue8ea',
'&rrot;': 'ꝛ', '&Rrot;': 'Ꝛ', '&rrotdotbl;': '\ue7c1',
'&rrotacute;': '\uebb9', '&rins;': 'ꞃ', '&Rins;': 'Ꞃ',
'&rflour;': '\uf19b', '&senl;': '\ueef4', '&sscap;': 'ꜱ',
'⋅': 'ṡ', '&Sdot;': 'Ṡ', '&sscapdot;': '\uef23', 'ś': 'ś',
'Ś': 'Ś', '&sdotbl;': 'ṣ', '&Sdotbl;': 'Ṣ',
'&sscapdotbl;': '\uef2c', 'ß': 'ß', '&SZlig;': 'ẞ',
'&slongaumllig;': '\ueba0', '&slongchlig;': '\uf4fa',
'&slonghlig;': '\ueba1', '&slongilig;': '\ueba2',
'&slongjlig;': '\uf4fb', '&slongklig;': '\uf4fc',
'&slongllig;': '\ueba3', '&slongoumllig;': '\ueba4',
'&slongplig;': '\ueba5', '&slongslig;': '\uf4fd',
'&slongslonglig;': '\ueba6', '&slongslongilig;': '\ueba7',
'&slongslongklig;': '\uf4fe', '&slongslongllig;': '\ueba8',
'&slongslongtlig;': '\uf4ff', '&stlig;': 'st', '&slongtlig;': 'ſt',
'&slongtilig;': '\ueba9', '&slongtrlig;': '\uebaa',
'&slonguumllig;': '\uebab', '&slongvinslig;': '\uebac',
'&slongdestlig;': '\ueada', '&slong;': 'ſ', '&slongenl;': '\ueedf',
'&slongbarslash;': 'ẜ', '&slongbar;': 'ẝ', '&slongovlmed;': '\ue79e',
'&slongslstrok;': '\ue8b8', '&slongflour;': '\ue8b7',
'&slongacute;': '\uebaf', '&slongdes;': '\uf127',
'&slongdotbl;': '\ue7c2', '&Sclose;': '\uf126', '&sclose;': '\uf128',
'&sins;': 'ꞅ', '&Sins;': 'Ꞅ', '&tenl;': '\ueef5', '&tscap;': 'ᴛ',
'&ttailstrok;': 'ꝷ', '&togon;': '\ue6ee', '&Togon;': '\ue2ee',
'&tdotbl;': 'ṭ', '&Tdotbl;': 'Ṭ', '⃛': 'ṫ', '&Tdot;': 'Ṫ',
'&tscapdot;': '\uef24', '&tscapdotbl;': '\uef2d',
'&tacute;': '\ue6e2', '&Tacute;': '\ue2e2', '&trlig;': '\ueed8',
'&ttlig;': '\ueed9', '&trottrotlig;': '\ueeda', '&tylig;': '\ueedb',
'&tzlig;': '\ueedc', '&trot;': 'ꞇ', '&Trot;': 'Ꞇ',
'&tcurl;': '\uf199', '&uenl;': '\ueef7', '&uscap;': 'ᴜ',
'&ubar;': 'ʉ', 'ų': 'ų', 'Ų': 'Ų', '&ucurl;': '\ue731',
'&Ucurl;': '\ue331', '&udotbl;': 'ụ', '&Udotbl;': 'Ụ',
'&ubrevinvbl;': '\ue727', '&udot;': '\ue715', '&Udot;': '\ue315',
'ü': 'ü', 'Ü': 'Ü', 'ú': 'ú', 'Ú': 'Ú',
'ű': 'ű', 'Ű': 'Ű', '&udotacute;': '\uebff',
'&Udotacute;': '\uebfe', 'ù': 'ù', 'Ù': 'Ù',
'&uvertline;': '\ue724', '&Uvertline;': '\ue324', 'û': 'û',
'Û': 'Û', 'ücirc;': '\ue717', 'Ücirc;': '\ue317',
'&ucar;': 'ǔ', '&Ucar;': 'Ǔ', 'ů': 'ů', 'Ů': 'Ů',
'&uhook;': 'ủ', '&Uhook;': 'Ủ', '&ucurlbar;': '\uebbf',
'ŭ': 'ŭ', 'Ŭ': 'Ŭ', 'ū': 'ū', 'Ū': 'Ū',
'&umacrbreve;': '\ue70b', '&Umacrbreve;': '\ue30b',
'&umacracute;': '\ue709', '&Umacracute;': '\ue309', 'ümacr;': 'ǖ',
'Ümacr;': 'Ǖ', '&uasup;': '\ue8eb', '&uesup;': '\ue72b',
'&Uesup;': '\ue32b', '&uisup;': '\ue72c', '&uosup;': '\ue72d',
'&Uosup;': '\ue32d', '&uvsup;': '\ue8ec', '&uwsup;': '\ue8ed',
'&venl;': '\ueef8', '&vscap;': 'ᴠ', '&vbar;': '\ue74e',
'&vslash;': '\ue8ba', '&vdiagstrok;': 'ꝟ', '&Vdiagstrok;': 'Ꝟ',
'&Vslstrok;': '℣', '&vdotbl;': 'ṿ', '&Vdotbl;': 'Ṿ',
'&vdot;': '\ue74c', '&Vdot;': '\ue34c', '&vuml;': '\ue742',
'&Vuml;': '\ue342', '&vacute;': '\ue73a', '&Vacute;': '\ue33a',
'&vdblac;': '\ue74b', '&Vdblac;': '\ue34b', '&vcirc;': '\ue73b',
'&Vcirc;': '\ue33b', '&vring;': '\ue743', '&vmacr;': '\ue74d',
'&Vmacr;': '\ue34d', '&Vovlhigh;': '\uf7b2', '&wynn;': 'ƿ',
'&WYNN;': 'Ƿ', '&vins;': 'ꝩ', '&Vins;': 'Ꝩ', '&vinsdotbl;': '\ue7e6',
'&Vinsdotbl;': '\ue3e6', '&vinsdot;': '\ue7e7', '&Vinsdot;': '\ue3e7',
'&vinsacute;': '\uebbb', '&Vinsacute;': '\uebba', '&vwelsh;': 'ỽ',
'&Vwelsh;': 'Ỽ', '&wenl;': '\ueef9', '&wscap;': 'ᴡ', '&wdotbl;': 'ẉ',
'&Wdotbl;': 'Ẉ', '&wdot;': 'ẇ', '&Wdot;': 'Ẇ', '&wuml;': 'ẅ',
'&Wuml;': 'Ẅ', '&wacute;': 'ẃ', '&Wacute;': 'Ẃ', '&wdblac;': '\ue750',
'&Wdblac;': '\ue350', '&wgrave;': 'ẁ', '&Wgrave;': 'Ẁ',
'ŵ': 'ŵ', 'Ŵ': 'Ŵ', '&wring;': 'ẘ', '&wmacr;': '\ue757',
'&Wmacr;': '\ue357', '&wasup;': '\ue8f0', '&wesup;': '\ue753',
'&Wesup;': '\ue353', '&wisup;': '\ue8f1', '&wosup;': '\ue754',
'&wusup;': '\ue8f2', '&wvsup;': '\ue8f3', '&xenl;': '\ueefa',
'&xscap;': '\uef11', '&xmod;': 'ˣ', '&xslashula;': '\ue8bd',
'&xslashlra;': '\ue8be', '&Xovlhigh;': '\uf7b3', '&xldes;': '\uf232',
'¥l;': '\ueefb', '&yscap;': 'ʏ', '&ybar;': '\ue77b',
'&ycurl;': '\ue785', '&Ycurl;': '\ue385', '&ydotbl;': 'ỵ',
'&Ydotbl;': 'Ỵ', '&ydot;': 'ẏ', '&Ydot;': 'Ẏ', 'ÿ': 'ÿ',
'Ÿ': 'Ÿ', 'ý': 'ý', 'Ý': 'Ý', '&ydblac;': '\ue77c',
'&Ydblac;': '\ue37c', '&ydotacute;': '\ue784',
'&Ydotacute;': '\ue384', '&ygrave;': 'ỳ', '&Ygrave;': 'Ỳ',
'ŷ': 'ŷ', 'Ŷ': 'Ŷ', '&yring;': 'ẙ', '&yhook;': 'ỷ',
'&Yhook;': 'Ỷ', '&ybreve;': '\ue776', '&Ybreve;': '\ue376',
'&ymacr;': 'ȳ', '&Ymacr;': 'Ȳ', '&ymacrbreve;': '\ue775',
'&Ymacrbreve;': '\ue375', '&ymacracute;': '\ue773',
'&Ymacracute;': '\ue373', '&yylig;': 'ꝡ', '&YYlig;': 'Ꝡ',
'&yyliguml;': '\uebe9', '&YYliguml;': '\uebe8',
'&yyligdblac;': '\uebcb', '&YYligdblac;': '\uebca',
'&yesup;': '\ue781', '&yrgmainstrok;': '\uf233', '&yloop;': 'ỿ',
'&Yloop;': 'Ỿ', '&zenl;': '\ueefc', '&zscap;': 'ᴢ', '&zstrok;': 'ƶ',
'&Zstrok;': 'Ƶ', '&zdotbl;': 'ẓ', '&Zdotbl;': 'Ẓ', 'ż': 'ż',
'Ż': 'Ż', '&zvisigot;': 'ꝣ', '&Zvisigot;': 'Ꝣ', '&ezh;': 'ʒ',
'&EZH;': 'Ʒ', '&yogh;': 'ȝ', '&YOGH;': 'Ȝ', 'þ': 'þ',
'Þ': 'Þ', 'þenl;': '\ueef6', 'þscap;': '\uef15',
'þbar;': 'ꝥ', 'Þbar;': 'Ꝥ', 'þovlmed;': '\ue7a2',
'þbarslash;': '\uf149', 'Þbarslash;': '\ue337',
'þbardes;': 'ꝧ', 'Þbardes;': 'Ꝧ', 'þdotbl;': '\ue79f',
'Þdotbl;': '\ue39f', 'þacute;': '\ue737',
'þslonglig;': '\ue734', 'þslongligbar;': '\ue735',
'þrarmlig;': '\ue8c1', '¼': '¼', '½': '½',
'¾': '¾', '&sup0;': '⁰', '¹': '¹', '²': '²',
'³': '³', '&sup4;': '⁴', '&sup5;': '⁵', '&sup6;': '⁶',
'&sup7;': '⁷', '&sup8;': '⁸', '&sup9;': '⁹', '&sub0;': '₀',
'&sub1;': '₁', '&sub2;': '₂', '&sub3;': '₃', '&sub4;': '₄',
'&sub5;': '₅', '&sub6;': '₆', '&sub7;': '₇', '&sub8;': '₈',
'&sub9;': '₉', '&romnumCDlig;': 'ↀ', '&romnumDDlig;': 'ↁ',
'&romnumDDdbllig;': 'ↂ', '&romnumCrev;': 'Ↄ',
'&romnumCrevovl;': '\uf23f', '&Imod;': 'ᴵ', '&Vmod;': '\uf1be',
'&Xmod;': '\uf1bf', '&asup;': 'ͣ', 'æsup;': 'ᷔ',
'&anligsup;': '\uf036', '&anscapligsup;': '\uf03a', '&aoligsup;': 'ᷕ',
'&arligsup;': '\uf038', '&arscapligsup;': '\uf130', '&avligsup;': 'ᷖ',
'&bsup;': '\uf012', '&bscapsup;': '\uf013', '⫐': 'ͨ',
'çsup;': 'ᷗ', '&dsup;': 'ͩ', '&drotsup;': 'ᷘ', 'ðsup;': 'ᷙ',
'&dscapsup;': '\uf016', '&esup;': 'ͤ', '&eogonsup;': '\uf135',
'&emacrsup;': '\uf136', '&fsup;': '\uf017', '&gsup;': 'ᷚ',
'&gscapsup;': 'ᷛ', '&hsup;': 'ͪ', '&isup;': 'ͥ',
'&inodotsup;': '\uf02f', '&jsup;': '\uf030', '&jnodotsup;': '\uf031',
'&ksup;': 'ᷜ', '&kscapsup;': '\uf01c', '&lsup;': 'ᷝ',
'&lscapsup;': 'ᷞ', '&msup;': 'ͫ', '&mscapsup;': 'ᷟ', '⊅': 'ᷠ',
'&nscapsup;': 'ᷡ', '&osup;': 'ͦ', '&omacrsup;': '\uf13f',
'øsup;': '\uf032', '&oogonsup;': '\uf13e',
'&orrotsup;': '\uf03e', '&orumsup;': '\uf03f', '&psup;': '\uf025',
'&qsup;': '\uf033', '&rsup;': 'ͬ', '&rrotsup;': 'ᷣ',
'&rumsup;': '\uf040', '&rscapsup;': 'ᷢ', '&ssup;': 'ᷤ',
'&slongsup;': 'ᷥ', '&tsup;': 'ͭ', '&trotsup;': '\uf03b',
'&tscapsup;': '\uf02a', '&usup;': 'ͧ', '&vsup;': 'ͮ',
'&wsup;': '\uf03c', '&xsup;': 'ͯ', '&ysup;': '\uf02b', '&zsup;': 'ᷦ',
'þsup;': '\uf03d', '&combgrave;': '̀', '&combacute;': '́',
'&combcirc;': '̂', '&combcircdbl;': '᷍', '&combtilde;': '̃',
'&combmacr;': '̄', '&combbreve;': '̆', '&combdot;': '̇',
'&combuml;': '̈', '&combhook;': '̉', '&combring;': '̊',
'&combdblac;': '̋', '&combsgvertl;': '̍', '&combdbvertl;': '̎',
'&combdotbl;': '̣', '&combced;': '̧', '&dblbarbl;': '̳',
'&dblovl;': '̿', '&combogon;': '̨', '&combastbl;': '͙',
'&combdblbrevebl;': '͜', '&combtripbrevebl;': '\uf1fc',
'&combcurl;': '᷎', '&combcurlhigh;': '\uf1c5',
'&combdothigh;': '\uf1ca', '&combcurlbar;': '\uf1cc', '&bar;': '̅',
'¯high;': '\uf00a', '¯med;': '\uf00b', '&ovlhigh;': '\uf00c',
'&ovlmed;': '\uf00d', '&barbl;': '̲', '&baracr;': '̶',
'&arbar;': '\uf1c0', '&combcomma;': '̕', '&combtildevert;': '̾',
'&er;': '͛', '&erang;': '\uf1c7', '&ercurl;': '\uf1c8',
'&ersub;': '᷏', '&ra;': 'ᷓ', '&rabar;': '\uf1c1', '&urrot;': '\uf153',
'&urlemn;': '\uf1c2', '&ur;': '᷑', '&us;': '᷒', '&combisbelow;': '᷐',
'.': '.', ';': ';', '&': '&', 'Θ': 'Θ',
'θ': 'θ', '&obiit;': 'ꝋ', '&OBIIT;': 'Ꝋ', '&et;': '⁊',
'&etslash;': '\uf158', '&ET;': '\uf142', '&ETslash;': '\uf1a7',
'&apomod;': 'ʼ', '&esse;': '≈', '&est;': '∻', '&condes;': 'ꝯ',
'&CONdes;': 'Ꝯ', '&condot;': 'ꜿ', '&CONdot;': 'Ꜿ',
'&usbase;': '\uf1a6', '&USbase;': '\uf1a5', '&usmod;': 'ꝰ',
'&rum;': 'ꝝ', '&RUM;': 'Ꝝ', '&de;': '\uf159', '&is;': 'ꝭ',
'&IS;': 'Ꝭ', '&sstrok;': 'ꝸ', '&etfin;': 'ꝫ', '&ETfin;': 'Ꝫ',
'&sem;': '\uf1ac', '&fMedrun;': 'ᚠ', '&mMedrun;': 'ᛘ', '&lbbar;': '℔',
'ˆ': '^', '´': '´', '`': '`', '¨': '¨',
'&tld;': '~', '¯': '¯', '˘': '˘', '˙': '˙',
'˚': '˚', '¸': '¸', '˛': '˛', '˜': '˜',
'˝': '˝', '&verbarup;': 'ˈ', '·': '·',
'&hyphpoint;': '‧', '&sgldr;': '․', '&dblldr;': '‥', '…': '…',
':': ':', ',': ',', '&tridotright;': '჻',
'&tridotupw;': '∴', '&tridotdw;': '∵', '&quaddot;': '∷',
'&lozengedot;': '⁘', '&midring;': '\uf1da', '|': '|',
'¦': '¦', '‖': '‖', '/': '/', '&fracsol;': '⁄',
'&dblsol;': '⫽', '\': '\\', '&luslst;': '⸌', '&ruslst;': '⸍',
'&rlslst;': '⸜', '&llslst;': '⸝', '_': '_', '‐': '-',
'‐': '‐', '&nbhy;': '‑', '&dbloblhyph;': '⸗', '&numdash;': '‒',
'–': '–', '—': '—', '―': '―', '!': '!',
| |
differences : sparse array
A NxN dimensional sparse array representing a weighted adjacency matrix, with the edge weights corresponding to the backward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
mean hierarchical difference : float
The mean of the distribution of backward hierarchical differences.
backward hierarchical incoherence : float
The standard deviation of the distribution of backward hierarchical differences.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
TD = backward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
m2 = average(TD**2, weights=A)
elif isinstance(graph, spmatrix):
A = graph
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
std = (m2 - m**2)**0.5
return TD, m, std
# Returns a measure of equitable controllability over the full graph/network
def forward_democracy_coefficient(graph, weight=None):
    """Returns the forward democracy coefficient of a graph, a topological network metric.
    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string or None
        If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
        graph instance. Otherwise the default is None.
    Returns
    -------
    forward democracy coefficient : float
        forward democracy coefficient of a graph
    Raises
    ------
    TypeError
        If graph is not a numpy ndarray, scipy sparse matrix or NetworkX graph.
    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        # dense case: mean forward difference, weighted by the transposed adjacency matrix
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
    elif isinstance(graph, spmatrix):
        # sparse case: elementwise product avoids densifying the matrix
        A = graph.transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = (A.multiply(TD)).sum() / A.sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = (A.multiply(TD)).sum() / A.sum()
    else:
        # previously an unsupported type fell through to an opaque NameError on m
        raise TypeError('graph must be a numpy ndarray, scipy sparse matrix or networkx Graph')
    return 1 - m
# Returns a measure of equitable controllability over the full graph/network
def backward_democracy_coefficient(graph, weight=None):
    """Returns the backward democracy coefficient of a graph, a topological network metric.
    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string or None
        If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
        graph instance. Otherwise the default is None.
    Returns
    -------
    backward democracy coefficient : float
        backward democracy coefficient of a graph
    Raises
    ------
    TypeError
        If graph is not a numpy ndarray, scipy sparse matrix or NetworkX graph.
    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        # dense case: mean backward difference, weighted by the adjacency matrix (no transpose)
        A = graph
        TD = backward_hierarchical_differences(graph, weight=weight)
        m = average(TD, weights=A)
    elif isinstance(graph, spmatrix):
        # sparse case: elementwise product avoids densifying the matrix
        A = graph
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        m = (A.multiply(TD)).sum() / A.sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        m = (A.multiply(TD)).sum() / A.sum()
    else:
        # previously an unsupported type fell through to an opaque NameError on m
        raise TypeError('graph must be a numpy ndarray, scipy sparse matrix or networkx Graph')
    return 1 - m
def node_forward_influence_centrality(graph, node, weight=None):
    """Returns the forward influence centrality of the given node in the network.

    A node whose selected weight row sums to zero contributes m = 0 and so
    gets centrality 1.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    node : number
        Label of the node as determined by the indexing of the graph.nodes()
        call or the index of the numpy/sparse array.
    weight : string
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance. Otherwise the default is None.

    Returns
    -------
    forward influence centrality : float
        A node's forward influence centrality.

    Raises
    ------
    TypeError
        If ``graph`` is not a numpy array, a scipy sparse matrix or a
        NetworkX graph.

    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        A = graph.transpose()
        index = node
        TD = forward_hierarchical_differences(graph, weight=weight)
        if A[index].sum() == 0:
            # no weight on this row: average() would divide by zero
            m = 0
        else:
            m = average(TD[index], weights=A[index])
    elif isinstance(graph, spmatrix):
        A = graph.transpose()
        index = node
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight).transpose()
        # translate the node label into a positional index
        index = list(graph.nodes).index(node)
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    else:
        # previously an unsupported type fell through to an opaque
        # UnboundLocalError on `m`; fail with a clear message instead
        raise TypeError(
            "graph must be a numpy array, scipy sparse matrix or NetworkX graph")
    return 1 - m
def node_backward_influence_centrality(graph, node, weight=None):
    """Returns the backward influence centrality of the given node in the network.

    A node whose selected weight row sums to zero contributes m = 0 and so
    gets centrality 1.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    node : number
        Label of the node as determined by the indexing of the graph.nodes()
        call or the index of the numpy/sparse array.
    weight : string or None
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.

    Returns
    -------
    backward influence centrality : float
        A node's backward influence centrality.

    Raises
    ------
    TypeError
        If ``graph`` is not a numpy array, a scipy sparse matrix or a
        NetworkX graph.

    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        A = graph
        index = node
        TD = backward_hierarchical_differences(graph, weight=weight)
        if A[index].sum() == 0:
            # no weight on this row: average() would divide by zero
            m = 0
        else:
            m = average(TD[index], weights=A[index])
    elif isinstance(graph, spmatrix):
        A = graph
        index = node
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight)
        # translate the node label into a positional index
        index = list(graph.nodes).index(node)
        TD = sparse_backward_hierarchical_differences(graph, weight=weight).tocsr()
        if A[index].sum() == 0:
            m = 0
        else:
            m = (A[index].multiply(TD[index])).sum() / A[index].sum()
    else:
        # previously an unsupported type fell through to an opaque
        # UnboundLocalError on `m`; fail with a clear message instead
        raise TypeError(
            "graph must be a numpy array, scipy sparse matrix or NetworkX graph")
    return 1 - m
def forward_influence_centrality(graph, weight=None):
    """Returns the forward influence centrality of the nodes in a network as an array.

    Rows of the (transposed) weight structure that sum to zero yield m = 0,
    i.e. a centrality of 1 for that node.

    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string or None
        If you have weighted edges insert weight='string', where string is your
        underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.

    Returns
    -------
    forward influence centrality : array
        A Nx1 dimensional array indexed by the nodes, in the same order as
        graph.nodes, holding the value of their forward influence centralities.

    Raises
    ------
    TypeError
        If ``graph`` is not a numpy array, a scipy sparse matrix or a
        NetworkX graph.

    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
       Graph hierarchy and spread of infections.
       arXiv preprint arXiv:1908.04358."""
    if isinstance(graph, ndarray):
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                # row carries no weight; leave m[i] at 0
                m[i] = 0
            else:
                m[i] = average(TD[i], weights=A[i])
    elif isinstance(graph, spmatrix):
        A = graph.transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    elif isinstance(graph, Graph):
        A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    else:
        # previously an unsupported type fell through to an opaque
        # UnboundLocalError on `m`; fail with a clear message instead
        raise TypeError(
            "graph must be a numpy array, scipy sparse matrix or NetworkX graph")
    return ones((m.shape[0], 1)) - m
def backward_influence_centrality(graph, weight=None):
"""Returns the backward influence centrality of the nodes in a network as an array.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance, otherwise the default is None.
Returns
-------
backward influence centrality : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their backward influence centralities.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
TD = backward_hierarchical_differences(graph, weight=weight)
m = zeros((TD.shape[0], 1))
for i in range(m.shape[0]):
if A[i].sum() == 0:
m[i] = 0
else:
m[i] = | |
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
    # Auto-generated YDK model constructor: registers this container's YANG
    # metadata ("interface-program-rate" under "interface-parameters").
    super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.InterfaceParameters.InterfaceProgramRate, self).__init__()

    self.yang_name = "interface-program-rate"
    self.yang_parent_name = "interface-parameters"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # no child containers or lists; this container has only two leaves
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('value', YLeaf(YType.uint32, 'value')),
        ('unit', YLeaf(YType.enumeration, 'unit')),
    ])
    # leaf values are unset until read from / written to the device
    self.value = None
    self.unit = None
    self._segment_path = lambda: "interface-program-rate"
def __setattr__(self, name, value):
    # Delegate to Entity._perform_setattr for the declared leaf names.
    self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.InterfaceParameters.InterfaceProgramRate, ['value', 'unit'], name, value)
class PortShaperRate(Entity):
    """
    Port Shaper Rate

    .. attribute:: value

    Config value

    **type**\: int

    **range:** 0..4294967295

    .. attribute:: unit

    Config unit

    **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`

    """

    # Auto-generated YDK model class for YANG container "port-shaper-rate".
    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.InterfaceParameters.PortShaperRate, self).__init__()

        self.yang_name = "port-shaper-rate"
        self.yang_parent_name = "interface-parameters"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # no child containers or lists; this container has only two leaves
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('value', YLeaf(YType.uint32, 'value')),
            ('unit', YLeaf(YType.enumeration, 'unit')),
        ])
        # leaf values are unset until read from / written to the device
        self.value = None
        self.unit = None
        self._segment_path = lambda: "port-shaper-rate"

    def __setattr__(self, name, value):
        # Delegate to Entity._perform_setattr for the declared leaf names.
        self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.InterfaceParameters.PortShaperRate, ['value', 'unit'], name, value)
class SkywarpQosPolicyClass(Entity):
"""
Skywarp QoS policy class details
.. attribute:: qos_show_pclass_st
qos show pclass st
**type**\: list of :py:class:`QosShowPclassSt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
    # Auto-generated YDK model constructor for YANG container
    # "skywarp-qos-policy-class"; its only child is the YANG list
    # "qos-show-pclass-st" (hence the YList instance below).
    super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass, self).__init__()

    self.yang_name = "skywarp-qos-policy-class"
    self.yang_parent_name = "bundle-output"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([("qos-show-pclass-st", ("qos_show_pclass_st", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt))])
    # this container defines no leaves of its own
    self._leafs = OrderedDict()

    self.qos_show_pclass_st = YList(self)
    self._segment_path = lambda: "skywarp-qos-policy-class"
def __setattr__(self, name, value):
    # Delegate to Entity._perform_setattr; no leaf names to declare here.
    self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass, [], name, value)
class QosShowPclassSt(Entity):
"""
qos show pclass st
.. attribute:: queue
QoS Queue parameters
**type**\: :py:class:`Queue <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Queue>`
.. attribute:: shape
QoS EA Shaper parameters
**type**\: :py:class:`Shape <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape>`
.. attribute:: wfq
QoS WFQ parameters
**type**\: :py:class:`Wfq <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq>`
.. attribute:: police
QoS Policer parameters
**type**\: :py:class:`Police <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Police>`
.. attribute:: marking
QoS Mark parameters
**type**\: :py:class:`Marking <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Marking>`
.. attribute:: class_level
Class level
**type**\: int
**range:** 0..255
.. attribute:: class_name
Class name
**type**\: str
**length:** 0..65
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
    # Auto-generated YDK model constructor for YANG list entry
    # "qos-show-pclass-st": five child containers plus two leaves.
    super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt, self).__init__()

    self.yang_name = "qos-show-pclass-st"
    self.yang_parent_name = "skywarp-qos-policy-class"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # child containers: queue, shape, wfq, police, marking
    self._child_container_classes = OrderedDict([("queue", ("queue", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Queue)), ("shape", ("shape", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape)), ("wfq", ("wfq", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq)), ("police", ("police", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Police)), ("marking", ("marking", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Marking))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('class_level', YLeaf(YType.uint8, 'class-level')),
        ('class_name', YLeaf(YType.str, 'class-name')),
    ])
    # leaf values are unset until read from / written to the device
    self.class_level = None
    self.class_name = None

    # Eagerly instantiate each child container and register it under its
    # YANG name so the entity tree is fully navigable.
    self.queue = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Queue()
    self.queue.parent = self
    self._children_name_map["queue"] = "queue"
    self._children_yang_names.add("queue")

    self.shape = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape()
    self.shape.parent = self
    self._children_name_map["shape"] = "shape"
    self._children_yang_names.add("shape")

    self.wfq = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq()
    self.wfq.parent = self
    self._children_name_map["wfq"] = "wfq"
    self._children_yang_names.add("wfq")

    self.police = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Police()
    self.police.parent = self
    self._children_name_map["police"] = "police"
    self._children_yang_names.add("police")

    self.marking = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Marking()
    self.marking.parent = self
    self._children_name_map["marking"] = "marking"
    self._children_yang_names.add("marking")

    self._segment_path = lambda: "qos-show-pclass-st"
def __setattr__(self, name, value):
    # Delegate to Entity._perform_setattr for the declared leaf names.
    self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt, ['class_level', 'class_name'], name, value)
class Queue(Entity):
    """
    QoS Queue parameters

    .. attribute:: queue_id

    Queue ID

    **type**\: int

    **range:** 0..4294967295

    .. attribute:: queue_type

    Queue Type

    **type**\: str

    **length:** 0..101

    """

    # Auto-generated YDK model class for YANG container "queue".
    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Queue, self).__init__()

        self.yang_name = "queue"
        self.yang_parent_name = "qos-show-pclass-st"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # no child containers or lists; this container has only two leaves
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('queue_id', YLeaf(YType.uint32, 'queue-id')),
            ('queue_type', YLeaf(YType.str, 'queue-type')),
        ])
        # leaf values are unset until read from / written to the device
        self.queue_id = None
        self.queue_type = None
        self._segment_path = lambda: "queue"

    def __setattr__(self, name, value):
        # Delegate to Entity._perform_setattr for the declared leaf names.
        self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Queue, ['queue_id', 'queue_type'], name, value)
class Shape(Entity):
    """
    QoS EA Shaper parameters

    .. attribute:: pir

    PIR in kbps

    **type**\: :py:class:`Pir <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir>`

    .. attribute:: pbs

    PBS in bytes

    **type**\: :py:class:`Pbs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs>`

    """

    # Auto-generated YDK model class for YANG container "shape": two child
    # containers (pir, pbs), no leaves of its own.
    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape, self).__init__()

        self.yang_name = "shape"
        self.yang_parent_name = "qos-show-pclass-st"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("pir", ("pir", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir)), ("pbs", ("pbs", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs))])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict()

        # Eagerly instantiate both child containers and register them.
        self.pir = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir()
        self.pir.parent = self
        self._children_name_map["pir"] = "pir"
        self._children_yang_names.add("pir")

        self.pbs = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs()
        self.pbs.parent = self
        self._children_name_map["pbs"] = "pbs"
        self._children_yang_names.add("pbs")

        self._segment_path = lambda: "shape"

    class Pir(Entity):
        """
        PIR in kbps

        .. attribute:: value

        Config value

        **type**\: int

        **range:** 0..4294967295

        .. attribute:: unit

        Config unit

        **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`

        """

        # Auto-generated YDK model class for YANG container "pir".
        _prefix = 'skp-qos-oper'
        _revision = '2016-02-18'

        def __init__(self):
            super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir, self).__init__()

            self.yang_name = "pir"
            self.yang_parent_name = "shape"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            # no child containers or lists; only two leaves
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('value', YLeaf(YType.uint32, 'value')),
                ('unit', YLeaf(YType.enumeration, 'unit')),
            ])
            # leaf values are unset until read from / written to the device
            self.value = None
            self.unit = None
            self._segment_path = lambda: "pir"

        def __setattr__(self, name, value):
            # Delegate to Entity._perform_setattr for the declared leaf names.
            self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pir, ['value', 'unit'], name, value)

    class Pbs(Entity):
        """
        PBS in bytes

        .. attribute:: value

        Config value

        **type**\: int

        **range:** 0..4294967295

        .. attribute:: unit

        Config unit

        **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`

        """

        # Auto-generated YDK model class for YANG container "pbs".
        _prefix = 'skp-qos-oper'
        _revision = '2016-02-18'

        def __init__(self):
            super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs, self).__init__()

            self.yang_name = "pbs"
            self.yang_parent_name = "shape"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            # no child containers or lists; only two leaves
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('value', YLeaf(YType.uint32, 'value')),
                ('unit', YLeaf(YType.enumeration, 'unit')),
            ])
            # leaf values are unset until read from / written to the device
            self.value = None
            self.unit = None
            self._segment_path = lambda: "pbs"

        def __setattr__(self, name, value):
            # Delegate to Entity._perform_setattr for the declared leaf names.
            self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Shape.Pbs, ['value', 'unit'], name, value)
class Wfq(Entity):
"""
QoS WFQ parameters
.. attribute:: committed_weight
Committed Weight
**type**\: :py:class:`CommittedWeight <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight>`
.. attribute:: programmed_wfq
QoS Programmed WFQ parameters
**type**\: :py:class:`ProgrammedWfq <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq>`
.. attribute:: excess_weight
Excess Weight
**type**\: int
**range:** 0..65535
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
    # Auto-generated YDK model constructor for YANG container "wfq":
    # two child containers (committed-weight, programmed-wfq) plus the
    # excess-weight leaf.
    super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq, self).__init__()

    self.yang_name = "wfq"
    self.yang_parent_name = "qos-show-pclass-st"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("committed-weight", ("committed_weight", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight)), ("programmed-wfq", ("programmed_wfq", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('excess_weight', YLeaf(YType.uint16, 'excess-weight')),
    ])
    # leaf value is unset until read from / written to the device
    self.excess_weight = None

    # Eagerly instantiate both child containers and register them.
    self.committed_weight = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight()
    self.committed_weight.parent = self
    self._children_name_map["committed_weight"] = "committed-weight"
    self._children_yang_names.add("committed-weight")

    self.programmed_wfq = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq()
    self.programmed_wfq.parent = self
    self._children_name_map["programmed_wfq"] = "programmed-wfq"
    self._children_yang_names.add("programmed-wfq")

    self._segment_path = lambda: "wfq"
def __setattr__(self, name, value):
    # Delegate to Entity._perform_setattr for the declared leaf names.
    self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq, ['excess_weight'], name, value)
class CommittedWeight(Entity):
    """
    Committed Weight

    .. attribute:: value

    Config value

    **type**\: int

    **range:** 0..4294967295

    .. attribute:: unit

    Config unit

    **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`

    """

    # Auto-generated YDK model class for YANG container "committed-weight".
    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight, self).__init__()

        self.yang_name = "committed-weight"
        self.yang_parent_name = "wfq"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # no child containers or lists; only two leaves
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('value', YLeaf(YType.uint32, 'value')),
            ('unit', YLeaf(YType.enumeration, 'unit')),
        ])
        # leaf values are unset until read from / written to the device
        self.value = None
        self.unit = None
        self._segment_path = lambda: "committed-weight"

    def __setattr__(self, name, value):
        # Delegate to Entity._perform_setattr for the declared leaf names.
        self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.CommittedWeight, ['value', 'unit'], name, value)
class ProgrammedWfq(Entity):
"""
QoS Programmed WFQ parameters
.. attribute:: bandwidth
Bandwidth
**type**\: :py:class:`Bandwidth <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth>`
.. attribute:: sum_of_bandwidth
Sum of Bandwidth
**type**\: :py:class:`SumOfBandwidth <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth>`
.. attribute:: excess_ratio
Excess Ratio
**type**\: int
**range:** 0..65535
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
    # Auto-generated YDK model constructor for YANG container
    # "programmed-wfq": two child containers (bandwidth, sum-of-bandwidth)
    # plus the excess-ratio leaf.
    super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq, self).__init__()

    self.yang_name = "programmed-wfq"
    self.yang_parent_name = "wfq"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_container_classes = OrderedDict([("bandwidth", ("bandwidth", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth)), ("sum-of-bandwidth", ("sum_of_bandwidth", PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('excess_ratio', YLeaf(YType.uint16, 'excess-ratio')),
    ])
    # leaf value is unset until read from / written to the device
    self.excess_ratio = None

    # Eagerly instantiate both child containers and register them.
    self.bandwidth = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth()
    self.bandwidth.parent = self
    self._children_name_map["bandwidth"] = "bandwidth"
    self._children_yang_names.add("bandwidth")

    self.sum_of_bandwidth = PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.SumOfBandwidth()
    self.sum_of_bandwidth.parent = self
    self._children_name_map["sum_of_bandwidth"] = "sum-of-bandwidth"
    self._children_yang_names.add("sum-of-bandwidth")

    self._segment_path = lambda: "programmed-wfq"
def __setattr__(self, name, value):
    # Delegate to Entity._perform_setattr for the declared leaf names.
    self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq, ['excess_ratio'], name, value)
class Bandwidth(Entity):
    """
    Bandwidth

    .. attribute:: value

    Config value

    **type**\: int

    **range:** 0..4294967295

    .. attribute:: unit

    Config unit

    **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`

    """

    # Auto-generated YDK model class for YANG container "bandwidth".
    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth, self).__init__()

        self.yang_name = "bandwidth"
        self.yang_parent_name = "programmed-wfq"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # no child containers or lists; only two leaves
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('value', YLeaf(YType.uint32, 'value')),
            ('unit', YLeaf(YType.enumeration, 'unit')),
        ])
        # leaf values are unset until read from / written to the device
        self.value = None
        self.unit = None
        self._segment_path = lambda: "bandwidth"

    def __setattr__(self, name, value):
        # Delegate to Entity._perform_setattr for the declared leaf names.
        self._perform_setattr(PlatformQos.Nodes.Node.BundleInterfaces.BundleInterface.MemberInterfaces.MemberInterface.BundleOutput.SkywarpQosPolicyClass.QosShowPclassSt.Wfq.ProgrammedWfq.Bandwidth, ['value', 'unit'], name, value)
class SumOfBandwidth(Entity):
"""
Sum of Bandwidth
.. attribute:: value
Config value
**type**\: int
**range:** 0..4294967295
.. attribute:: unit
Config unit
**type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
"""
_prefix | |
# Repository: PrincetonCompMemLab/narrative
from csv import reader
from copy import deepcopy
import os
import numpy as np
import re
#import sys # not used
import json
import cStringIO
# constants
# attach end_of_state, end_of_story marker
# MARK_END_STATE = False
# attach question marker at the end of the state (e.g. Q_subject)
# ATTACH_QUESTIONS = False
# it generates symbolic states(e.g. "<NAME>_goodbye Nick")
# GEN_SYMBOLIC_STATES = False
# inserts the role before the filler (e.g. Subject Mariko bla bla bla...)
# ATTACH_ROLE_MARKER = False
# ATTACH_ROLE_MARKER_BEFORE = ['Pronoun', 'Name', 'Pronoun_possessive', 'Pronoun_object']
# These global variables are probably fine!
# Sentinel tokens inserted into the generated story text.
END_STATE_MARKER = 'ENDOFSTATE'
END_STORY_MARKER = 'ENDOFSTORY'
# I/O locations, resolved relative to this script's parent directory:
# stories are written under ../story, schema specs are read from ../schema.
OUTPUT_ROOT = os.path.join(os.path.dirname(__file__), os.path.pardir, 'story')
INPUT_PATH = os.path.join(os.path.dirname(__file__), os.path.pardir, 'schema')
# Extension shared by schema input files and story output files.
FILE_FORMAT = '.txt'
# helper functions
class Transition:
    """Represents a single set of transitions, with a condition"""

    def __init__(self, trans_cond, probs, trans_states):
        # trans_cond: either the literal string 'Default' or a condition of
        # the form "<Role>.<Attribute> <operator> <value>" (see matches_cond).
        self.trans_cond = trans_cond
        # probs: transition probabilities, parallel to trans_states.
        self.probs = probs
        # trans_states: names of the candidate successor states.
        self.trans_states = trans_states

    def matches_cond(self, grounding, attributes):
        """Return True when this transition set applies.

        grounding maps role names to filler names; attributes maps filler
        names to their attribute dicts (as built by read_schema_file).
        """
        if self.trans_cond == 'Default':
            return True
        # Split "Role.Attr <op> <value>" into tokens: cond_split[0]/[1] are
        # the role and attribute, the remaining tokens are the comparison.
        cond_split = self.trans_cond.replace('.', ' ').split(' ')
        cond_fill = attributes[grounding[cond_split[0]]][cond_split[1]]
        # NOTE: Python 2 idiom -- `unicode` does not exist under Python 3.
        cond_fill = unicode(cond_fill, 'utf-8')
        if not cond_fill.isnumeric():
            # quote non-numeric attribute values so eval compares strings
            cond_fill = "\"" + cond_fill + "\""
        # substitute the looked-up value for the Role.Attr tokens, then
        # evaluate the remaining comparison expression.
        cond_split[0] = cond_fill
        cond_split[1] = ''
        # HACK: the condition is run through eval(); schema files must be
        # trusted input -- never feed untrusted schemas through here.
        return eval(''.join(cond_split))

    def get_trans_states(self):
        # candidate successor state names
        return self.trans_states

    def get_trans_cond(self):
        # raw condition string
        return self.trans_cond

    def get_probs(self):
        # probabilities, parallel to get_trans_states()
        return self.probs
class State:
    """A story state: template text plus the conditional transition sets that
    determine which state may follow it."""

    def __init__(self, text, trans_list, roles_list):
        self.text = text
        self.trans_list = trans_list
        self.roles_list = roles_list

    def __get_trans_list_idx(self, grounding, attributes):
        """Index of the first transition set whose condition holds for the
        given grounding/attributes."""
        idx = 0
        while True:
            if self.trans_list[idx].matches_cond(grounding, attributes):
                return idx
            idx += 1

    def sample_next(self, grounding, attributes):
        """Draw the name of the next state from the applicable distribution."""
        chosen = self.trans_list[self.__get_trans_list_idx(grounding, attributes)]
        return np.random.choice(chosen.trans_states, p=chosen.probs)

    def get_distribution(self, grounding, attributes):
        """Return (distribution over next states, condition string) for the
        applicable transition set."""
        chosen = self.trans_list[self.__get_trans_list_idx(grounding, attributes)]
        distribution = dict(zip(chosen.trans_states, chosen.probs))
        return distribution, chosen.get_trans_cond()

    def get_num_next_states(self, grounding, attributes):
        """Number of candidate successor states under the applicable set."""
        chosen = self.trans_list[self.__get_trans_list_idx(grounding, attributes)]
        return len(chosen.trans_states)

    def get_roles(self):
        """Roles appearing in this state's text (helper for building QA)."""
        return self.roles_list
def read_schema_file(input_fname):
    """Parse a schema spec file into (attributes, entities, roles, states).

    The file has three sections: "Entities" (each entity type with its
    attribute names, followed by one CSV line per filler), "Roles"
    (role-name : entity-type pairs) and "States" (state name, template text,
    then one transition line per condition until a blank line; the special
    state "END" terminates the section and has no transitions).

    Parameters
    ----------
    input_fname : str
        Schema file name (without extension) under INPUT_PATH.

    Returns
    -------
    tuple
        (attributes, entities, roles, states) where attributes maps
        filler -> {attribute: value}, entities maps entity type -> list of
        fillers, roles maps role name -> entity type, and states maps
        state name -> State.
    """
    print('Schema = %s' %
          os.path.abspath(os.path.join(INPUT_PATH, input_fname)) + FILE_FORMAT)
    attributes = dict()
    entities = dict()
    roles = dict()
    states = dict()
    # `with` guarantees the handle is closed even if parsing raises
    # (the old explicit f.close() leaked the handle on malformed input)
    with open(os.path.join(INPUT_PATH, input_fname) + FILE_FORMAT) as f:
        # Read entities and their attributes
        # each entity has a list of fillers - e.g. Person: ['Olivia', 'Mariko', ...]
        # each filler has a dict of features - e.g. Mariko : {'Mood': 'nervous', ...}
        assert f.readline().strip() == "Entities", "Spec file must start with Entities"
        f.readline()  # Dashed line
        while True:
            nextline = f.readline().strip()
            if nextline == 'Roles':
                break
            ent_spec = nextline.split(':')
            ent_name = ent_spec[0]
            ent_attr = [x.strip() for x in ent_spec[1].split(',')]
            entities[ent_name] = []
            inst_line = f.readline().strip()
            while inst_line:
                # Use csv reader here, to ignore commas inside quotes
                instance = [x for x in reader([inst_line], skipinitialspace=True)][0]
                assert len(instance) == len(ent_attr), \
                    "Instance %s does not match entity spec" % instance[0]
                entities[ent_name].append(instance[0])
                attributes[instance[0]] = dict()
                for i, a in enumerate(ent_attr):
                    attributes[instance[0]][a] = instance[i]
                inst_line = f.readline().strip()
        # Read roles
        # the role of high-level entity - e.g. Friend: Person
        f.readline()  # Dashed line
        role_line = f.readline().strip()
        while role_line:
            role = [x.strip() for x in role_line.split(':')]
            roles[role[0]] = role[1]
            role_line = f.readline().strip()
        # Read States and transitions
        assert f.readline().strip() == "States", "States must follow Roles"
        f.readline()  # Dashed line
        while True:
            state_name = f.readline().strip()
            text = f.readline().strip()
            # gather all roles that can be queried, for QA;
            # roles appear in the text as "[Role.field]"
            roles_list = []
            roles_list_with_field = re.findall(r'\[(.*?)\]', text)
            for role in roles_list_with_field:
                roles_list.append(os.path.splitext(role)[0])
            roles_list = set(roles_list)
            if state_name == "END":
                states[state_name] = State(text, [], roles_list)
                break
            # gather the next states with edge weights (if not end)
            trans_list = []
            trans_line = f.readline().strip()
            while trans_line:
                trans_split = trans_line.split(':')
                trans_cond = trans_split[0]
                probs = []
                trans_states = []
                assert len(trans_split) == 2, \
                    "Transition should have one colon - %s" % trans_line
                for x in trans_split[1].split(','):
                    [p, s] = x.strip().split(' ')
                    probs.append(p)
                    trans_states.append(s)
                # np.float was removed in NumPy 1.20; builtin float is equivalent
                probs = np.array(probs).astype(float)
                trans_list.append(Transition(trans_cond, probs, trans_states))
                trans_line = f.readline().strip()
            # grab all info to represent the state
            states[state_name] = State(text, trans_list, roles_list)
    return (attributes, entities, roles, states)
def mkdir(names_concat, n_iterations, n_repeats):
    """Create (if needed) and return the run-specific output directory.

    The directory is OUTPUT_ROOT/<names_concat>_<n_iterations>_<n_repeats>.

    :param names_concat: string identifying the schema/run names
    :param n_iterations: number of iterations (used only in the dir name)
    :param n_repeats: number of story repeats (used only in the dir name)
    :return: path of the (possibly just created) output directory
    """
    output_subpath = ('%s_%s_%s' % (names_concat, str(n_iterations), str(n_repeats)))
    # make output directory if not exists
    if not os.path.exists(OUTPUT_ROOT):
        os.makedirs(OUTPUT_ROOT)
        # bug fix: interpolate with %; print('%s', x) would print a tuple
        print('mkdir: %s' % OUTPUT_ROOT)
    final_output_path = os.path.join(OUTPUT_ROOT, output_subpath)
    if not os.path.exists(final_output_path):
        os.makedirs(final_output_path)
        print('- mkdir: %s' % final_output_path)
    return final_output_path
def open_output_file(output_path, input_fname, n_iterations, n_repeats):
    """Open the per-run output file for writing and return its handle.

    The file is named <input_fname>_<n_iterations>_<n_repeats><FILE_FORMAT>
    inside output_path. The caller owns the handle and must close it.
    """
    fname = '_'.join([input_fname, str(n_iterations), str(n_repeats)]) + FILE_FORMAT
    full_path = os.path.join(output_path, fname)
    handle = open(full_path, 'w')
    print('Output = %s' % (os.path.abspath(full_path)))
    return handle
# Generate a sequence of stories from the same schema
# Returns a random seed for the next sequence
# and a scene (state) vector s_1:n formatted for HRR encoding
#
def write_stories(schema_info, f_stories, f_Q_next, rand_seed, n_repeats,
                  mark_end_state=False, attach_questions=False,
                  gen_symbolic_states=False, attach_role_marker=False,
                  attach_role_maker_before=None
                  ):
    """Generate a sequence of n_repeats stories from the same schema.

    :param schema_info: (attributes, entities, roles, states) tuple
    :param f_stories: writable handle for story text
    :param f_Q_next: writable handle for the question file
    :param rand_seed: seed for the first story; incremented per story
    :param n_repeats: number of stories to generate
    :return: (next unused seed, scene vector s_1:n formatted for HRR encoding)
    """
    # per-story options; these do not change from story to story
    shared_kwargs = dict(
        mark_end_state=mark_end_state,
        attach_questions=attach_questions,
        gen_symbolic_states=gen_symbolic_states,
        attach_role_marker=attach_role_marker,
        attach_role_maker_before=attach_role_maker_before,
    )
    all_scenes = []
    for _ in range(n_repeats):
        # re-seed per story: every story gets a distinct seed, while
        # different runs of run_engine.py see the same seed sequence
        np.random.seed(rand_seed)
        all_scenes += write_one_story(schema_info, f_stories, f_Q_next, **shared_kwargs)
        rand_seed += 1
    return rand_seed, all_scenes
# Generate a single Coffee Shop World story
# Returns a scene (state) vector formatted for HRR encoding
# E.g. [ [(Ask, verb), (Tom, agent), (Charan, patient)],
# [(Answer, verb), (Charan, agent), (Tom, patient)],
# [(Punch, verb), (Tom, agent), (Charan, patient)] ]
#
def write_one_story(schema_info, f_stories, f_Q_next,
                    mark_end_state=False, attach_questions=False,
                    gen_symbolic_states=False, attach_role_marker=False,
                    attach_role_maker_before=None):
    """Generate a single Coffee Shop World story and write it to the files.

    Walks the schema's state graph from BEGIN to END, writing the filled
    text of each state to f_stories, and grounding/question data to
    f_Q_next.

    :param schema_info: (attributes, entities, roles, states) tuple
    :param f_stories: writable handle for story text
    :param f_Q_next: writable handle for the question file
    :param mark_end_state: append END_STATE_MARKER after each state's text
    :param attach_questions: attach role-question markers to each state
    :param gen_symbolic_states: passed through to get_filled_state
    :param attach_role_marker: passed through to get_filled_state
    :param attach_role_maker_before: passed through to get_filled_state
        (NOTE(review): "maker" looks like a typo for "marker", but the name
        is part of the public interface, so it is kept)
    :return: scene (state) vector formatted for HRR encoding, e.g.
        [[(Ask, verb), (Tom, agent), (Charan, patient)], ...]
    """
    # get_filled_state kwargs; constant across the whole story
    gfs_kwargs = dict(attach_role_marker=attach_role_marker, gen_symbolic_states=gen_symbolic_states,
                      attach_role_maker_before=attach_role_maker_before)
    (attributes, entities, roles, states) = schema_info
    grounding = get_grounding(entities, roles)
    people_introduced = set()
    people_all = set(entities['Person'])
    # dump key-value bindings as the question-file header
    f_Q_next.write(json.dumps(grounding) + '\n')
    f_Q_next.write(json.dumps(attributes) + '\n')
    # vector of scenes (here called states) formatted for HRR encoding
    scenes = []
    # Loop through states
    curr_state = 'BEGIN'
    # BUG FIX: had_alt_future must be initialized before the loop.
    # Previously it was only assigned when an alternative future was found,
    # so a story whose first transition produced no alternative raised
    # NameError at the `if curr_state != 'BEGIN' and had_alt_future` check.
    had_alt_future = False
    while True:
        # get the filled state for the question file
        # "filled_Q" and "filled" are sync-ed by having the same arguments (1st 4)
        filled_Q, _, _ = get_filled_state(curr_state, grounding, states, attributes, **gfs_kwargs)
        # don't write question in the 1st iteration (there is no next state
        # if END); a 2AFC question exists only if an alternative future did
        if curr_state != 'BEGIN' and had_alt_future:
            distribution, _ = states[prev_state].get_distribution(grounding, attributes)
            curr_state_p = distribution.get(curr_state)
            write_alt_next_state_q_file(f_Q_next, 'Truth', curr_state_p, curr_state, filled_Q)
            had_alt_future = False
        # collect question text
        f_Q_next.write('\n' + filled_Q + '\n')
        # get a filled state for the story file
        filled, fillers, scene = get_filled_state(curr_state, grounding, states, attributes, **gfs_kwargs)
        scenes.append(scene)  # append scene to scene vector
        if attach_questions:
            filled = attach_role_question_marker(filled, states, curr_state)
        if mark_end_state:
            filled += (' ' + END_STATE_MARKER)
        # write text to file
        f_stories.write(filled + " ")
        # stopping criterion
        if curr_state == 'END':
            if mark_end_state:
                f_stories.write(END_STORY_MARKER)
            f_stories.write('\n\n')
            f_Q_next.write('\n\n')
            break
        # update: sample next state
        prev_state = curr_state
        curr_state = states[curr_state].sample_next(grounding, attributes)
        # generate filler-inconsistent questions
        alt_future, people_introduced = get_filler_inconsistent_next_state(
            fillers, people_introduced, people_all, curr_state, grounding, states, attributes, f_Q_next
        )
        if alt_future != 0:
            had_alt_future = True
        # generate alternative future state
        alt_future = get_alternative_future(prev_state, curr_state, states, grounding, attributes)
        if alt_future != 0:
            had_alt_future = True
            alt_future_filled, _, _ = get_filled_state(alt_future, grounding, states, attributes, **gfs_kwargs)
            # get the probability of the alternative future
            distribution, condition = states[prev_state].get_distribution(grounding, attributes)
            alt_future_p = distribution.get(alt_future)
            write_alt_next_state_q_file(f_Q_next, condition, alt_future_p, alt_future, alt_future_filled)
    return scenes
def write_alt_next_state_q_file(f_Q_next, condition, alt_future_p, alt_future, alt_future_filled):
'''
write the alternative (possible or impossible) state to the question file
:param f_Q_next: a handle on the question file
:param condition: the condition (e.g. subj.mood == sad)
:param alt_future_p: the probability of the alternative future state
:param alt_future: the state name of the alternative future state
:param alt_future_filled: the instantitated alternative future state
'''
f_Q_next.write(condition + '\n')
f_Q_next.write(str(alt_future_p) + '\t' + alt_future + '\t' | |
from __future__ import absolute_import
import base64
import json
import logging
import re
import subprocess
import sys
from datetime import datetime
import pytz
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
import time
from bemoss_lib.platform.BEMOSSAgent import BEMOSSAgent
from bemoss_lib.utils import db_helper
from bemoss_lib.utils.BEMOSS_globals import *
__version__ = '3.0'

STMS = '~*~'  # Sender/topic/message separator used on the zmq wire


def jsonify(sender, topic, msg):
    """JSON-encode msg (wrapped as {'message': msg}) and prepend sender
    and topic, joined with the STMS separator."""
    return STMS.join([sender, topic, json.dumps({'message': msg})])


def dejsonify(sender_topic_msg):
    """Inverse of jsonify(): split off sender and topic, decode the JSON.

    Note: returns the decoded envelope dict {'message': ...}, not the
    inner message itself (matching jsonify's wrapping).
    """
    # bug fix: bound the split so an STMS sequence occurring inside the
    # JSON payload does not cause a "too many values to unpack" ValueError
    sender, topic, msg = sender_topic_msg.split(STMS, 2)
    msg = json.loads(msg)
    return sender, topic, msg
# Name of the DB table mapping device agents to their assigned/current node.
# NOTE(review): `settings` is not imported explicitly above — presumably
# provided by the `BEMOSS_globals` star import; verify.
node_devices_table = settings.DATABASES['default']['TABLE_node_device']
class MultiNodeAgent(BEMOSSAgent):
def __init__(self, *args, **kwargs):
    """Initialize multinode state, schedule periodic jobs, and start the agent.

    Side effects: opens zmq sockets (via setup()) and then blocks inside
    self.run(), the agent's main loop.
    """
    super(MultiNodeAgent, self).__init__(*args, **kwargs)
    # must exist before getMultinodeData(), which seeds per-node status into it
    self.multinode_status = dict()
    self.getMultinodeData()
    self.agent_id = 'multinodeagent'
    self.is_parent = False
    self.last_sync_with_parent = datetime(1991, 1, 1) #equivalent to -ve infinitive
    self.parent_node = None
    self.recently_online_node_list = [] # initialize to lists to empty
    self.recently_offline_node_list = [] # they will be filled as nodes are discovered to be online/offline
    self.setup()
    # periodic jobs: heartbeat every 20 s; health check every 60 s (delayed
    # start so the first heartbeats can arrive); full parent sync every 10 min
    self.runPeriodically(self.send_heartbeat,20)
    self.runPeriodically(self.check_health,60,start_immediately=False)
    self.runPeriodically(self.sync_all_with_parent,600)
    self.subscribe('relay_message',self.relayDirectMessage)
    self.subscribe('update_multinode_data',self.updateMultinodeData)
    self.runContinuously(self.pollClients)
    self.run()
def getMultinodeData(self):
    """Load multinode configuration and refresh the cached node lists.

    Resolves this node's index within known_nodes (raising ValueError if
    the 'this_node' entry is invalid) and seeds a status record for any
    node not seen before. Safe to call repeatedly: existing status
    entries are preserved.
    """
    self.multinode_data = db_helper.get_multinode_data()
    known = self.multinode_data['known_nodes']
    self.nodelist_dict = dict((n['name'], n) for n in known)
    self.node_name_list = [n['name'] for n in known]
    self.address_list = [n['address'] for n in known]
    self.server_key_list = [n['server_key'] for n in known]
    self.node_name = self.multinode_data['this_node']
    for idx, n in enumerate(known):
        if n['name'] == self.node_name:
            self.node_index = idx
            break
    else:
        raise ValueError('"this_node:" entry on the multinode_data json file is invalid')
    # initialize status only for nodes we have not seen before; this method
    # may be re-invoked later, and existing entries must survive
    for name in self.node_name_list:
        if name in self.multinode_status:
            continue
        self.multinode_status[name] = {
            'health': -10,  # initialized; never seen online or offline
            'last_sync_time': datetime(1991, 1, 1),
            'last_online_time': None,
            'last_offline_time': None,
            'last_scanned_time': None,
        }
def setup(self):
    """Create and bind this node's CURVE-encrypted zmq PUB socket.

    Loads certificates from <project>/public_keys and <project>/private_keys
    (exits the process if either directory is missing), starts a
    ThreadAuthenticator for the context, then binds the PUB server on this
    node's configured address and connects the client side.
    """
    print "Setup"
    base_dir = settings.PROJECT_DIR + "/"
    public_keys_dir = os.path.abspath(os.path.join(base_dir, 'public_keys'))
    secret_keys_dir = os.path.abspath(os.path.join(base_dir, 'private_keys'))
    self.secret_keys_dir = secret_keys_dir
    self.public_keys_dir = public_keys_dir
    if not (os.path.exists(public_keys_dir) and
            os.path.exists(secret_keys_dir)):
        logging.critical("Certificates are missing - run generate_certificates.py script first")
        sys.exit(1)
    ctx = zmq.Context.instance()
    self.ctx = ctx
    # Start an authenticator for this context.
    self.auth = ThreadAuthenticator(ctx)
    self.auth.start()
    self.configure_authenticator()
    server = ctx.socket(zmq.PUB)
    # load this node's CURVE keypair from its secret certificate file
    server_secret_key_filename = self.multinode_data['known_nodes'][self.node_index]['server_secret_key']
    server_secret_file = os.path.join(secret_keys_dir, server_secret_key_filename)
    server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
    server.curve_secretkey = server_secret
    server.curve_publickey = server_public
    server.curve_server = True  # must come before bind
    server.bind(self.multinode_data['known_nodes'][self.node_index]['address'])
    self.server = server
    self.configureClient()
def configure_authenticator(self):
    """Allow connections from any address, authenticated by CURVE certs."""
    # allow() with no argument places no IP restriction; identity is
    # established purely by the client certificates configured below
    self.auth.allow()
    # Tell authenticator to use the certificates in a directory
    self.auth.configure_curve(domain='*', location=self.public_keys_dir)
def disperseMessage(self, sender, topic, message):
    """Publish a message to every known node except this one."""
    peers = (name for name in self.node_name_list if name != self.node_name)
    for peer in peers:
        self.server.send(jsonify(sender, peer + '/republish/' + topic, message))
def republishToParent(self, sender, topic, message):
    """Forward a message to the current parent node, unless we are it."""
    if self.is_parent:
        # the parent publishes locally; nothing to forward
        return
    for peer in self.node_name_list:
        # health == 2 marks the parent node
        if self.multinode_status[peer]['health'] != 2:
            continue
        self.server.send(jsonify(sender, peer + '/republish/' + topic, message))
def sync_node_with_parent(self, node_name):
if self.is_parent:
print "Syncing " + node_name
self.last_sync_with_parent = datetime.now()
sync_date_string = self.last_sync_with_parent.strftime('%B %d, %Y, %H:%M:%S')
# os.system('pg_dump bemossdb -f ' + self.self_database_dump_path)
# with open(self.self_database_dump_path, 'r') as f:
# file_content = f.read()
# msg = {'database_dump': base64.b64encode(file_content)}
self.server.send(
jsonify(self.node_name, node_name + '/sync-with-parent/' + sync_date_string + '/'+self.node_name, ""))
def sync_all_with_parent(self,dbcon):
if self.is_parent:
self.last_sync_with_parent = datetime.now()
sync_date_string = self.last_sync_with_parent.strftime('%B %d, %Y, %H:%M:%S')
print "Syncing all nodes"
for node_name in self.node_name_list:
if node_name == self.node_name:
continue
# os.system('pg_dump bemossdb -f ' + self.self_database_dump_path)
# with open(self.self_database_dump_path, 'r') as f:
# file_content = f.read()
# msg = {'database_dump': base64.b64encode(file_content)}
self.server.send(
jsonify(self.node_name, node_name + '/sync-with-parent/' + sync_date_string + '/' + self.node_name, ""))
def send_heartbeat(self,dbcon):
#self.vip.pubsub.publish('pubsub', 'listener', None, {'message': 'Hello Listener'})
#print 'publishing'
print "Sending heartbeat"
last_sync_string = self.last_sync_with_parent.strftime('%B %d, %Y, %H:%M:%S')
self.server.send(jsonify(self.node_name,'heartbeat/' + self.node_name + '/' + str(self.is_parent) + '/' + last_sync_string,""))
def extract_ip(self, addr):
    """Return the first dotted-quad IPv4 address found in addr (a zmq URL)."""
    ipv4_pattern = r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})'
    return re.search(ipv4_pattern, addr).groups()[0]
def getNodeId(self, node_name):
    """Return the index of node_name within known_nodes; raise if absent."""
    for position, node in enumerate(self.multinode_data['known_nodes']):
        if node['name'] == node_name:
            return position
    raise ValueError('the node name: ' + node_name + ' is not found in multinode data')
def getNodeName(self, node_id):
    """Return the configured name of the node at index node_id."""
    node_record = self.multinode_data['known_nodes'][node_id]
    return node_record['name']
def handle_offline_nodes(self, dbcon, node_name_list):
    """Migrate agents from newly-offline nodes onto this (parent) node.

    For each offline node: log a node-offline event, find the agents
    assigned to it, command them to start here (temporary assignment),
    and update their current node in the node-devices table. No-op
    unless this node is the parent.

    :param dbcon: open database connection/cursor
    :param node_name_list: names of nodes that recently went offline
    """
    if self.is_parent:
        # start all the agents belonging to that node on this node
        command_group = []
        for node_name in node_name_list:
            node_id = self.getNodeId(node_name)
            #put the offline event into cassandra events log table, and also create notification
            self.EventRegister(dbcon, 'node-offline',reason='communication-error',source=node_name)
            # get a list of agents that were supposedly running in that offline node
            dbcon.execute("SELECT agent_id FROM " + node_devices_table + " WHERE assigned_node_id=%s",
                          (node_id,))
            if dbcon.rowcount:
                agent_ids = dbcon.fetchall()
                for agent_id in agent_ids:
                    # command: start this agent on the parent node, marked
                    # TEMPORARY so it can move back when the node returns
                    message = dict()
                    message[STATUS_CHANGE.AGENT_ID] = agent_id[0]
                    message[STATUS_CHANGE.NODE] = str(self.node_index)
                    message[STATUS_CHANGE.AGENT_STATUS] = 'start'
                    message[STATUS_CHANGE.NODE_ASSIGNMENT_TYPE] = ZONE_ASSIGNMENT_TYPES.TEMPORARY
                    command_group += [message]
                    # record the move immediately in the node-devices table
                    dbcon.execute("UPDATE " + node_devices_table + " SET current_node_id=(%s), date_move=(%s)"
                                  " WHERE agent_id=(%s)",
                                  (self.node_index, datetime.now(pytz.UTC), agent_id[0]))
                    dbcon.commit()
        print "moving agents from offline node to parent: " + str(node_name_list)
        print command_group
        if command_group:
            self.bemoss_publish(target='networkagent', topic='status_change', message=command_group)
def handle_online_nodes(self, dbcon, node_name_list):
    """Move agents back to their home nodes when those nodes come online.

    For each newly-online node (other than this one): log a node-online
    event, find the agents assigned to it, command them to stop here and
    start on their home node (permanent assignment), and update the
    node-devices table. No-op unless this node is the parent.

    :param dbcon: open database connection/cursor
    :param node_name_list: names of nodes that recently came online
    """
    if self.is_parent:
        # start all the agents belonging to that nodes back on them
        command_group = []
        for node_name in node_name_list:
            node_id = self.getNodeId(node_name)
            if self.node_index == node_id:
                continue #don't handle self-online
            self.EventRegister(dbcon, 'node-online',reason='communication-restored',source=node_name)
            #get a list of agents that were supposed to be running in that online node
            dbcon.execute("SELECT agent_id FROM " + node_devices_table + " WHERE assigned_node_id=%s",
                          (node_id,))
            if dbcon.rowcount:
                agent_ids = dbcon.fetchall()
                for agent_id in agent_ids:
                    # two commands per agent: stop here, then start on the
                    # (now online) home node, marked PERMANENT
                    message = dict()
                    message[STATUS_CHANGE.AGENT_ID] = agent_id[0]
                    message[STATUS_CHANGE.NODE_ASSIGNMENT_TYPE] = ZONE_ASSIGNMENT_TYPES.PERMANENT
                    message[STATUS_CHANGE.NODE] = str(self.node_index)
                    message[STATUS_CHANGE.AGENT_STATUS] = 'stop' #stop in this node
                    command_group += [message]
                    message = dict(message) #create another copy
                    message[STATUS_CHANGE.NODE] = str(node_id)
                    message[STATUS_CHANGE.AGENT_STATUS] = 'start' #start in the target node
                    command_group += [message]
                    #immediately update the multinode device assignment table
                    dbcon.execute("UPDATE " + node_devices_table + " SET current_node_id=(%s), date_move=(%s)"
                                  " WHERE agent_id=(%s)", (node_id, datetime.now(pytz.UTC), agent_id[0]))
                    dbcon.commit()
        print "Moving agents back to the online node: " + str(node_name_list)
        print command_group
        if command_group:
            self.bemoss_publish(target='networkagent',topic='status_change',message=command_group)
def updateParent(self, dbcon, parent_node_name):
    """Record the (possibly new) parent node's IP and refresh multinode data.

    Writes the parent's IP to MULTINODE_PARENT_IP_FILE only if it differs
    from what is already stored; in that case the DB connection is
    re-established (against the new parent) and multinode data refreshed.

    NOTE(review): rebinding the local `dbcon` below is not visible to the
    caller, so the caller keeps holding the old, now-closed connection —
    verify whether this should return the new connection instead.
    """
    parent_ip = self.extract_ip(self.nodelist_dict[parent_node_name]['address'])
    write_new = False
    if not os.path.isfile(settings.MULTINODE_PARENT_IP_FILE): # but parent file doesn't exists
        write_new = True
    else:
        with open(settings.MULTINODE_PARENT_IP_FILE, 'r') as f:
            read_ip = f.read()
        if read_ip != parent_ip:
            write_new = True
    if write_new:
        with open(settings.MULTINODE_PARENT_IP_FILE, 'w') as f:
            f.write(parent_ip)
        if dbcon:
            dbcon.close() #close old connection
        dbcon = db_helper.db_connection() #start new connection using new parent_ip
        self.updateMultinodeData(sender=self.name,topic='update_parent',message="")
def check_health(self, dbcon):
for node_name, node in self.multinode_status.items():
if node['health'] > 0 : #initialize all online nodes to 0. If they are really online, they should change it
# back to 1 or 2 (parent) within 30 seconds throught the heartbeat.
node['health'] = 0
time.sleep(30)
parent_node_name = None #initialize parent node
online_node_exists = False
for node_name, node in self.multinode_status.items():
node['last_scanned_time'] = datetime.now()
if node['health'] == 0:
node['health'] = -1
node['last_offline_time'] = datetime.now()
self.recently_offline_node_list += [node_name]
elif node['health'] == -1: #offline since long
pass
elif node['health'] == -10: #The node was initialized to -10, and never came online. Treat it as recently going
# offline for this iteration so that the agents that were supposed to be running there can be migrated
node['health'] = -1
self.recently_offline_node_list += [node_name]
elif node['health'] == 2: #there is some parent node present
parent_node_name = node_name
if node['health'] >0:
online_node_exists = True #At-least one node (itself) should be online, if not some problem
assert online_node_exists, "At least one node (current node) must be online"
if not parent_node_name: #parent node doesn't exist
#find a suitable node to elect a parent. The node with latest update from previous parent wins. If there is
#tie, then the node coming earlier in the node-list on multinode data wins
online_node_last_sync = dict() #only the online nodes, and their last-sync-times
for node_name, node in self.multinode_status.items(): #copy only the online nodes
if node['health'] > 0:
online_node_last_sync[node_name] = node['last_sync_time']
latest_node = max(online_node_last_sync,key=online_node_last_sync.get)
latest_sync_date = online_node_last_sync[latest_node]
for node_name in self.node_name_list:
if self.multinode_status[node_name]['health'] <= 0: #dead nodes can't be parents
continue
if self.multinode_status[node_name]['last_sync_time'] == latest_sync_date: # this is the first node with the latest update from parent
#elligible parent found
self.updateParent(dbcon, node_name)
if node_name == self.node_name: # I am the node, so I get to become the parent
self.is_parent = True
print "I am the boss now, " + self.node_name
break
else: #I-am-not-the-first-node with latest update; somebody else is
self.is_parent = False
break
else: #parent node exist
self.updateParent(dbcon,parent_node_name)
for node in self.multinode_data['known_nodes']:
print node['name'] + ': ' + str(self.multinode_status[node['name']]['health'])
if self.is_parent:
#if this is a parent node, update the node_info table
if dbcon is None: #if no database connection exists make connection
dbcon = | |
por mes por los siguientes gastos.",
"zh-s": "### 现在,请估算您每个月的以下花费是多少。",
"zh-t": "### 現在,請估算您每個月的以下開支。"
},
"you_take_home_pre": {
"en": "You said you take home",
"es": "Usted dijo que gana ",
"zh-s": "您说您每个月带回家${ format_money(income) }。如果您没有此项花费",
"zh-t": "您說每個月帶回家"
},
"you_take_home_post": {
"en": " each month. If you don't spend money on an expense, enter $0.",
"es": " por mes, después de deducir impuestos. Si no tiene este gasto, ponga $0.",
"zh-s": " 请输入$0。",
"zh-t": "。如果您沒有此項開支,請輸入$0。 "
},
"rent": {
"en": "Rent",
"es": "Alquiler",
"zh-s": "租金",
"zh-t": "租金"
},
"mortgage": {
"en": "Mortgage",
"es": "Hipoteca",
"zh-s": "按揭",
"zh-t": "房屋貸款"
},
"utilities": {
"en": "Utilities",
"es": "Servicios públicos",
"zh-s": "公用事业",
"zh-t": "公用事業"
},
"phone_bill": {
"en": "Phone Bill",
"es": "Teléfono",
"zh-s": "电话费",
"zh-t": "電話費"
},
"food_groceries_restaurants": {
"en": "Food (Groceries & Restaurants)",
"es": "Alimentos (supermercado y restaurantes)",
"zh-s": "食品(食品店和饭店)",
"zh-t": "食品(雜貨店和餐廳)"
},
"insurance": {
"en": "Insurance",
"es": "Seguro",
"zh-s": "保险",
"zh-t": "保險"
},
"clothing": {
"en": "Clothing",
"es": "Ropa",
"zh-s": "衣服",
"zh-t": "衣服"
},
"child_spousal_support": {
"en": "Child or Spousal Support",
"es": "Manutención de los hijos o del cónyuge",
"zh-s": "子女或配偶扶养费",
"zh-t": "子女或配偶扶養費"
},
"transportation_gas_etc": {
"en": "Transportation (Gas, Car Payments, Transit)",
"es": "Transporte (gasolina, pagos del carro, transporte público)",
"zh-s": "交通(汽油、车款、公交)",
"zh-t": "交通(汽油、汽車還款、公交)"
},
"would_other_hardship": {
"en": "Would you like to report other expenses or reasons for financial hardship?",
"es": "¿Quiere reportar otros gastos o razones para explicar sus dificultades económicas?",
"zh-s": "您是否想要报告其他花费或财务困难的原因?",
"zh-t": "您是否願意報告其他開支或財務困難的原因?"
},
"what_hardship": {
"en": "What other financial hardship would you like the Court to consider?",
"es": "¿Qué otro tipo de problema económico quiere que considere la corte? Nota: debe responder esta pregunta en inglés.",
"zh-s": "您希望法院考虑哪些其他种类的财务困难?",
"zh-t": "您希望法院考慮其他哪一種財務困難?"
},
"total_additional_expenses": {
"en": "What is the total monthly cost of these additional expenses?",
"es": "¿Cuánto es el costo total mensual de estos gastos adicionales?",
"zh-s": "",
"zh-t": ""
},
"additional_requests": {
"en": "Additional Requests",
"es": "Solicitudes adicionales",
"zh-s": "附加请求",
"zh-t": "額外請求"
},
"would_like_additional": {
"en": "In addition to a fine reduction, would you like to add any of the following requests to the current or reduced amount?",
"es": "Además de solicitar una multa reducida, ¿desea pedir una de las siguientes opciones a la corte?",
"zh-s": "",
"zh-t": "您是否想在目前或減低的金額之外增加以下請求:"
},
"payment_plan": {
"en": "Payment Plan",
"es": "Plan de pagos",
"zh-s": "支付计划",
"zh-t": "付款計劃"
},
"community_service": {
"en": "Community Service",
"es": "Servicio comunitario",
"zh-s": "社区服务",
"zh-t": "社區服務"
},
"extension": {
"en": "Extension",
"es": "Aplazamiento de pago de la multa",
"zh-s": "延期",
"zh-t": "延期"
},
"administrative_fees": {
"en": "Note: Your court may charge administrative fees for setting up a payment plan or community service work plan.",
"es": "Nota: Su corte puede cobrar una cuota para establecer un plan de pagos o un plan de servicio comunitario.",
"zh-s": "备注:您的法院可就设定支付计划或者社区服务工作计划收取管理费。",
"zh-t": "備註:您的法院可能會收取設定付款計劃或社區服務工作計劃的管理費。"
},
"make_plea": {
"en": "### Make a Plea for {citation_number}",
"es": "### Haga su declaración {citation_number}",
"zh-s": "### 进行答辩 {citation_number}",
"zh-t": "### 進行答辯 {citation_number}"
},
"plea_instructions": {
"en": """
In order to submit your fine reduction request, you need to admit responsibility for the ticket by pleading **Guilty** or **No Contest**.
If you do not want to admit responsibility or if you do not understand these rights, please exit the system and contact your court to set up an in-person court appearance.
By pleading you will be giving up the following rights:
* To be represented by an attorney employed by you;
* To have a speedy and public trial in front of a judge;
* To testify, to present evidence, and to use court orders without cost to compel the attendance of witnesses and the production of evidence on your behalf;
* To have the witnesses against you testify under oath in court, and to question such witnesses;
* To remain silent and not testify and not incriminate yourself.
""",
"es": """
Para presentar su solicitud de reducción de multa, tiene que admitir su responsabilidad por la citación y declararse culpable o sin disputa. Si no quiere admitir responsabilidad o no comprende estos derechos, deje de usar este programa y comuníquese con la corte para programar una comparecencia en persona.
Al declararse culpable o sin disputa, estará renunciando a los siguientes derechos:
* representación por un abogado contratado por usted;
* un juicio público y sin demora delante de un juez;
* dar testimonio, presentar pruebas, y usar órdenes de la corte sin costo para obligar la asistencia de testigos y la presentación de pruebas en su nombre;
* el testimonio bajo juramento en la corte de testigos en su contra, y la interrogación de dichos testigos;
* guardar silencio y no testificar ni incriminarse.
""",
"zh-s": """
为提交您的减少罚款请求,您需要通过有罪或无异议答辩承认对罚单的责任。如果您不想承认责任,或者您不理解这些权利,请退出系统,联系法院安排亲自出庭。
通过答辩,您将放弃以下权利:
* 由您聘请的律师代理;
* 由法官进行快速、公开审理;
* 作证,出示证据,免费使用法庭命令强制证人为您出庭和举证;
* 让对您不利的证人在法庭宣誓作证,并质问该证人;
* 保持沉默,不作证,不自证有罪。
""",
"zh-t": """
為提交您的減少罰款請求,您需要透過有罪或無異議答辯承認您對罰單的責任。如果您不想承認責任,或是不理解這些權利,請退出系統,聯繫法院安排親自出庭。
透過答辯,您將放棄以下權利:
* 由您聘請的律師代理;
* 由法官進行快速、公開的審理;
* 作證,出示證據,免費使用法庭命令強制證人為您出庭和舉證;
* 讓對您不利的證人在法庭宣誓作證,並質問該證人;
* 保持沉默,不作證,不自證有罪。
"""
},
"Make_plea_choice": {
"en": "Make a choice between pleading Guilty or No Contest. A no contest plea is a way of saying, 'I don’t believe I did all that the officer charges, but I admit violating the law.'",
"es": "Decida si se va a declarar culpable o sin disputa. Declararse ‘sin disputa’ es una manera de decir “no creo haber hecho todo lo que me acusa el agente, pero admito que violé la ley”.",
"zh-s": "在有罪或无异议答辩之间做出选择。无异议答辩是表示:“我不认为我做了官员指控的一切,但我承认违反法律。”",
"zh-t": "在有罪或無異議答辯之間做出選擇。無異議答辯是表示:「我不認為我做了官員指控的一切,但我承認違反法律。」"
},
"no_content_plea": {
"en": "**No Contest Plea.** I have read, understand, and waive the rights above, there are facts to support my plea, I am entering my plea freely and voluntarily, and I agree to plead “no contest”. I understand that, for purposes of this case, a plea of no contest will be considered the same as a plea of guilty and that if I plead no contest the court will find me guilty.",
"es": "**Sin disputa.** He leído, comprendo y renuncio a los derechos descritos arriba; hay hechos que justifican mi declaración. Hago esta declaración en forma libre y voluntaria, y acepto hacer una declaración de ‘sin disputa’. Comprendo que una declaración de sin disputa en este caso se interpretará de la misma manera que una declaración de culpable, y que si me declaro sin disputa la corte me declarará culpable.",
"zh-s": "**无异议答辩。** 我已阅读、理解并放弃以上权利,有事实支撑我的答辩,我的答辩是自由、自愿做出的,并且我同意“无异议”答辩。我理解,就本案而言,无异议答辩将被视同有罪答辩,并且如果我进行无异议答辩,法院将认定我有罪。",
"zh-t": "**無異議答辯。** 我已閱讀、理解並放棄以上權利,有事實支持我的答辯,我的答辯是自由、自願做出的,而且我同意「無異議」答辯。我理解,就本案而言,無異議答辯將被視同有罪答辯,如果我進行無異議答辯,法院將認定我有罪。"
},
"guilty_plea": {
"en": "**Guilty Plea.** I have read, understand, and waive the rights above, there are facts to support my plea. I am entering my plea freely and voluntarily, and agree to plead guilty.",
"es": "**Declaración de culpable.** He leído, comprendo y renuncio a los derechos descritos arriba; hay hechos que justifican mi declaración. Hago esta declaración en forma libre y voluntaria, y acepto hacer una declaración de culpable.",
"zh-s": "**有罪答辩。** 我已阅读、理解并放弃以上权利,有事实支撑我的答辩。我的答辩是自由、自愿做出的,并且我同意有罪答辩。",
"zh-t": "**有罪答辯。** 我已閱讀、理解並放棄以上權利,有事實支持我的答辯。我的答辯是自由、自願做出的,而且我同意有罪答辯。"
},
"admit_responsibility": {
"en": "Note: Once you admit responsibility, you will have a conviction for this traffic offense that will be reported the Department of Motor Vehicles (DMV).",
"es": "Nota: Una vez que admita responsabilidad, lo condenarán por esta infracción de tránsito y la condena será reportada al Departamento de Vehículos Motorizados (DMV).",
"zh-s": "备注:一旦您承认责任,您将被认定实施了该交通犯罪,这将被报告给机动车管理局(DMV)。",
"zh-t": "備註:一旦您承認責任,您將被認定實施了該交通犯罪,這會報告給機動車輛管理局(DMV)。"
},
"optional_questions": {
"en": "### Optional Questions",
"es": "### Preguntas opcionales",
"zh-s": "### 选答问题",
"zh-t": "### 選答題"
},
"info_confidential": {
"en": "## Your information will be kept confidential and may be used for research conducted to improve court services.",
"es": "## Su información será confidencial y se puede usar para investigaciones con el fin de mejorar los servicios de la corte.",
"zh-s": "## 您的信息将被保密,可能被用于为改善法院服务而进行的研究。",
"zh-t": "## 您的資訊將會保密,可能被用於為改善法院服務而進行的研究。"
},
"how_helpful": {
"en": "How helpful was this tool in addressing your traffic ticket?",
"es": "¿Qué tan útil fue este servicio para resolver su multa de tránsito?",
"zh-s": "该工具对解决您的交通罚单有多大帮助?",
"zh-t": "本工具對解決您的交通罰單有多大幫助?"
},
"very_helpful": {
"en": "Very helpful",
"es": "Muy útil",
"zh-s": "很有帮助",
"zh-t": "很有幫助"
},
"somewhat_helpful": {
"en": "Somewhat helpful",
"es": "Algo útil",
"zh-s": "有些帮助",
"zh-t": "有點幫助"
},
"as_helpful_as_court": {
"en": "As helpful as coming into court",
"es": "Tan útil como ir a la corte",
"zh-s": "跟去法院的帮助相同",
"zh-t": "和去法院的幫助相同"
},
"somewhat_unhelpful": {
"en": "Somewhat unhelpful",
"es": "No muy útil",
"zh-s": "不大有帮助",
"zh-t": "不太有幫助"
},
"not_helpful": {
"en": "Not helpful at all",
"es": "Completamente inútil",
"zh-s": "根本没有帮助",
"zh-t": "根本沒有幫助"
},
"say_more_about_difficulty": {
| |
<reponame>oldarmyc/cap
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --- Generic sample fixtures shared across tests ---

# Minimal product definition, shaped like a stored product record
sample_product = {
    "title": "Test",
    "us_url": "http://us.test.com",
    "uk_url": "http://uk.test.com",
    "active": True,
    "db_name": "test",
    "require_region": True,
    "doc_url": "http://doc.test.com",
    "pitchfork_url": "https://pitchfork/url"
}

# Minimal limit definition: where/how to read a limit value from a response
sample_limit = {
    "product": "test",
    "title": "Test",
    "uri": "/limits",
    "slug": "test",
    "active": True,
    "absolute_path": "test/path",
    "absolute_type": "list",
    "limit_key": "test_limit",
    "value_key": "test_value"
}

# Sample query-log entry (who queried which products, where, and results)
sample_log = {
    "queried": ["dns"],
    "queried_by": "skeletor",
    "region": "dfw",
    "ddi": "123456",
    'query_results': []
}

# Expected flash message shown when API authentication fails
sample_auth_failure = {
    'message': (
        '<strong>Error!</strong> Authentication has failed due to'
        ' incorrect token or DDI. Please check the token and DDI '
        'and try again.'
    )
}
""" DNS Tests """
dns = {
"title": "DNS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "dns",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
dns_limit = {
"product": "dns",
"title": "Domains",
"uri": "/limits",
"slug": "domains",
"active": True,
"absolute_path": "limits.absolute",
"absolute_type": "dict",
"value_key": "",
"limit_key": "domains"
}
dns_limit_return = {
"limits": {
"rate": [
{
"regex": ".*/v\\d+\\.\\d+/(\\d+/domains/search).*",
"limit": [
{
"value": 20,
"verb": "GET",
"next-available": "2016-01-12T13:56:11.450Z",
"remaining": 20,
"unit": "MINUTE"
}
],
"uri": "*/domains/search*"
}
],
"absolute": {
"domains": 500,
"records per domain": 500
}
}
}
dns_list_return = {
"domains": [
{
"comment": "Test",
"updated": "2015-12-08T20:47:02.000+0000",
"name": "test.net",
"created": "2015-04-09T15:42:49.000+0000",
"emailAddress": "<EMAIL>",
"id": 123465798,
"accountId": 1234567
}
],
"totalEntries": 1
}
dns_full_return = {
'dns': {
'values': {'Domains': 1},
'limits': {'Domains': 500}
}
}
""" Autoscale """
autoscale = {
"title": "Autoscale",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "autoscale",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
autoscale_limit = {
"product": "autoscale",
"title": "Max Groups",
"absolute_path": "limits.absolute",
"uri": "/v1.0/{ddi}/limits",
"slug": "max_groups",
"value_key": "",
"absolute_type": "dict",
"active": True,
"limit_key": "maxGroups"
}
autoscale_limit_return = {
"limits": {
"rate": [
{
"regex": "/v1\\.0/execute/(.*)",
"limit": [
{
"value": 10,
"verb": "ALL",
"next-available": "2016-01-12T14:51:13.402Z",
"remaining": 10,
"unit": "SECOND"
}
],
"uri": "/v1.0/execute/*"
}
],
"absolute": {
"maxGroups": 1000,
"maxPoliciesPerGroup": 100,
"maxWebhooksPerPolicy": 25
}
}
}
autoscale_list_return = {
"groups": [
{
"state": {
"status": "ACTIVE",
"desiredCapacity": 0,
"paused": False,
"active": [],
"pendingCapacity": 0,
"activeCapacity": 0,
"name": "test"
},
"id": "d446f3c2-612f-41b8-92dc-4d6e1422bde2",
"links": [
{
"href": (
'https://dfw.autoscale.api.rackspacecloud.com/v1.0'
'/1234567/groups/d446f3c2-612f-41b8-92dc-4d6e1422bde2/'
),
"rel": "self"
}
]
}
],
"groups_links": []
}
autoscale_full_return = {
'autoscale': {
'values': {'Max Groups': 1},
'limits': {'Max Groups': 1000}
}
}
""" Big Data """
big_data = {
"title": "Big Data",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "big_data",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
big_data_limit = [
{
"product": "big_data",
"title": "Node Count",
"absolute_path": "limits.absolute.node_count",
"uri": "/v2/{ddi}/limits",
"slug": "node_count",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}, {
"product": "big_data",
"title": "Disk - MB",
"absolute_path": "limits.absolute.disk",
"uri": "/v2/{ddi}/limits",
"slug": "disk_-_mb",
"value_key": "remaining",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
]
big_data_limit_return = {
"limits": {
"absolute": {
"node_count": {
"limit": 15,
"remaining": 8
},
"disk": {
"limit": 50000,
"remaining": 25000
},
"ram": {
"limit": 655360,
"remaining": 555360
},
"vcpus": {
"limit": 200,
"remaining": 120
}
}
}
}
big_data_full_return = {
'big_data': {
'values': {'Node Count': 7, 'Disk - MB': 25000},
'limits': {'Node Count': 15, 'Disk - MB': 50000}
}
}
""" CBS """
cbs = {
"title": "CBS",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "cbs",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
cbs_limit = {
"product": "cbs",
"title": "SATA - GB",
"absolute_path": "quota_set.gigabytes_SATA",
"uri": "/v1/{ddi}/os-quota-sets/{ddi}?usage=True",
"slug": "sata_-_gb",
"value_key": "in_use",
"absolute_type": "dict",
"active": True,
"limit_key": "limit"
}
cbs_limit_return = {
"quota_set": {
"volumes": {
"limit": -1,
"reserved": 0,
"in_use": 3
},
"gigabytes_SATA": {
"limit": 10240,
"reserved": 0,
"in_use": 325
},
"gigabytes_SSD": {
"limit": 10240,
"reserved": 0,
"in_use": 50
}
}
}
cbs_full_return = {
'cbs': {
'values': {'SATA - GB': 9915},
'limits': {'SATA - GB': 10240}
}
}
""" Load Balancers """
clb = {
"title": "Load Balancers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "load_balancers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
clb_limit = [
{
"product": "load_balancers",
"title": "Total Load Balancers",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "total_load_balancers",
"active": True,
"path": "absolute['LOADBALANCER_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "LOADBALANCER_LIMIT"
}, {
"product": "load_balancers",
"title": "Nodes per LB",
"uri": "/v1.0/{ddi}/loadbalancers/absolutelimits",
"slug": "nodes_per_lb",
"active": True,
"path": "absolute['NODE_LIMIT']",
"absolute_path": "absolute",
"value_key": "",
"absolute_type": "list",
"limit_key": "NODE_LIMIT"
}
]
clb_limit_return = {
"absolute": [
{
"name": "IPV6_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_LIMIT",
"value": 25
}, {
"name": "BATCH_DELETE_LIMIT",
"value": 10
}, {
"name": "ACCESS_LIST_LIMIT",
"value": 100
}, {
"name": "NODE_LIMIT",
"value": 25
}, {
"name": "NODE_META_LIMIT",
"value": 25
}, {
"name": "LOADBALANCER_META_LIMIT",
"value": 25
}, {
"name": "CERTIFICATE_MAPPING_LIMIT",
"value": 20
}
]
}
clb_list_return = {
"loadBalancers": [
{
"status": "ACTIVE",
"updated": {
"time": "2016-01-12T16:04:44Z"
},
"protocol": "HTTP",
"name": "test",
"algorithm": "LEAST_CONNECTIONS",
"created": {
"time": "2016-01-12T16:04:44Z"
},
"virtualIps": [
{
"ipVersion": "IPV4",
"type": "PUBLIC",
"id": 19875,
"address": "172.16.31.10"
}, {
"ipVersion": "IPV6",
"type": "PUBLIC",
"id": 9318325,
"address": "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
}
],
"id": 506497,
"timeout": 30,
"nodeCount": 0,
"port": 80
}
]
}
clb_full_return = {
'load_balancers': {
'values': {'Total Load Balancers': 1},
'limits': {'Total Load Balancers': 25, 'Nodes per LB': 25}
}
}
""" Servers """
server = {
"title": "Servers",
"us_url": "https://us.test.com",
"uk_url": "https://uk.test.com",
"active": True,
"db_name": "servers",
"require_region": True,
"doc_url": "https://doc.test.com",
"pitchfork_url": "https://pitchfork.url",
"limit_maps": []
}
server_limit = [
{
"product": "servers",
"title": "Servers",
"uri": "/v2/{ddi}/limits",
"slug": "servers",
"active": True,
"path": "absolute['maxTotalInstances']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalInstances"
}, {
"product": "servers",
"title": "Private Networks",
"uri": "/v2/{ddi}/limits",
"slug": "private_networks",
"active": True,
"path": "absolute['maxTotalPrivateNetworks']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalPrivateNetworks"
}, {
"product": "servers",
"title": "Ram - MB",
"uri": "/v2/{ddi}/limits",
"slug": "ram_-_mb",
"active": True,
"path": "absolute['maxTotalRAMSize']",
"absolute_path": "limits.absolute",
"value_key": "",
"absolute_type": "dict",
"limit_key": "maxTotalRAMSize"
}
]
server_limit_return = {
"limits": {
"rate": [
{
"regex": "/[^/]*/?$",
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "MINUTE",
"verb": "GET",
"remaining": 2200,
"value": 2200
}
],
"uri": "*"
}, {
"regex": (
"/v[^/]+/[^/]+/servers/([^/]+)/rax-si-image-schedule"
),
"limit": [
{
"next-available": "2016-01-12T16:14:47.624Z",
"unit": "SECOND",
"verb": "POST",
"remaining": 10,
"value": 10
}
],
"uri": "/servers/{id}/rax-si-image-schedule"
}
],
"absolute": {
"maxPersonalitySize": 1000,
"maxTotalCores": -1,
"maxPersonality": 5,
"totalPrivateNetworksUsed": 1,
"maxImageMeta": 40,
"maxTotalPrivateNetworks": 10,
"maxSecurityGroupRules": -1,
"maxTotalKeypairs": 100,
"totalRAMUsed": 4096,
"maxSecurityGroups": -1,
"totalFloatingIpsUsed": 0,
"totalInstancesUsed": 3,
"totalSecurityGroupsUsed": 0,
"maxServerMeta": 40,
"maxTotalFloatingIps": -1,
"maxTotalInstances": 200,
"totalCoresUsed": 4,
"maxTotalRAMSize": 256000
}
}
}
server_list_return = {
"servers": [
{
"OS-EXT-STS:task_state": None,
"addresses": {
"public": [
{
"version": 4,
"addr": "192.168.3.11"
}, {
"version": 6,
"addr": "fc00:e968:6179::de52:7100"
}
],
"private": [
{
"version": 4,
"addr": "10.176.205.68"
}
]
},
"flavor": {
"id": "general1-1",
"links": [
{
"href": (
"https://iad.servers.api.rackspacecloud.com"
"/766030/flavors/general1-1"
),
"rel": "bookmark"
}
]
},
"id": "3290e50d-888f-4500-a934-16c10f3b8a10",
"user_id": "284275",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "192.168.3.11",
"accessIPv6": "fc00:e968:6179::de52:7100",
"progress": 100,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "ACTIVE",
"updated": "2016-01-12T15:16:37Z",
"name": "test-server",
"created": "2016-01-12T15:15:39Z",
"tenant_id": "1234567",
"metadata": {
"build_config": "",
"rax_service_level_automation": "Complete"
}
}
]
}
server_list_processed_return = [
{
'status': 'ACTIVE',
'updated': '2016-01-12T15:16:37Z',
'OS-EXT-STS:task_state': None,
'user_id': '284275',
'addresses': {
'public': [
{
'version': 4,
'addr': '192.168.3.11'
}, {
'version': 6,
'addr': 'fc00:e968:6179::de52:7100'
}
],
'private': [
{
'version': 4,
'addr': '10.176.205.68'
}
]
},
'created': '2016-01-12T15:15:39Z',
'tenant_id': '1234567',
'OS-DCF:diskConfig': 'MANUAL',
'id': '3290e50d-888f-4500-a934-16c10f3b8a10',
'accessIPv4': '192.168.3.11',
'accessIPv6': 'fc00:e968:6179::de52:7100',
'config_drive': '',
'progress': 100,
'OS-EXT-STS:power_state': 1,
'metadata': {
'build_config': '',
'rax_service_level_automation': 'Complete'
},
'flavor': {
'id': 'general1-1',
'links': [
{
'href': (
'https://iad.servers.api.rackspacecloud.com'
'/766030/flavors/general1-1'
),
'rel': 'bookmark'
}
]
},
'name': 'test-server'
}
]
network_list_return = {
"networks": [
{
"status": "ACTIVE",
"subnets": [
"879ff280-6f17-4fd8-b684-19237d88fc45"
],
"name": "test-network",
"admin_state_up": True,
"tenant_id": "1234567",
"shared": False,
"id": "e737483a-00d7-4517-afc3-bd1fbbbd4cd3"
}
]
}
network_processed_list = [
{
'status': 'ACTIVE',
'subnets': [
'879ff280-6f17-4fd8-b684-19237d88fc45'
],
'name': 'test-network',
'admin_state_up': True,
'tenant_id': '1234567',
'shared': False,
'id': 'e737483a-00d7-4517-afc3-bd1fbbbd4cd3'
}
]
server_flavor_return = {
"flavor": {
"ram": 1024,
"name": "1 GB General Purpose v1",
"OS-FLV-WITH-EXT-SPECS:extra_specs": {
"number_of_data_disks": "0",
"class": "general1",
"disk_io_index": "40",
"policy_class": "general_flavor"
},
"vcpus": 1,
"swap": "",
"rxtx_factor": 200.0,
"OS-FLV-EXT-DATA:ephemeral": 0,
"disk": 20,
"id": "general1-1"
}
}
server_full_return = {
'servers': {
'values': {
'Private Networks': 1,
'Ram - MB': 1024,
'Servers': 1
},
'limits': {
'Private Networks': 10,
'Ram | |
or node. Conflicts with `availability_zone`. Changing this creates a
new server.
"""
return pulumi.get(self, "availability_zone_hints")
@availability_zone_hints.setter
def availability_zone_hints(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "availability_zone_hints", value)
@property
@pulumi.getter(name="blockDevices")
def block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceArgs']]]]:
    """
    Configuration of block devices. The block_device
    structure is documented below. Changing this creates a new server.
    You can specify multiple block devices which will create an instance with
    multiple disks. This configuration is very flexible, so please see the
    following [reference](https://docs.openstack.org/nova/latest/user/block-device-mapping.html)
    for more information.
    """
    return pulumi.get(self, "block_devices")

@block_devices.setter
def block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceArgs']]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "block_devices", value)

@property
@pulumi.getter(name="configDrive")
def config_drive(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether to use the config_drive feature to
    configure the instance. Changing this creates a new server.
    """
    return pulumi.get(self, "config_drive")

@config_drive.setter
def config_drive(self, value: Optional[pulumi.Input[bool]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "config_drive", value)
@property
@pulumi.getter(name="flavorId")
def flavor_id(self) -> Optional[pulumi.Input[str]]:
    """
    The flavor ID of
    the desired flavor for the server. Changing this resizes the existing server.
    """
    return pulumi.get(self, "flavor_id")

@flavor_id.setter
def flavor_id(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "flavor_id", value)

@property
@pulumi.getter(name="flavorName")
def flavor_name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the
    desired flavor for the server. Changing this resizes the existing server.
    """
    return pulumi.get(self, "flavor_name")

@flavor_name.setter
def flavor_name(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "flavor_name", value)

@property
@pulumi.getter(name="floatingIp")
def floating_ip(self) -> Optional[pulumi.Input[str]]:
    """
    The value of the `floating_ip` argument, if one was supplied.
    """
    return pulumi.get(self, "floating_ip")

@floating_ip.setter
def floating_ip(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "floating_ip", value)
@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether to force the OpenStack instance to be
    forcefully deleted. This is useful for environments that have reclaim / soft
    deletion enabled.
    """
    return pulumi.get(self, "force_delete")

@force_delete.setter
def force_delete(self, value: Optional[pulumi.Input[bool]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "force_delete", value)

@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
    """
    (Optional; Required if `image_name` is empty and not booting
    from a volume. Do not specify if booting from a volume.) The image ID of
    the desired image for the server. Changing this creates a new server.
    """
    return pulumi.get(self, "image_id")

@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "image_id", value)

@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
    """
    (Optional; Required if `image_id` is empty and not booting
    from a volume. Do not specify if booting from a volume.) The name of the
    desired image for the server. Changing this creates a new server.
    """
    return pulumi.get(self, "image_name")

@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="keyPair")
def key_pair(self) -> Optional[pulumi.Input[str]]:
    """
    The name of a key pair to put on the server. The key
    pair must already be created and associated with the tenant's account.
    Changing this creates a new server.
    """
    return pulumi.get(self, "key_pair")

@key_pair.setter
def key_pair(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "key_pair", value)

@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
    """
    Metadata key/value pairs to make available from
    within the instance. Changing this updates the existing server metadata.
    """
    return pulumi.get(self, "metadata")

@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "metadata", value)

@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The human-readable
    name of the network. Changing this creates a new server.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkMode")
def network_mode(self) -> Optional[pulumi.Input[str]]:
    """
    Special string for `network` option to create
    the server. `network_mode` can be `"auto"` or `"none"`.
    Please see the following [reference](https://docs.openstack.org/api-ref/compute/?expanded=create-server-detail#id11) for more information. Conflicts with `network`.
    """
    return pulumi.get(self, "network_mode")

@network_mode.setter
def network_mode(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "network_mode", value)

@property
@pulumi.getter
def networks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkArgs']]]]:
    """
    An array of one or more networks to attach to the
    instance. The network object structure is documented below. Changing this
    creates a new server.
    """
    return pulumi.get(self, "networks")

@networks.setter
def networks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkArgs']]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "networks", value)

@property
@pulumi.getter
def personalities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstancePersonalityArgs']]]]:
    """
    Customize the personality of an instance by
    defining one or more files and their contents. The personality structure
    is described below.
    """
    return pulumi.get(self, "personalities")

@personalities.setter
def personalities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstancePersonalityArgs']]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "personalities", value)
@property
@pulumi.getter(name="powerState")
def power_state(self) -> Optional[pulumi.Input[str]]:
    """
    Provide the VM state. Only 'active' and 'shutoff'
    are supported values. *Note*: If the initial power_state is the shutoff
    the VM will be stopped immediately after build and the provisioners like
    remote-exec or files are not supported.
    """
    return pulumi.get(self, "power_state")

@power_state.setter
def power_state(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "power_state", value)

@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
    """
    The region in which to create the server instance. If
    omitted, the `region` argument of the provider is used. Changing this
    creates a new server.
    """
    return pulumi.get(self, "region")

@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "region", value)

@property
@pulumi.getter(name="schedulerHints")
def scheduler_hints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSchedulerHintArgs']]]]:
    """
    Provide the Nova scheduler with hints on how
    the instance should be launched. The available hints are described below.
    """
    return pulumi.get(self, "scheduler_hints")

@scheduler_hints.setter
def scheduler_hints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSchedulerHintArgs']]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "scheduler_hints", value)
@property
@pulumi.getter(name="securityGroups")
def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    An array of one or more security group names
    to associate with the server. Changing this results in adding/removing
    security groups from the existing server. *Note*: When attaching the
    instance to networks using Ports, place the security groups on the Port
    and not the instance. *Note*: Names should be used and not ids, as ids
    trigger unnecessary updates.
    """
    return pulumi.get(self, "security_groups")

@security_groups.setter
def security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "security_groups", value)

@property
@pulumi.getter(name="stopBeforeDestroy")
def stop_before_destroy(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether to try stop instance gracefully
    before destroying it, thus giving chance for guest OS daemons to stop correctly.
    If instance doesn't stop within timeout, it will be destroyed anyway.
    """
    return pulumi.get(self, "stop_before_destroy")

@stop_before_destroy.setter
def stop_before_destroy(self, value: Optional[pulumi.Input[bool]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "stop_before_destroy", value)

@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A set of string tags for the instance. Changing this
    updates the existing instance tags.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
    """
    The user data to provide when launching the instance.
    Changing this creates a new server.
    """
    return pulumi.get(self, "user_data")

@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "user_data", value)

@property
@pulumi.getter(name="vendorOptions")
def vendor_options(self) -> Optional[pulumi.Input['InstanceVendorOptionsArgs']]:
    """
    Map of additional vendor-specific options.
    Supported options are described below.
    """
    return pulumi.get(self, "vendor_options")

@vendor_options.setter
def vendor_options(self, value: Optional[pulumi.Input['InstanceVendorOptionsArgs']]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "vendor_options", value)

@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]]:
    """
    The value of the `volumes` argument, if one was supplied.
    """
    return pulumi.get(self, "volumes")

@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]]):
    # Write straight through to the pulumi-managed property bag.
    pulumi.set(self, "volumes", value)
@pulumi.input_type
class _InstanceState:
def __init__(__self__, *,
access_ip_v4: Optional[pulumi.Input[str]] = None,
access_ip_v6: Optional[pulumi.Input[str]] = None,
admin_pass: Optional[pulumi.Input[str]] = None,
all_metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
availability_zone_hints: Optional[pulumi.Input[str]] = None,
block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceArgs']]]] = None,
config_drive: Optional[pulumi.Input[bool]] = None,
flavor_id: Optional[pulumi.Input[str]] = None,
flavor_name: Optional[pulumi.Input[str]] = None,
floating_ip: Optional[pulumi.Input[str]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
key_pair: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
network_mode: Optional[pulumi.Input[str]] = None,
networks: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkArgs']]]] = None,
personalities: Optional[pulumi.Input[Sequence[pulumi.Input['InstancePersonalityArgs']]]] = None,
power_state: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
scheduler_hints: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSchedulerHintArgs']]]] = None,
security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stop_before_destroy: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
vendor_options: Optional[pulumi.Input['InstanceVendorOptionsArgs']] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]] = None):
"""
Input properties used for looking up and filtering Instance resources.
:param pulumi.Input[str] access_ip_v4: The first detected Fixed IPv4 address.
:param pulumi.Input[str] access_ip_v6: The first detected Fixed IPv6 address.
:param pulumi.Input[str] admin_pass: <PASSWORD> to the server.
Changing this changes the root password on the existing server.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the instance, which have
been explicitly and implicitly added.
:param pulumi.Input[str] availability_zone: The availability zone in which to create
the server. Conflicts with `availability_zone_hints`. Changing this creates
a new server.
:param pulumi.Input[str] availability_zone_hints: The availability zone in which to
create the server. This argument is preferred to `availability_zone`, when
scheduling the server on a
[particular](https://docs.openstack.org/nova/latest/admin/availability-zones.html)
host or node. Conflicts with `availability_zone`. Changing this creates a
new server.
:param pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceArgs']]] block_devices: Configuration of block devices. The block_device
structure is documented below. Changing this creates a new server.
You can specify multiple block devices which will create an instance with
multiple disks. This configuration is very flexible, so please see the
following [reference](https://docs.openstack.org/nova/latest/user/block-device-mapping.html)
for more information.
:param pulumi.Input[bool] config_drive: | |
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 2
assert len(conflicts.get_resolved()) == 1
branch_builder.set_algo()
assert len(conflicts.get()) == 2
assert len(conflicts.get_resolved()) == 2
conflict = conflicts.get_resolved()[0]
assert conflict.is_resolved
assert isinstance(conflict.resolution, conflict.AlgorithmResolution)
def test_code_change(self, parent_config, changed_code_config):
    """Test if giving a proper change-type solves the code conflict"""
    conflicts = detect_conflicts(parent_config, changed_code_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Only one of the two conflicts is resolved automatically at this point.
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 1
    # Supplying a valid change type resolves the remaining code conflict.
    branch_builder.set_code_change_type(evc.adapters.CodeChange.types[0])
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[0]
    assert conflict.is_resolved
    assert isinstance(conflict, CodeConflict)
def test_bad_code_change(self, capsys, parent_config, changed_code_config):
    """Test if giving an invalid change-type prints error message and do nothing"""
    conflicts = detect_conflicts(parent_config, changed_code_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Drain anything already printed so only the error message is captured.
    capsys.readouterr()
    branch_builder.set_code_change_type('bad-type')
    out, _ = capsys.readouterr()  # stderr is unused; the message goes to stdout
    assert 'Invalid code change type' in out.split("\n")[-3]
    # The conflict set is unchanged: the code conflict stays unresolved.
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 1
def test_config_change(self, parent_config, changed_userconfig_config):
    """Test if giving a proper change-type solves the user script config conflict"""
    conflicts = detect_conflicts(parent_config, changed_userconfig_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Only one of the four conflicts is resolved automatically at this point.
    assert len(conflicts.get()) == 4
    assert len(conflicts.get_resolved()) == 1
    # Supplying a valid change type resolves the script-config conflict.
    branch_builder.set_script_config_change_type(evc.adapters.ScriptConfigChange.types[0])
    assert len(conflicts.get()) == 4
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict, ScriptConfigConflict)
def test_bad_config_change(self, capsys, parent_config, changed_userconfig_config):
    """Test if giving an invalid change-type prints error message and do nothing"""
    conflicts = detect_conflicts(parent_config, changed_userconfig_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Drain anything already printed so only the error message is captured.
    capsys.readouterr()
    branch_builder.set_script_config_change_type('bad-type')
    out, _ = capsys.readouterr()  # stderr is unused; the message goes to stdout
    assert 'Invalid script\'s config change type' in out.split("\n")[-3]
    # The conflict set is unchanged: the script-config conflict stays unresolved.
    assert len(conflicts.get()) == 4
    assert len(conflicts.get_resolved()) == 1
@pytest.mark.skip(reason='Args defined with \'=\' are not supported currently.')
def test_cli_change(self, parent_config, changed_cli_config):
    """Test if giving a proper change-type solves the command line conflict"""
    conflicts = detect_conflicts(parent_config, changed_cli_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Only one of the two conflicts is resolved automatically at this point.
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 1
    # Supplying a valid change type resolves the command-line conflict.
    branch_builder.set_cli_change_type(evc.adapters.CommandLineChange.types[0])
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict, CommandLineConflict)
@pytest.mark.skip(reason='Args defined with \'=\' are not supported currently.')
def test_bad_cli_change(self, capsys, parent_config, changed_cli_config):
    """Test if giving an invalid change-type prints error message and do nothing"""
    conflicts = detect_conflicts(parent_config, changed_cli_config)
    branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    # Drain anything already printed so only the error message is captured.
    capsys.readouterr()
    branch_builder.set_cli_change_type('bad-type')
    out, _ = capsys.readouterr()  # stderr is unused; the message goes to stdout
    assert 'Invalid cli change type' in out.split("\n")[-3]
    # The conflict set is unchanged: the command-line conflict stays unresolved.
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 1
def test_solve_all_automatically(self, conflicts):
    """Test if all conflicts all automatically resolve by the ExperimentBranchBuilder."""
    # Without manual_resolution, construction alone resolves every conflict.
    ExperimentBranchBuilder(conflicts, {})
    assert len(conflicts.get_resolved()) == 8
class TestResolutionsWithMarkers(object):
"""Test resolution of conflicts with markers"""
def test_add_new(self, parent_config, new_config):
    """Test if new dimension conflict is automatically resolved"""
    # The '~+' marker requests the add-dimension resolution inline.
    new_config['metadata']['user_args'][-1] = '-w_d~+normal(0,1)'
    conflicts = detect_conflicts(parent_config, new_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict.resolution, conflict.AddDimensionResolution)
def test_add_new_default(self, parent_config, new_config):
    """Test if new dimension conflict is automatically resolved"""
    # The '~+' marker with a default_value also sets the resolution default.
    new_config['metadata']['user_args'][-1] = '-w_d~+normal(0,1,default_value=0)'
    backward.populate_priors(new_config['metadata'])
    conflicts = detect_conflicts(parent_config, new_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict.resolution, conflict.AddDimensionResolution)
    assert conflict.resolution.default_value == 0.0
def test_add_bad_default(self, parent_config, new_config):
    """Test if new dimension conflict raises an error if marked with invalid default value"""
    # A non-numeric default for a normal prior must be rejected at detection.
    new_config['metadata']['user_args'][-1] = '-w_d~+normal(0,1,default_value=\'a\')'
    backward.populate_priors(new_config['metadata'])
    with pytest.raises(TypeError) as exc:
        detect_conflicts(parent_config, new_config)
    assert "Parameter \'/w_d\': Incorrect arguments." in str(exc.value)
def test_add_changed(self, parent_config, changed_config):
    """Test if changed dimension conflict is automatically resolved"""
    # Turn the plain '~' prior marker into '~+' to request auto-resolution.
    changed_config['metadata']['user_args'][2] = (
        changed_config['metadata']['user_args'][2].replace("~", "~+"))
    conflicts = detect_conflicts(parent_config, changed_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[0]
    assert conflict.is_resolved
    assert isinstance(conflict.resolution, conflict.ChangeDimensionResolution)
def test_remove_missing(self, parent_config, child_config):
    """Test if missing dimension conflict is automatically resolved"""
    # The '~-' marker requests the remove-dimension resolution inline.
    child_config['metadata']['user_args'][1] = '-x~-'
    backward.populate_priors(child_config['metadata'])
    conflicts = detect_conflicts(parent_config, child_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict.resolution, conflict.RemoveDimensionResolution)
def test_remove_missing_default(self, parent_config, child_config):
    """Test if missing dimension conflict is automatically resolved"""
    # '~-0.5' removes the dimension and carries 0.5 as the default value.
    child_config['metadata']['user_args'][1] = '-x~-0.5'
    backward.populate_priors(child_config['metadata'])
    conflicts = detect_conflicts(parent_config, child_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 2
    conflict = conflicts.get_resolved()[1]
    assert conflict.is_resolved
    assert isinstance(conflict.resolution, conflict.RemoveDimensionResolution)
    assert conflict.resolution.default_value == 0.5
def test_remove_missing_bad_default(self, parent_config, child_config):
    """Test if missing dimension conflict raises an error if marked with invalid default"""
    # -100 is outside the dimension's interval, so the marker is rejected
    # and the conflict is left unresolved instead of raising.
    child_config['metadata']['user_args'][1] = '-x~--100'
    backward.populate_priors(child_config['metadata'])
    conflicts = detect_conflicts(parent_config, child_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 2
    assert len(conflicts.get_resolved()) == 1
    conflict = conflicts.get()[1]
    assert not conflict.is_resolved
    assert isinstance(conflict, MissingDimensionConflict)
def test_rename_missing(self, parent_config, child_config):
    """Test if renaming is automatically applied with both conflicts resolved"""
    child_config['metadata']['user_args'].append('-w_a~uniform(0,1)')
    child_config['metadata']['user_args'].append('-w_b~normal(0,1)')
    # The '~>' marker renames dimension x to w_a.
    child_config['metadata']['user_args'][1] = '-x~>w_a'
    backward.populate_priors(child_config['metadata'])
    conflicts = detect_conflicts(parent_config, child_config)
    ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
    assert len(conflicts.get()) == 4
    # Both sides of the rename (x and w_a) are resolved; w_b is untouched.
    assert conflicts.get([ExperimentNameConflict])[0].is_resolved
    assert conflicts.get(dimension_name='x')[0].is_resolved
    assert conflicts.get(dimension_name='w_a')[0].is_resolved
    assert not conflicts.get(dimension_name='w_b')[0].is_resolved
    resolved_conflicts = conflicts.get_resolved()
    assert len(resolved_conflicts) == 3
    # One RenameDimensionResolution object is shared by both conflicts.
    assert resolved_conflicts[1].resolution is resolved_conflicts[2].resolution
    assert isinstance(resolved_conflicts[1].resolution,
                      resolved_conflicts[1].RenameDimensionResolution)
    assert resolved_conflicts[1].resolution.conflict.dimension.name == '/x'
    assert resolved_conflicts[1].resolution.new_dimension_conflict.dimension.name == '/w_a'
def test_rename_invalid(self, parent_config, child_config):
"""Test if renaming to invalid dimension raises an error"""
child_config['metadata']['user_args'].append('-w_a~uniform(0,1)')
child_config['metadata']['user_args'].append('-w_b~uniform(0,1)')
child_config['metadata']['user_args'][1] = '-x~>w_c'
backward.populate_priors(child_config['metadata'])
conflicts = detect_conflicts(parent_config, child_config)
with pytest.raises(ValueError) as exc:
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert "Dimension name 'w_c' not found in conflicts" in str(exc.value)
def test_rename_missing_changed(self, parent_config, child_config):
"""Test if renaming is automatically applied with both conflicts resolved,
but not the new one because of prior change
"""
child_config['metadata']['user_args'].append('-w_a~uniform(0,1)')
child_config['metadata']['user_args'].append('-w_b~normal(0,1)')
child_config['metadata']['user_args'][1] = '-x~>w_b'
backward.populate_priors(child_config['metadata'])
conflicts = detect_conflicts(parent_config, child_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 5
assert conflicts.get([ExperimentNameConflict])[0].is_resolved
assert conflicts.get(dimension_name='x')[0].is_resolved
assert conflicts.get([NewDimensionConflict], dimension_name='w_b')[0].is_resolved
assert not conflicts.get([ChangedDimensionConflict], dimension_name='w_b')[0].is_resolved
assert not conflicts.get(dimension_name='w_a')[0].is_resolved
resolved_conflicts = conflicts.get_resolved()
assert len(resolved_conflicts) == 3
assert resolved_conflicts[1].resolution is resolved_conflicts[2].resolution
assert isinstance(resolved_conflicts[1].resolution,
resolved_conflicts[1].RenameDimensionResolution)
assert resolved_conflicts[1].resolution.conflict.dimension.name == '/x'
assert resolved_conflicts[1].resolution.new_dimension_conflict.dimension.name == '/w_b'
def test_rename_missing_changed_marked(self, parent_config, child_config):
"""Test if renaming is automatically applied with all conflicts resolved including
the new one caused by prior change
"""
child_config['metadata']['user_args'].append('-w_a~uniform(0,1)')
child_config['metadata']['user_args'].append('-w_b~+normal(0,1)')
child_config['metadata']['user_args'][1] = '-x~>w_b'
backward.populate_priors(child_config['metadata'])
conflicts = detect_conflicts(parent_config, child_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 5
assert conflicts.get([ExperimentNameConflict])[0].is_resolved
assert conflicts.get(dimension_name='x')[0].is_resolved
assert conflicts.get([NewDimensionConflict], dimension_name='w_b')[0].is_resolved
assert conflicts.get([ChangedDimensionConflict], dimension_name='w_b')[0].is_resolved
assert not conflicts.get(dimension_name='w_a')[0].is_resolved
resolved_conflicts = conflicts.get_resolved()
assert len(resolved_conflicts) == 4
assert resolved_conflicts[1].resolution is resolved_conflicts[2].resolution
assert isinstance(resolved_conflicts[1].resolution,
resolved_conflicts[1].RenameDimensionResolution)
assert resolved_conflicts[1].resolution.conflict.dimension.name == '/x'
assert resolved_conflicts[1].resolution.new_dimension_conflict.dimension.name == '/w_b'
def test_name_experiment(self, parent_config, child_config, create_db_instance):
"""Test if experiment name conflict is automatically resolved"""
new_name = 'test2'
create_db_instance.write('experiments', parent_config)
conflicts = detect_conflicts(parent_config, child_config)
ExperimentBranchBuilder(conflicts, {'branch': new_name})
assert len(conflicts.get()) == 1
assert len(conflicts.get_resolved()) == 1
conflict = conflicts.get()[0]
assert conflict.resolution.new_name == new_name
assert conflict.new_config['name'] == new_name
assert conflict.is_resolved
def test_bad_name_experiment(self, parent_config, child_config, monkeypatch):
"""Test if experiment name conflict is not resolved when invalid name is marked"""
def _is_unique(self, *args, **kwargs):
return False
monkeypatch.setattr(ExperimentNameConflict.ExperimentNameResolution, "_name_is_unique",
_is_unique)
conflicts = detect_conflicts(parent_config, child_config)
ExperimentBranchBuilder(conflicts, {'branch': 'test2'})
assert len(conflicts.get()) == 1
assert len(conflicts.get_resolved()) == 0
def test_code_change(self, parent_config, changed_code_config):
"""Test if code conflict is resolved automatically"""
change_type = evc.adapters.CodeChange.types[0]
changed_code_config['code_change_type'] = change_type
conflicts = detect_conflicts(parent_config, changed_code_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 2
assert len(conflicts.get_resolved()) == 2
conflict = conflicts.get_resolved()[0]
assert conflict.is_resolved
assert isinstance(conflict.resolution, conflict.CodeResolution)
assert conflict.resolution.type == change_type
def test_algo_change(self, parent_config, changed_algo_config):
"""Test if algorithm conflict is resolved automatically"""
changed_algo_config['algorithm_change'] = True
conflicts = detect_conflicts(parent_config, changed_algo_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 2
assert len(conflicts.get_resolved()) == 2
conflict = conflicts.get_resolved()[0]
assert conflict.is_resolved
assert isinstance(conflict.resolution, conflict.AlgorithmResolution)
def test_config_change(self, parent_config, changed_userconfig_config):
"""Test if user's script's config conflict is resolved automatically"""
change_type = evc.adapters.ScriptConfigChange.types[0]
changed_userconfig_config['config_change_type'] = change_type
conflicts = detect_conflicts(parent_config, changed_userconfig_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 4
assert len(conflicts.get_resolved()) == 2
conflict = conflicts.get_resolved()[1]
assert conflict.is_resolved
assert isinstance(conflict.resolution, conflict.ScriptConfigResolution)
assert conflict.resolution.type == change_type
@pytest.mark.skip(reason='Args defined with \'=\' are not supported currently.')
def test_cli_change(self, parent_config, changed_cli_config):
"""Test if command line conflict is resolved automatically"""
change_type = evc.adapters.CommandLineChange.types[0]
changed_cli_config['cli_change_type'] = change_type
conflicts = detect_conflicts(parent_config, changed_cli_config)
ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
assert len(conflicts.get()) == 2
assert len(conflicts.get_resolved()) == 2
conflict = conflicts.get_resolved()[0]
assert conflict.is_resolved
assert isinstance(conflict.resolution, conflict.CommandLineResolution)
assert conflict.resolution.type == change_type
class TestAdapters(object):
"""Test creation of adapters"""
def test_adapter_add_new(self, parent_config, cl_config):
"""Test if a DimensionAddition is created when solving a new conflict"""
cl_config['metadata']['user_args'] = ['-w_d~+normal(0,1)']
conflicts = detect_conflicts(parent_config, cl_config)
branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
adapters = branch_builder.create_adapters().adapters
assert len(conflicts.get_resolved()) == 2
assert len(adapters) == 1
assert isinstance(adapters[0], evc.adapters.DimensionAddition)
def test_adapter_add_changed(self, parent_config, cl_config):
"""Test if a DimensionPriorChange is created when solving a new conflict"""
cl_config['metadata']['user_args'] = ['-y~+uniform(0,1)']
conflicts = detect_conflicts(parent_config, cl_config)
branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
adapters = branch_builder.create_adapters().adapters
assert len(conflicts.get_resolved()) == 2
assert len(adapters) == 1
assert isinstance(adapters[0], evc.adapters.DimensionPriorChange)
def test_adapter_remove_missing(self, parent_config, cl_config):
"""Test if a DimensionDeletion is created when solving a new conflict"""
cl_config['metadata']['user_args'] = ['-z~-']
conflicts = detect_conflicts(parent_config, cl_config)
branch_builder = ExperimentBranchBuilder(conflicts, {'manual_resolution': True})
adapters = | |
# -*- coding: utf-8 -*-
import urllib
import ConfigParser
import io
import StringIO
import gevent
import json
import pytz
import random
import zipfile
import re
from configobj import ConfigObj
from datetime import datetime, timedelta
from jsonfield import JSONField
from django.db import models, transaction
from django.template import Template
from django_extensions.db.fields import UUIDField, ShortUUIDField
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver, Signal
from django.db.models import F
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import serializers
from django.utils import timezone
from fastapp.queue import generate_vhost_configuration, create_vhost
from fastapp.executors.remote import distribute, CONFIGURATION_EVENT, SETTINGS_EVENT
from fastapp.utils import Connection
from fastapp.plugins import call_plugin_func
from fastapp.plugins import PluginRegistry
from sequence_field.fields import SequenceField
import logging
logger = logging.getLogger(__name__)
# Default index.html content for a newly created Base (Django template source).
index_template = """{% extends "fastapp/base.html" %}
{% block content %}
{% endblock %}
"""
class AuthProfile(models.Model):
    """Per-user profile holding the Dropbox OAuth credentials."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name="authprofile")
    access_token = models.CharField(max_length=72, help_text="Access token for dropbox-auth")
    dropbox_userid = models.CharField(max_length=32,
                                      help_text="Userid on dropbox",
                                      default=None, null=True)

    def __unicode__(self):
        return self.user.username
class Base(models.Model):
name = models.CharField(max_length=32)
uuid = UUIDField(auto=True)
content = models.CharField(max_length=16384,
blank=True,
default=index_template)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='bases')
public = models.BooleanField(default=False)
static_public = models.BooleanField(default=False)
foreign_apys = models.ManyToManyField("Apy", related_name="foreign_base")
class Meta:
unique_together = (("name", "user"),)
@property
def url(self):
return "/fastapp/%s" % self.name
@property
def shared(self):
return "/fastapp/%s/index/?shared_key=%s" % (
self.name,
urllib.quote(self.uuid))
@property
def auth_token(self):
return self.user.authprofile.access_token
@property
def config(self):
config_string = StringIO.StringIO()
config = ConfigObj()
# execs
config['modules'] = {}
for texec in self.apys.all():
config['modules'][texec.name] = {}
config['modules'][texec.name]['module'] = texec.name+".py"
config['modules'][texec.name]['public'] = texec.public
if texec.description:
config['modules'][texec.name]['description'] = texec.description
else:
config['modules'][texec.name]['description'] = ""
# settings
config['settings'] = {}
for setting in self.setting.all():
if setting.public:
config['settings'][setting.key] = {
'value': setting.value,
'public': setting.public
}
else:
config['settings'][setting.key] = {
'value': "",
'public': setting.public
}
config.write(config_string)
return config_string.getvalue()
def refresh(self, put=False):
connection = Connection(self.user.authprofile.access_token)
template_name = "%s/index.html" % self.name
template_content = connection.get_file_content(template_name)
self.content = template_content
def refresh_execs(self, exec_name=None, put=False):
from fastapp.utils import Connection, NotFound
# execs
connection = Connection(self.user.authprofile.access_token)
app_config = "%s/app.config" % self.name
config = ConfigParser.RawConfigParser()
config.readfp(io.BytesIO(connection.get_file_content(app_config)))
if put:
if exec_name:
connection.put_file("%s/%s.py" % (self.name, exec_name),
self.execs.get(name=exec_name).module)
connection.put_file(app_config, self.config)
else:
raise Exception("exec_name not specified")
else:
for name in config.sections():
module_name = config.get(name, "module")
try:
module_content = connection.get_file_content("%s/%s" % (self.name, module_name))
except NotFound:
try:
Exec.objects.get(name=module_name, base=self).delete()
except Exec.DoesNotExist, e:
self.save()
# save new exec
app_exec_model, created = Apy.objects.get_or_create(base=self, name=name)
app_exec_model.module = module_content
app_exec_model.save()
# delete old exec
for local_exec in Apy.objects.filter(base=self).values('name'):
if local_exec['name'] in config.sections():
logger.warn()
else:
Apy.objects.get(base=self, name=local_exec['name']).delete()
def export(self):
# create in-memory zipfile
buffer = StringIO.StringIO()
zf = zipfile.ZipFile(buffer, mode='w')
# add modules
for apy in self.apys.all():
logger.info("add %s to zip" % apy.name)
zf.writestr("%s.py" % apy.name, apy.module.encode("utf-8"))
# add static files
try:
dropbox_connection = Connection(self.auth_token)
try:
zf = dropbox_connection.directory_zip("%s/static" % self.name, zf)
except Exception, e:
logger.warn(e)
except AuthProfile.DoesNotExist, e:
logger.warn(e)
except Exception, e:
logger.warn(e.__class__)
logger.exception(e)
# add config
zf.writestr("app.config", self.config.encode("utf-8"))
# add index.html
zf.writestr("index.html", self.content.encode("utf-8"))
# close zip
zf.close()
return buffer
def template(self, context):
t = Template(self.content)
return t.render(context)
@property
def state(self):
"""
States:
- DELETING
- DESTROYED
- STOPPED
- CREATING
- STARTING
- STARTED
- INITIALIZING
- RUNNING
Restart:
- RUNNING
- STOPPED
- STARTING
- STARTED
- RUNNING
Creation:
- CREATING
- STARTING
- STARTED
- INITIALIZING
- RUNNING
Destroy:
- RUNNING / STOPPED
- DELETING
- DESTROYED
"""
try:
return self.executor.is_running()
except (IndexError, Executor.DoesNotExist):
return False
@property
def executors(self):
try:
if self.executor.pid is None:
return []
return [
{
'pid': self.executor.pid,
'port': self.executor.port,
'ip': self.executor.ip,
'ip6': self.executor.ip6,
'plugins': self.executor.plugins
}
]
except Exception, e:
logger.exception(e)
return []
def start(self):
try:
self.executor
except Executor.DoesNotExist:
logger.debug("create executor for base %s" % self)
executor = Executor(base=self)
executor.save()
if not self.executor.is_running():
r = self.executor.start()
# call plugin
logger.info("on_start_base starting...")
call_plugin_func(self, "on_start_base")
logger.info("on_start_base done...")
return r
return None
def stop(self):
return self.executor.stop()
def destroy(self):
call_plugin_func(self, "on_destroy_base")
return self.executor.destroy()
def __str__(self):
return "<Base: %s>" % self.name
def save_and_sync(self, **kwargs):
ready_to_sync.send(self.__class__, instance=self)
self.save(**kwargs)
#def save(self, **kwargs):
# logger.debug("create executor for base %s" % self)
# print self.__dict__
# if not hasattr(self, 'executor'):
# executor = Executor(base=self)
# executor.save()
# self.save(**kwargs)
MODULE_DEFAULT_CONTENT = """def func(self):\n pass"""
class Apy(models.Model):
    """A single executable module ("apy") belonging to a Base."""
    name = models.CharField(max_length=64)
    module = models.CharField(max_length=16384, default=MODULE_DEFAULT_CONTENT)
    base = models.ForeignKey(Base, related_name="apys", blank=True, null=True)
    description = models.CharField(max_length=1024, blank=True, null=True)
    public = models.BooleanField(default=False)
    everyone = models.BooleanField(default=False)
    rev = models.CharField(max_length=32, blank=True, null=True)
    schedule = models.CharField(max_length=64, null=True, blank=True)

    def mark_executed(self):
        """Increment the executed counter (created lazily) atomically."""
        with transaction.atomic():
            if not hasattr(self, "counter"):
                self.counter = Counter(apy=self)
                self.counter.save()
            self.counter.executed = F('executed')+1
            self.counter.save()

    def mark_failed(self):
        """Increment the failed counter (created lazily).

        Fix: wrapped in transaction.atomic() for consistency with
        mark_executed, so counter creation and increment commit together.
        """
        with transaction.atomic():
            if not hasattr(self, "counter"):
                self.counter = Counter(apy=self)
                self.counter.save()
            self.counter.failed = F('failed')+1
            self.counter.save()

    def get_exec_url(self):
        return "/fastapp/base/%s/exec/%s/?json=" % (self.base.name, self.id)

    def save_and_sync(self, **kwargs):
        # Signal listeners first, then persist.
        ready_to_sync.send(self.__class__, instance=self)
        self.save(**kwargs)

    def __str__(self):
        return "%s %s" % (self.name, str(self.id))
class Counter(models.Model):
    """Execution statistics (success/failure counts) for a single Apy."""
    apy = models.OneToOneField(Apy, related_name="counter")
    executed = models.IntegerField(default=0)
    failed = models.IntegerField(default=0)
# Transaction lifecycle states.
RUNNING = "R"
FINISHED = "F"
TIMEOUT = "T"
TRANSACTION_STATE_CHOICES = (
    ('R', 'RUNNING'),
    ('F', 'FINISHED'),
    ('T', 'TIMEOUT'),
)
def create_random():
    """Return a random 8-digit id drawn from the OS-level CSPRNG."""
    return random.SystemRandom().randint(10000000, 99999999)
class Transaction(models.Model):
rid = models.IntegerField(primary_key=True, default=create_random)
apy = models.ForeignKey(Apy, related_name="transactions")
status = models.CharField(max_length=1, choices=TRANSACTION_STATE_CHOICES, default=RUNNING)
created = models.DateTimeField(default=timezone.now, null=True)
modified = models.DateTimeField(auto_now=True, null=True)
tin = JSONField(blank=True, null=True)
tout = JSONField(blank=True, null=True)
async = models.BooleanField(default=False)
@property
def duration(self):
td = self.modified - self.created
return td.days*86400000 + td.seconds*1000 + td.microseconds/1000
def log(self, level, msg):
logentry = LogEntry(transaction=self)
logentry.msg = msg
logentry.level = str(level)
logentry.save()
def save(self, *args, **kwargs):
super(self.__class__, self).save(*args, **kwargs)
@property
def apy_name(self):
return self.apy.name
@property
def base_name(self):
return self.apy.base.name
# Level codes mirror the stdlib logging numeric levels, stored as strings.
LOG_LEVELS = (
    ("10", 'DEBUG'),
    ("20", 'INFO'),
    ("30", 'WARNING'),
    ("40", 'ERROR'),
    ("50", 'CRITICAL')
)
class LogEntry(models.Model):
    """A single log line emitted during a Transaction."""
    transaction = models.ForeignKey(Transaction, related_name="logs")
    created = models.DateTimeField(auto_now_add=True, null=True)
    level = models.CharField(max_length=2, choices=LOG_LEVELS)
    msg = models.TextField()

    def level_verbose(self):
        """Return the human-readable name for the numeric level code."""
        return dict(LOG_LEVELS)[self.level]

    @property
    def slevel(self):
        return self.level_verbose()

    @property
    def tid(self):
        # Id of the owning transaction.
        return self.transaction.rid
class Setting(models.Model):
    """Key/value configuration entry of a Base; non-public values are
    masked when exported (see Base.config)."""
    base = models.ForeignKey(Base, related_name="setting")
    key = models.CharField(max_length=128)
    value = models.CharField(max_length=8192)
    public = models.BooleanField(default=False, null=False, blank=False)
class Instance(models.Model):
    """Worker instance belonging to an Executor, tracked via heartbeats."""
    is_alive = models.BooleanField(default=False)
    uuid = ShortUUIDField(auto=True)
    last_beat = models.DateTimeField(null=True, blank=True)
    executor = models.ForeignKey("Executor", related_name="instances")

    def mark_down(self):
        """Flag this instance as no longer alive and persist immediately."""
        self.is_alive = False
        self.save()

    def __str__(self):
        return "Instance: %s" % (self.executor.base.name)
class Host(models.Model):
    # Named machine; referenced only by name here.
    name = models.CharField(max_length=50)
class Process(models.Model):
    """Heartbeat record for a worker process."""
    running = models.DateTimeField(auto_now=True)
    name = models.CharField(max_length=64, null=True)
    # Bugfix: max_length is not a valid IntegerField option; removed.
    rss = models.IntegerField(default=0)
    version = models.CharField(max_length=7, default=0)

    def up(self):
        pass
        # logger.info("Heartbeat is up")

    def is_up(self):
        """True if the last heartbeat ('running') is younger than 10 seconds."""
        now = datetime.utcnow().replace(tzinfo=pytz.utc)
        delta = now - self.running
        return (delta < timedelta(seconds=10))
class Thread(models.Model):
    """Health record for one worker thread of a Process."""
    STARTED = "SA"
    STOPPED = "SO"
    NOT_CONNECTED = "NC"
    HEALTH_STATE_CHOICES = (
        (STARTED, "Started"),
        (STOPPED, "Stopped"),
        (NOT_CONNECTED, "Not connected")
    )
    name = models.CharField(max_length=64, null=True)
    parent = models.ForeignKey(Process, related_name="threads", blank=True, null=True)
    health = models.CharField(max_length=2,
                              choices=HEALTH_STATE_CHOICES,
                              default=STOPPED)
    updated = models.DateTimeField(auto_now=True, null=True)

    def started(self):
        """Mark the thread healthy and persist."""
        self.health = Thread.STARTED
        self.save()

    def not_connected(self):
        """Mark the thread as unreachable and persist."""
        self.health = Thread.NOT_CONNECTED
        self.save()
def default_pass():
    """Default password generator for new Executor records."""
    return get_user_model().objects.make_random_password()
class Executor(models.Model):
base = models.OneToOneField(Base, related_name="executor")
num_instances = models.IntegerField(default=1)
pid = models.CharField(max_length=72, null=True)
password = models.CharField(max_length=20, default=default_pass)
started = models.BooleanField(default=False)
ip = models.GenericIPAddressField(null=True)
ip6 = models.GenericIPAddressField(null=True)
port = SequenceField(
key='test.sequence.1',
template='1%NNNN',
auto=True,
null=True
)
def __init__(self, *args, **kwargs):
super(Executor, self ).__init__(*args, **kwargs)
self.attach_plugins()
def attach_plugins(self):
# attach plugins
plugins = PluginRegistry()
if not hasattr(self, "plugins"):
self.plugins = {}
for plugin in plugins:
logger.debug("Attach %s.return_to_executor to executor instance '%s'" % (plugin.name, self.base.name))
if hasattr(plugin, "return_to_executor"):
self.plugins[plugin.name.lower()] = plugin.return_to_executor(self)
@property
def vhost(self):
return generate_vhost_configuration(self.base.user.username,
self.base.name)
@property
def implementation(self):
s_exec = getattr(settings, 'FASTAPP_WORKER_IMPLEMENTATION',
'fastapp.executors.worker_engines.spawnproc.SpawnExecutor')
regex = re.compile("(.*)\.(.*)")
r = regex.search(s_exec)
s_mod = r.group(1)
s_cls = r.group(2)
m = __import__(s_mod, globals(), locals(), [s_cls])
try:
cls = m.__dict__[s_cls]
return cls(
vhost=self.vhost,
base_name=self.base.name,
username=self.base.name,
password=<PASSWORD>,
executor=self
)
except KeyError, e:
logger.error("Could not load %s" % s_exec)
raise e
def start(self):
logger.info("Start manage.py start_worker")
create_vhost(self.base)
try:
instance = Instance.objects.get(executor=self)
logger.info("Instance found with id %s" % instance.id)
except Instance.DoesNotExist, e:
instance = Instance(executor=self)
instance.save()
logger.info("Instance for '%s' created with id %s" % (self, instance.id))
kwargs = {}
if self.port:
kwargs['service_ports'] = [self.port]
try:
logger.info("START Start with implementation")
self.pid = self.implementation.start(self.pid, **kwargs)
logger.info("END Start with implementation")
except Exception, e:
raise e
logger.info("%s: worker started with pid %s" % (self, self.pid))
self.started = True
ips = self.implementation.addresses(self.pid, port=self.port)
self.ip = ips['ip']
self.ip6 = ips['ip6']
self.save()
logger.info("%s: worker saved with pid %s" % (self, self.pid))
def stop(self):
logger.info("Stop worker with PID %s" % self.pid)
self.implementation.stop(self.pid)
if not self.implementation.state(self.pid):
self.started = False
# Threads
try:
process = Process.objects.get(name=self.vhost)
for thread in process.threads.all():
thread.delete()
except Exception:
pass
self.save()
logger.info("Stopped worker with PID %s" % self.pid)
def restart(self):
self.stop()
self.start()
def destroy(self):
self.implementation.destroy(self.pid)
self.pid = None
self.started = False
self.save()
def is_running(self):
# if no pid, return directly false
if not self.pid:
return | |
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 405)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 204)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.drafts().first().pk, "en")
self.assertEqual(response.status_code, 403)
def test_remove_plugin_requires_post(self):
ph = self.page.placeholders.all()[0]
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
endpoint = self.get_delete_plugin_uri(plugin, container=self.page)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
data = {
'plugin_id': sub_col.pk,
'placeholder_id': source.id,
'plugin_parent': col2.pk,
'target_language': 'de'
}
endpoint = self.get_move_plugin_uri(sub_col)
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
    def test_preview_page(self):
        """preview_page 404s on an unknown pk, otherwise redirects to the
        page's frontend URL in edit mode — also across sites."""
        permless = self.get_permless()
        with self.login_user_context(permless):
            request = self.get_request()
            # Unknown page id -> Http404
            self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
        page = self.get_page()
        page.publish("en")
        page.set_as_homepage()
        # A second site with its own page, to exercise cross-site preview.
        new_site = Site.objects.create(id=2, domain='django-cms.org', name='django-cms')
        new_page = create_page("testpage", "nav_playground.html", "fr", site=new_site, published=True)
        base_url = page.get_absolute_url()
        with self.login_user_context(permless):
            request = self.get_request('/?public=true')
            # Redirect goes to the page URL with the edit-on toolbar parameter.
            response = self.admin_class.preview_page(request, page.pk, 'en')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
            request = self.get_request()
            response = self.admin_class.preview_page(request, page.pk, 'en')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
            # Switch active site
            request.session['cms_admin_site'] = new_site.pk
            # Preview page attached to active site but not to current site:
            # redirect must be absolute, pointing at the other site's domain.
            response = self.admin_class.preview_page(request, new_page.pk, 'fr')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'],
                             'http://django-cms.org/fr/testpage/?%s&language=fr' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'target_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'target_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_global_children(self):
from urllib.parse import urlencode
conf = {
'body': {
'limits': {
'global_children': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
link = add_plugin(body, 'LinkPlugin', 'en', name='text', external_link='http://test.test/')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
url = admin_reverse('cms_page_add_plugin')
response = self.client.post('{}?{}'.format(url,urlencode(data)))
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_parent': link.pk,
'plugin_language': 'en',
}
response = self.client.post('{}?{}'.format(url,urlencode(data)))
self.assertEqual(response.status_code, 302)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
    def test_edit_title_languages(self):
        # NOTE(review): body is byte-identical to test_edit_title_dirty_bit
        # above; the name suggests a multi-language scenario — confirm intent.
        language = "en"
        admin_user = self.get_admin()
        page = create_page('A', 'nav_playground.html', language)
        page_admin = PageAdmin(Page, None)
        page_admin._current_page = page
        page.publish("en")
        draft_page = page.get_draft_object()
        admin_url = reverse("admin:cms_page_edit_title_fields", args=(
            draft_page.pk, language
        ))
        post_data = {
            'title': "A Title"
        }
        with self.login_user_context(admin_user):
            self.client.post(admin_url, post_data)
            draft_page = Page.objects.get(pk=page.pk).get_draft_object()
            self.assertTrue(draft_page.is_dirty('en'))
class NoDBAdminTests(CMSTestCase):
    """Admin checks that need no database fixtures."""

    @property
    def admin_class(self):
        # The PageAdmin instance registered for the Page model.
        return site._registry[Page]

    def test_lookup_allowed_site__exact(self):
        self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))

    def test_lookup_allowed_published(self):
        self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
    """Helpers and checks around plugin-level permissions."""

    def setUp(self):
        # One page with a placeholder that the tests add plugins to.
        self._page = create_page('test page', 'nav_playground.html', 'en')
        self._placeholder = self._page.placeholders.all()[0]

    def _get_admin(self):
        """Create and return a saved staff user (no permissions attached)."""
        User = get_user_model()
        fields = dict(email="<EMAIL>", is_staff=True, is_active=True)
        if (User.USERNAME_FIELD != 'email'):
            fields[User.USERNAME_FIELD] = "admin"
        admin_user = User(**fields)
        admin_user.set_password('<PASSWORD>')
        admin_user.save()
        return admin_user

    def _get_page_admin(self):
        return admin.site._registry[Page]

    def _give_permission(self, user, model, permission_type, save=True):
        """Grant one model permission. NOTE(review): 'save' is unused here."""
        codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
        user.user_permissions.add(Permission.objects.get(codename=codename))

    def _give_page_permission_rights(self, user):
        self._give_permission(user, PagePermission, 'add')
        self._give_permission(user, PagePermission, 'change')
        self._give_permission(user, PagePermission, 'delete')

    def _get_change_page_request(self, user, page):
        # Lightweight stand-in for a real HttpRequest.
        return type('Request', (object,), {
            'user': user,
            'path': base.URL_CMS_PAGE_CHANGE % page.pk
        })

    def _give_cms_permissions(self, user, save=True):
        """Grant add/change/delete on Page and Title plus a GlobalPagePermission."""
        for perm_type in ['add', 'change', 'delete']:
            for model in [Page, Title]:
                self._give_permission(user, model, perm_type, False)
        gpp = GlobalPagePermission.objects.create(
            user=user,
            can_change=True,
            can_delete=True,
            can_change_advanced_settings=False,
            can_publish=True,
            can_change_permissions=False,
            can_move_page=True,
        )
        gpp.sites = Site.objects.all()
        if save:
            user.save()

    def _create_plugin(self):
        plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
        return plugin

    def test_plugin_edit_wrong_url(self):
        """User tries to edit a plugin using a random url. 404 response returned"""
        plugin = self._create_plugin()
        _, normal_guy = self._get_guys()
        if get_user_model().USERNAME_FIELD == 'email':
            self.client.login(username='<EMAIL>', password='<EMAIL>')
        else:
            self.client.login(username='test', password='<PASSWORD>')
        self._give_permission(normal_guy, Text, 'change')
        # Deliberately malformed URL: a valid admin URL with junk appended.
        url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
        response = self.client.post(url, dict())
        self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
        self.assertTrue("Plugin not found" in force_str(response.content))
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
"""
A manual path needs to be stripped from leading and trailing slashes.
"""
superuser = self.get_superuser()
cms_page = create_page('test page', 'nav_playground.html', 'en')
page_data = {
'overwrite_url': '/overwrite/url/',
'template': cms_page.template,
}
endpoint = self.get_admin_url(Page, 'advanced', cms_page.pk)
with self.login_user_context(superuser):
response = self.client.post(endpoint, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertSequenceEqual(
cms_page.title_set.values_list('path', 'has_url_overwrite'),
[('overwrite/url', True)],
)
def test_missmatching_site_parent_dotsite(self):
superuser = self.get_superuser()
new_site = Site.objects.create(id=2, domain='foo.com', name='foo.com')
parent_page = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=new_site)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'parent_node': parent_page.node.pk,
}
with self.login_user_context(superuser):
# Invalid parent
response = self.client.post(self.get_admin_url(Page, 'add'), new_page_data)
expected_error = (
'<ul class="errorlist">'
'<li>Site doesn't match the parent's page site</li></ul>'
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, expected_error, html=True)
def test_form_errors(self):
superuser = self.get_superuser()
site0 = Site.objects.create(id=2, domain='foo.com', name='foo.com')
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'parent_node': page1.node.pk,
}
with self.login_user_context(superuser):
# Invalid parent
response = self.client.post(self.get_admin_url(Page, 'add'), new_page_data)
expected_error = (
'<ul class="errorlist">'
'<li>Site doesn't match the parent's page site</li></ul>'
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, expected_error, html=True)
new_page_data = {
'title': 'Title',
'slug': '#',
}
with self.login_user_context(superuser):
# Invalid slug
response = self.client.post(self.get_admin_url(Page, 'add'), new_page_data)
expected_error = '<ul class="errorlist"><li>Slug must not be empty.</li></ul>'
self.assertEqual(response.status_code, 200)
self.assertContains(response, expected_error, html=True)
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
}
with self.login_user_context(superuser):
# Duplicate slug / path
response = self.client.post(self.get_admin_url(Page, 'add'), new_page_data)
expected_error = (
'<ul class="errorlist"><li>Page '
'<a href="{}" target="_blank">test</a> '
'has the same url \'test\' as current page.</li></ul>'
).format(self.get_admin_url(Page, 'change', page2.pk))
self.assertEqual(response.status_code, 200)
self.assertContains(response, expected_error, html=True)
def test_reverse_id_error_location(self):
superuser = self.get_superuser()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id='p1')
page2 = create_page('Page 2', 'nav_playground.html', 'en')
page2_endpoint = self.get_admin_url(Page, 'advanced', page2.pk)
# Assemble a bunch of data to test the page form
page2_data = {
'reverse_id': 'p1',
'template': 'col_two.html',
}
with self.login_user_context(superuser):
response = self.client.post(page2_endpoint, page2_data)
expected_error = (
'<ul class="errorlist">'
'<li>A page with this reverse URL id exists already.</li></ul>'
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, expected_error.format(page2.pk), html=True)
def test_advanced_settings_endpoint(self):
admin_user = self.get_superuser()
site = Site.objects.get_current()
page = create_page('Page 1', 'nav_playground.html', 'en')
page_data = {
'language': 'en',
'site': site.pk,
'template': 'col_two.html',
}
path = admin_reverse('cms_page_advanced', args=(page.pk,))
with self.login_user_context(admin_user):
en_path = path + "?language=en"
redirect_path = admin_reverse('cms_page_changelist') + '?language=en'
response = self.client.post(en_path, page_data)
self.assertRedirects(response, redirect_path)
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
# Now switch it up by adding german as the current language
# Note that german has not been created as page translation.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with self.login_user_context(admin_user):
de_path = path + "?language=de"
redirect_path = admin_reverse('cms_page_change', args=(page.pk,)) + '?language=de'
response = self.client.post(de_path, page_data)
# Assert user is redirected to basic settings.
self.assertRedirects(response, redirect_path)
# Make sure no change was made
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
de_translation = create_title('de', title='Page 1', page=page.reload())
de_translation.slug = ''
de_translation.save()
# Now try again but slug is set to empty string.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with self.login_user_context(admin_user):
de_path = path + "?language=de"
response = self.client.post(de_path, page_data)
# Assert user is not redirected because there was a form error
self.assertEqual(response.status_code, 200)
# Make sure no change was made
self.assertEqual(Page.objects.get(pk=page.pk).template, 'col_two.html')
de_translation.slug = 'someslug'
de_translation.save()
# Now try again but with the title having a slug.
page_data['language'] = 'de'
page_data['template'] = 'nav_playground.html'
with | |
= 'G'
return module
def prepare_psf(self):
"""Get metadata relevant to the instrument/detector as well as the
pixel scale from either the mosaic file, the PSF file, or both.
Read in or create the PSF that is representative of the mosaic
data.
"""
# Get the PSF associated with the mosaic
if self.psf_file is None:
# If no PSF file is given and there is no pixel scale listed in
# the mosaic file, then we cannot continue
if self.mosaic_metadata['pix_scale1'] is None:
raise ValueError(("ERROR: pixel scale value not present in mosaic file "
"(in CD1_1 header keyword). This information is needed "
"to be able to convolve the mosaic with the proper PSF "
"kernel."))
# Get the dimensions of the JWST PSF kernel representing the
# final PSF to use in the simulation
jwst_ydim, jwst_xdim = self.jwst_psf.shape
# If no psf file is given, and the user requests a 2D Gaussian,
# then make a 2D Gaussian
if self.gaussian_psf:
self.logger.info("Creating 2D Gaussian for mosiac PSF")
# If a Gaussian FWHM value is given, then construct the
# PSF using astropy's Gaussian2D kernel
# Rough guess on dimensions to use
scale_ratio1 = self.outscale1 / self.mosaic_metadata['pix_scale1']
scale_ratio2 = self.outscale2 / self.mosaic_metadata['pix_scale2']
gauss_xdim = int(np.round(jwst_xdim * scale_ratio1))
gauss_ydim = int(np.round(jwst_ydim * scale_ratio2))
# Make sure the array has an odd number of rows and columns
if gauss_xdim % 2 == 0:
gauss_xdim += 1
if gauss_ydim % 2 == 0:
gauss_ydim += 1
self.logger.info('Mosaic pixel scale: {} x {}'.format(self.mosaic_metadata['pix_scale1'], self.mosaic_metadata['pix_scale2']))
self.logger.info('JWST pixel scale: {} x {}'.format(self.outscale1, self.outscale2))
self.logger.info('scale ratios: {}, {}'.format(scale_ratio1, scale_ratio2))
self.logger.info('JWST PSF dims: {} x {}'.format(jwst_xdim, jwst_ydim))
self.logger.info('Gaussian PSF dimensions: {} x {}'.format(gauss_xdim, gauss_ydim))
# Create 2D Gaussian
self.mosaic_psf = tools.gaussian_psf(self.mosaic_fwhm, gauss_xdim, gauss_ydim)
self.logger.debug('Temporarily saving psf for development')
h0 = fits.PrimaryHDU(self.mosaic_psf)
hlist = fits.HDUList([h0])
hlist.writeto('gaussian_2d_psf.fits', overwrite=True)
elif self.mosaic_metadata['telescope'] in KNOWN_PSF_TELESCOPES and not self.gaussian_psf:
# If no PSF file is given and the user does not want a
# generic 2D Gaussian (self.gaussian_psf is False),
# check to see if the mosaic is from one of the telescopes
# with a known PSF. If so, use the appropriate function to
# construct a PSF
if self.mosaic_metadata['telescope'] == 'HST':
self.logger.info('Creating HST PSF, using 2D Gaussian')
self.logger.info('HST FWHM in arcsec: {}'.format(self.mosaic_fwhm * self.mosaic_metadata['pix_scale2']))
self.mosaic_psf = tools.get_HST_PSF(self.mosaic_metadata, self.mosaic_fwhm)
elif self.mosaic_metadata['telescope'] == 'JWST':
self.logger.info("Retrieving JWST PSF")
self.mosaic_psf = tools.get_JWST_PSF(self.mosaic_metadata)
elif self.mosaic_metadata['instrument'] == 'IRAC':
self.logger.info("Retrieving IRAC PSF")
self.mosaic_psf = tools.get_IRAC_PSF(self.mosaic_metadata)
else:
raise ValueError(("For telescopes other than {}, you must either provide a "
"representative PSF using the psf_file keyword, or set "
"gaussian_psf=True in order to construct a 2D Gaussian PSF."
"This is neeeded to create the proper PSF kernal to transform "
"to the appropriate JWST PSF."))
else:
# If a PSF file is provided, check for any metadata. Metadata
# from the mosaic takes precidence over metadata in the PSF file.
psf_metadata = tools.get_psf_metadata(psf_filename)
# If the mosaic has no pixel scale info but the PSF file does,
# use the value from the PSF file.
if self.mosaic_metadata['pix_scale1'] is None:
if psf_metadata['pix_scale1'] is not None:
self.mosaic_metadata['pix_scale1'] = psf_metadata['pix_scale1']
else:
raise ValueError(("ERROR: no pixel scale value present in mosaic file nor PSF "
"file metadata (in CD1_1 header keyword). This information is "
"needed to be able to convolve the mosaic with the proper PSF "
"kernel."))
self.mosaic_psf = fits.getdata(psf_filename)
def psf_convolution(self, model):
"""Convolve the cropped image with the appropriate PSF for the
JWST detector being simulated.
Parameters
----------
model : jwst.datamodels.ImageModel
Data model instance containing the cropped image
Returns
-------
model : jwst.datamodels.ImageModel
Data model with image convolved by the PSF
"""
# The jwst_psf and the mosaic_psf must have the same array size
# and the same pixel scale. First deal with the pixel scale.
# Rescale one of the PSFs if necessary, in order to get matching pixel scales.
# Since the matching kernel is going to be convolved with the mosaic image,
# then it seems like we should match the PSFs at the mosaic pixel scale.
if not np.isclose(self.outscale1, self.mosaic_metadata['pix_scale1'], atol=0., rtol=0.01):
orig_jwst = copy.deepcopy(self.jwst_psf)
self.jwst_psf = resize_psf(self.jwst_psf, self.outscale1, self.mosaic_metadata['pix_scale1'], order=3)
resized_y_dim, resized_x_dim = self.jwst_psf.shape
if ((resized_y_dim % 2 == 0) or (resized_x_dim % 2 == 0)):
if resized_y_dim % 2 == 0:
new_y_dim = resized_y_dim + 1
else:
new_y_dim = resized_y_dim
if resized_x_dim % 2 == 0:
new_x_dim = resized_x_dim + 1
else:
new_x_dim = resized_x_dim
# Add a column/row to the resized array,
jwst_psf_padded = np.zeros((new_y_dim, new_x_dim))
jwst_psf_padded[0: resized_y_dim, 0: resized_x_dim] = self.jwst_psf
# Rather than zeros, make the top row/leftmost column a
# copy of the row/column next to it
if new_y_dim > resized_y_dim:
jwst_psf_padded[-1, 0: resized_x_dim] = self.jwst_psf[-1, :]
if new_x_dim > resized_x_dim:
jwst_psf_padded[0: resized_y_dim, -1] = self.jwst_psf[:, -1]
if ((new_y_dim > resized_y_dim) and (new_x_dim > resized_x_dim)):
jwst_psf_padded[-1, -1] = self.jwst_psf[-1, 1]
# Shift the source to be centered in the center pixel
centerx, centery = centroid_2dg(jwst_psf_padded)
jwst_shifted = shift(jwst_psf_padded, [0.5, 0.5], order=1)
centerx, centery = centroid_2dg(jwst_shifted)
self.jwst_psf = jwst_shifted
else:
jwst_psf_padded = self.jwst_psf
jwst_shape = self.jwst_psf.shape
mosaic_shape = self.mosaic_psf.shape
if ((jwst_shape[0] % 2 != mosaic_shape[0] % 2) or (jwst_shape[1] % 2 != mosaic_shape[1] % 2)):
raise ValueError(("ERROR: Mosaic PSF and JWST PSF have different shapes in terms "
"of odd/even numbers of rows and/or columns. Try adding or subtracting "
"rows/columns to the mosaic PSF. Mosaic PSF shape: {}, JWST PSF shape: {}"
.format(mosaic_shape, jwst_shape)))
# Now crop either the resized JWST PSF or the mosaic PSF in
# order to get them both to the same array size
self.logger.info("Crop PSFs to have the same array size")
self.jwst_psf, self.mosaic_psf = tools.same_array_size(self.jwst_psf, self.mosaic_psf)
# Now we make a matching kernel. The mosaic can then be
# convolved with this kernel in order to adjust the PSFs to match
# those from JWST.
self.logger.info("Create matching kernel")
kernel = self.matching_kernel(self.mosaic_psf, self.jwst_psf, window_type='TukeyWindow',
alpha=1.5, beta=1.5)
if self.save_intermediates:
self.logger.info('Save JWST psf and matching psf in outgoing_and_matching_kernel.fits')
ha = fits.PrimaryHDU(orig_jwst)
h0 = fits.ImageHDU(self.jwst_psf)
h1 = fits.ImageHDU(self.mosaic_psf)
h2 = fits.ImageHDU(kernel)
hlist = fits.HDUList([ha, h0, h1, h2])
outfile = os.path.join(self.outdir,
'{}_outgoing_and_matching_kernel.fits'.format(self.output_base))
hlist.writeto(outfile, overwrite=True)
self.logger.info('Convolve image cropped from mosaic with the matching PSF kernel')
start_time = datetime.datetime.now()
convolved_mosaic = fftconvolve(model.data, kernel, mode='same')
end_time = datetime.datetime.now()
delta_time = end_time - start_time
self.logger.info("Convolution took {} seconds".format(delta_time.seconds))
model.data = convolved_mosaic
if self.save_intermediates:
self.logger.info('Saving convolved mosaic as convolved_mosaic.fits')
h0 = fits.PrimaryHDU(convolved_mosaic)
hlist = fits.HDUList([h0])
outfile = os.path.join(self.outdir,
'{}_convolved_mosaic.fits'.format(self.output_base))
hlist.writeto(outfile, overwrite=True)
return model
def read_param_file(self, file):
"""Read the input yaml file into a nested dictionary
Parameters
----------
file : str
Name of the input yaml file
"""
if os.path.isfile(file):
with open(file, 'r') as f:
self.params = yaml.safe_load(f)
else:
raise FileNotFoundError(("ERROR: {} does not exist."
.format(file)))
self.instrument = self.params['Inst']['instrument'].upper()
self.aperture = self.params['Readout']['array_name'].upper()
self.detector_channel_value()
self.crop_center_ra = self.params['Telescope']['ra']
self.crop_center_dec = self.params['Telescope']['dec']
self.blot_center_ra = self.params['Telescope']['ra']
self.blot_center_dec = self.params['Telescope']['dec']
self.blot_pav3 = self.params['Telescope']['rotation']
self.flux_cal_file = self.params['Reffiles']['flux_cal']
self.distortion_file = self.params['Reffiles']['astrometric']
self.filter = self.params['Readout']['filter']
self.pupil = self.params['Readout']['pupil']
self.output_base = self.params['Output']['file']
# If input is to be dispersed later, we need to
# expand the region to be blotted
if self.params['Output']['grism_source_image']:
self.grism_coord_adjust()
if self.outdir is None:
self.outdir = self.params['Output']['directory']
# Create a dictionary for future CRDS query
self.reference_dictionary = crds_tools.dict_from_yaml(self.params)
def add_options(self, parser=None, usage=None):
if parser is None:
parser = argparse.ArgumentParser(usage=usage, \
description='Create seed image via catalogs')
parser.add_argument("paramfile", help='File describing the input parameters and instrument settings to use. (YAML format).')
parser.add_argument("mosaic_file", help="Fits file containing the image to create the seed image from")
parser.add_argument("data_extension_number", help="Extension in fits file that contains mosaic image", default=0)
parser.add_argument("wcs_extension_number", help="Extension in mosaic fits file that contains WCS information", default=0)
parser.add_argument("cropped_file", help="Filename used to save image cropped from mosaic", default=None)
parser.add_argument("blotted_file", help="Filename used to save resampled image", default=None)
parser.add_argument("outdir", help="Output directory. If None, the output dir from paramfile is used.", default=None)
parser.add_argument("psf_file", help="Name of fits file containing PSF corresponding to mosaic image", default=None)
parser.add_argument("mosaic_fwhm", help="FWHM of PSF in mosaic image, in pixels or arcsec", default=None)
parser.add_argument("mosaic_fwhm_units", | |
<reponame>tryone144/dotfiles
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#
# I3WM
# powerline-styled wrapper for i3status/j4status
# needs: [i3]
# [powerline-fonts]
# [ionicon-fonts]
#
# file: ~/.config/i3/panel/arrowbar_renderer.py
# v0.5.1 / 2015.11.12
#
import re
import sys
# Segment side markers used throughout the Renderer.
POSITION_LEFT = 0
POSITION_RIGHT = 1
# Special characters and icons
# Powerline separators and Ionicons glyphs; each trailing comment records the
# intended private-use code point.
# NOTE(review): several literals appear empty here — confirm the glyphs
# survived the last encoding round-trip of this file.
SEPARATOR_SEGMENT_RIGHT = "" # \uE0B0
SEPARATOR_SEGMENT_LEFT = "" # \uE0B2
SEPARATOR_PATH_RIGHT = "" # \uE0B1
SEPARATOR_PATH_LEFT = "" # \uE0B3
ICON_TIME = "" # \uF394
ICON_BACKLIGHT = "" # \uF4B7
ICON_CPU = "" # \uF37F
ICON_MEM = "" # \uF2EC
ICON_TEMP = "" # \uF390
ICON_WINTITLE = "" # \uF2B3
ICON_STORAGE_DISK = "" # \uF44E
ICON_STORAGE_HOME = "" # \uF144
ICON_BATTERY_EMPTY = "" # \uF295
ICON_BATTERY_LOW = "" # \uF295
ICON_BATTERY_HALF = "" # \uF296
ICON_BATTERY_FULL = "" # \uF296
ICON_BATTERY_CHARGE = "" # \uF294
ICON_VOLUME_MUTE = "" # \uF3A2
ICON_VOLUME_LOW = "" # \uF3A1
ICON_VOLUME_MEDIUM = "" # \uF131
ICON_VOLUME_HIGH = "" # \uF123
ICON_NETWORK_WIFI = "" # \uF26D
ICON_NETWORK_ETHER = "" # \uF22F
ICON_NETWORK_USB = "" # \uF2B8
# RegEx and format-strings
# Fixed: patterns now use raw strings — "\s", "\d", "\D" and "\g" inside
# plain literals are invalid escape sequences (SyntaxWarning since
# Python 3.12, slated to become errors). Runtime values are unchanged.
ICON_EXP = re.compile(r":(.*)_IC:")
BAT_EXP = re.compile(r"(Bat|Chr|Full|Empty)\s")
VOL_EXP = re.compile(r"([\d]+)%")
STRIP_EXP = re.compile(r"^\d+(\D.*)")
# lemonbar control sequences: clickable areas, colors and output selection.
ACTION_START_FMT = "%{{A{button}:{action}:}}"
ACTION_END_FMT = "%{{A{button}}}"
COLOR_FMT = "#{aa}{rr}{gg}{bb}"
OUTPUT_FMT = "%{{S{output}}}"
# Color codes
def parse_color(code):
    """Normalize a hex color *code* to lemonbar's ``#AARRGGBB`` form.

    Accepts 3 (RGB), 4 (ARGB), 6 (RRGGBB) and 8 (AARRGGBB) hex digits,
    with or without a leading ``#``; a full 9-character ``#AARRGGBB``
    string is returned unchanged. Missing components default to ``FF``
    (opaque white), as does any unrecognized shape.

    Fixes/generalizations: an empty string no longer raises IndexError,
    and 8-digit AARRGGBB input (previously silently ignored) is parsed.
    """
    if len(code) == 9 and code[0] == "#":
        return code
    color = {"aa": "FF", "rr": "FF", "gg": "FF", "bb": "FF"}
    if code and code[0] == "#":
        code = code[1:]
    if len(code) == 3:
        color["rr"] = code[0]*2
        color["gg"] = code[1]*2
        color["bb"] = code[2]*2
    elif len(code) == 4:
        color["aa"] = code[0]*2
        color["rr"] = code[1]*2
        color["gg"] = code[2]*2
        color["bb"] = code[3]*2
    elif len(code) == 6:
        color["rr"] = code[0:2]
        color["gg"] = code[2:4]
        color["bb"] = code[4:6]
    elif len(code) == 8:
        # Generalized: full AARRGGBB without shorthand doubling.
        color["aa"] = code[0:2]
        color["rr"] = code[2:4]
        color["gg"] = code[4:6]
        color["bb"] = code[6:8]
    # Format inlined (same literal as module-level COLOR_FMT) so this helper
    # is self-contained.
    return "#{aa}{rr}{gg}{bb}".format(**color)
# Terse alias used by the constant definitions below.
COLOR = parse_color
# i3bar convention strings for good/bad states (compared against incoming
# status colors, not parsed).
COL_GOOD = "#00FF00"
COL_BAD = "#FF0000"
COL_DEFAULT_FG = COLOR("#FFFF")
COL_DEFAULT_BG = COLOR("#F000")
COLOR_SEP = COLOR("#CCC")
# Urgency colors shared by workspaces, titles and status segments.
COLOR_URGENT_FG = COLOR("#FFF")
COLOR_URGENT_BG = COLOR("#900")
COLOR_WORKSPACE_ACTIVE_FG = COLOR("#EEE")
COLOR_WORKSPACE_ACTIVE_BG = COLOR("#1793d1")
COLOR_WORKSPACE_INACTIVE_FG = COLOR("#888")
COLOR_WORKSPACE_INACTIVE_BG = COLOR("#333")
COLOR_WORKSPACE_URGENT_FG = COLOR_URGENT_FG
COLOR_WORKSPACE_URGENT_BG = COLOR_URGENT_BG
COLOR_TITLE_ICON_FG = COLOR("#CCC")
COLOR_TITLE_ICON_BG = COLOR("#22D")
COLOR_TITLE_URGENT_FG = COLOR_URGENT_FG
COLOR_TITLE_URGENT_BG = COLOR_URGENT_BG
COLOR_STATUS_TIME_FG = COLOR("#EEE")
COLOR_STATUS_TIME_BG = COLOR("#1793d1")
COLOR_STATUS_VOL_BG = COLOR("#555")
COLOR_STATUS_VOL_MUTE_BG = COLOR("#846")
COLOR_STATUS_BATTERY_BG = COLOR("#444")
COLOR_STATUS_URGENT_FG = COLOR_URGENT_FG
COLOR_STATUS_URGENT_BG = COLOR_URGENT_BG
# Helper functions (output)
def fprint_nb(fd, msg):
    """Write *msg* to file-like *fd* and flush immediately (no buffering)."""
    text = str(msg)
    fd.write(text)
    fd.flush()
def print_nb(msg):
    """Write *msg* to stdout via :func:`fprint_nb` (flushes immediately)."""
    fprint_nb(sys.stdout, str(msg))
class Renderer(object):
SEGMENTS_LEFT = [re.compile(pat) for pat in ("workspace", "title", )]
SEGMENTS_RIGHT = [re.compile(pat) for pat in ("pulseaudio",
"backlight",
"nm-*",
"cpu", "sensors", "mem",
"upower-battery",
"time", )]
    def __init__(self):
        """Build empty per-segment buffers and pre-rendered static tags."""
        super().__init__()
        # Output names to render on (None = render on all active outputs).
        self.show_on = None
        self.outputs = []
        # One tag list per configured segment pattern, in display order.
        self.left = [[] for _ in Renderer.SEGMENTS_LEFT]
        self.right = [[] for _ in Renderer.SEGMENTS_RIGHT]
        self.workspace_objs = []
        self.title_objs = []
        self.status_objs = []
        # Classify every pattern so the update_* methods know which
        # buffers belong to workspaces, the window title, or status items.
        for exp in Renderer.SEGMENTS_LEFT + Renderer.SEGMENTS_RIGHT:
            if exp.pattern == "workspace":
                self.workspace_objs.append(exp.pattern)
            elif exp.pattern == "title":
                self.title_objs.append(exp.pattern)
            else:
                self.status_objs.append(exp.pattern)
        # Pre-rendered color-reset/separator sequences ("-" = default color).
        self.__separator_reset = self.__escape_color(bg="-") + \
                                 SEPARATOR_SEGMENT_RIGHT + \
                                 self.__escape_color(fg="-", bg="-")
        self.__line_reset = self.__escape_color(fg="-", bg="-")
        # Static window-icon tag re-used every time the title is cleared.
        self.__title_icon_tag = self.__tag("title", " " + ICON_WINTITLE + " ",
                                           color_fg=COLOR_TITLE_ICON_FG,
                                           color_bg=COLOR_TITLE_ICON_BG)
def set_outputs(self, out):
self.show_on = out
def clear_title(self):
# clear old title
for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
if exp.pattern in self.title_objs:
self.left[i].clear()
self.left[i].append(self.__title_icon_tag)
for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
if exp.pattern in self.title_objs:
self.right[i].clear()
self.right[i].append(self.__title_icon_tag)
    def render(self):
        """Emit one lemonbar-formatted line covering every tracked output."""
        for o, output in enumerate(self.outputs):
            # %{l}/%{r} anchor the two segment groups left/right.
            output_left = "%{l}"
            output_right = "%{r}"
            end_sep = self.__separator_reset
            end_line = self.__line_reset
            # Drop tags pinned to a different output before rendering.
            left = self._filter(self.left, output)
            right = self._filter(self.right, output)
            # render segments
            for i, seg in enumerate(left):
                output_left += self._render_segment(
                    seg, POSITION_LEFT, first=(i == 0))
            for i, seg in enumerate(right):
                output_right += self._render_segment(
                    seg, POSITION_RIGHT, first=(i == 0))
            # The trailing separator is only drawn when the left side is
            # non-empty; %{S<o>} selects the target output.
            print_nb(OUTPUT_FMT.format(output=o) + output_left +
                     (end_sep if len(left) != 0 else "") +
                     output_right + end_line)
        # Single newline terminates the whole bar update.
        print_nb("\n")
    def _render_segment(self, tag, position=POSITION_LEFT,
                        first=False, index=0, count=0):
        """Render one segment (a list of tag dicts) to a lemonbar string.

        Draws powerline separators between segments and path separators
        between same-colored tags; wraps tag text in clickable-area
        escapes for any defined actions.
        NOTE(review): ``index`` and ``count`` are accepted but unused in
        this body — kept for signature compatibility.
        """
        if len(tag) == 0:
            return ""
        output = ""
        color_bg = tag[0]["color_bg"]
        if position == POSITION_LEFT:
            # draw separator
            output += self.__escape_color(bg=color_bg)
            if not first:
                output += SEPARATOR_SEGMENT_RIGHT
            # draw text
            output += self.__escape_color(fg=tag[0]["color_fg"])
            for i, t in enumerate(tag):
                if i > 0:
                    # draw sub-separator: a hard segment separator when the
                    # background changes, otherwise a thin path separator.
                    if t["color_bg"] != color_bg:
                        output += self.__escape_color(fg=color_bg,
                                                      bg=t["color_bg"]) + \
                                  SEPARATOR_SEGMENT_RIGHT + \
                                  self.__escape_color(fg=t["color_fg"],
                                                      bg=t["color_bg"])
                        color_bg = t["color_bg"]
                    else:
                        output += self.__escape_color(fg=COLOR_SEP) + \
                                  SEPARATOR_PATH_RIGHT + \
                                  self.__escape_color(fg=t["color_fg"])
                # Open one clickable area per defined action (button 1..n),
                # remembering the matching closers to append after the text.
                action_end = ""
                for b, a in enumerate(t["actions"]):
                    if a is not None:
                        output += ACTION_START_FMT.format(button=b+1, action=a)
                        action_end += ACTION_END_FMT.format(button=b+1)
                output += t["text"] + action_end
            # draw end (separator): leave fg set so the caller can close
            # the segment with the shared reset sequence.
            output += self.__escape_color(fg=color_bg)
        elif position == POSITION_RIGHT:
            # draw separator (right side points left).
            if first:
                output += self.__escape_color(bg="-")
            output += self.__escape_color(fg=color_bg) + \
                      SEPARATOR_SEGMENT_LEFT
            # draw text
            output += self.__escape_color(fg=tag[0]["color_fg"],
                                          bg=color_bg)
            for i, t in enumerate(tag):
                if i > 0:
                    # draw sub-separator (mirror of the left-side logic).
                    if t["color_bg"] != color_bg:
                        output += self.__escape_color(fg=t["color_bg"],
                                                      bg=color_bg) + \
                                  SEPARATOR_SEGMENT_LEFT + \
                                  self.__escape_color(fg=t["color_fg"],
                                                      bg=t["color_bg"])
                        color_bg = t["color_bg"]
                    else:
                        output += self.__escape_color(fg=COLOR_SEP) + \
                                  SEPARATOR_PATH_LEFT + \
                                  self.__escape_color(fg=t["color_fg"])
                action_end = ""
                for b, a in enumerate(t["actions"]):
                    if a is not None:
                        output += ACTION_START_FMT.format(button=b+1, action=a)
                        action_end += ACTION_END_FMT.format(button=b+1)
                output += t["text"] + action_end
        return output
def _filter(self, segments, output=None):
n_segs = []
for tags in segments:
n_tags = []
for t in tags:
if t["output"] is not None and output is not None \
and t["output"] != output:
continue
n_tags.append(t)
n_segs.append(n_tags)
return n_segs
def update_workspace(self, workspaces):
# clear old workspace
for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
if exp.pattern in self.workspace_objs:
self.left[i].clear()
for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
if exp.pattern in self.workspace_objs:
self.right[i].clear()
# populate segment list
for ws in workspaces:
for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
if exp.match("workspace"):
self.left[i].append(self.__workspace_filter(ws))
for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
if exp.match("workspace"):
self.right[i].append(self.__workspace_filter(ws))
def update_title(self, win):
self.clear_title()
# populate segment list
for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
if exp.match("title"):
self.left[i].append(self.__title_filter(vars(win)))
for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
if exp.match("title"):
self.right[i].append(self.__title_filter(vars(win)))
def update_outputs(self, objects):
self.outputs.clear()
for o in objects:
if o.active and (self.show_on is None or o.name in self.show_on):
self.outputs.append(o.name)
    def update_status(self, objects):
        """Rebuild the status segments from i3status/j4status *objects*."""
        # clear old status
        for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
            if exp.pattern in self.status_objs:
                self.left[i].clear()
        for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
            if exp.pattern in self.status_objs:
                self.right[i].clear()
        # populate segment list; only well-formed entries (name + full_text)
        # are considered, and each is routed to every matching pattern slot.
        for tag in objects:
            if "name" in tag.keys() and "full_text" in tag.keys():
                for i, exp in enumerate(Renderer.SEGMENTS_LEFT):
                    if exp.match(tag["name"]):
                        self.left[i].append(self.__status_filter(tag))
                for i, exp in enumerate(Renderer.SEGMENTS_RIGHT):
                    if exp.match(tag["name"]):
                        self.right[i].append(self.__status_filter(tag))
    def __status_filter(self, tag):
        """Translate one raw status entry into a styled, clickable tag dict.

        Replaces ``:<NAME>_IC:`` icon placeholders with glyphs, then applies
        per-module colors, icon substitutions and click actions.
        """
        new = self.__tag(tag["name"],
                         " " + ICON_EXP.sub(self.__escape_icon,
                                            tag["full_text"]) + " ")
        if new["name"] == "upower-battery":
            # Strip the textual state words; the icon already conveys them.
            new["text"] = new["text"].replace("Bat ", "") \
                                     .replace("Full ", "") \
                                     .replace("Chr ", "") \
                                     .replace("Empty ", "")
            new["color_bg"] = COLOR_STATUS_BATTERY_BG
            if "color" in tag.keys():
                new["color_fg"] = tag["color"]
                # COL_BAD from the status source marks a low battery.
                if tag["color"] == COL_BAD:
                    new["text"] = new["text"].replace(ICON_BATTERY_HALF,
                                                      ICON_BATTERY_LOW)
        elif new["name"] == "time":
            # Left-click toggles the date display.
            new["actions"][0] = "date|toggle"
            new["color_fg"] = COLOR_STATUS_TIME_FG
            new["color_bg"] = COLOR_STATUS_TIME_BG
        elif new["name"] == "pulseaudio":
            # Left-click toggles mute.
            new["actions"][0] = "volume|toggle"
            new["color_bg"] = COLOR_STATUS_VOL_BG
            # COL_BAD from the status source marks a muted sink.
            if "color" in tag.keys() and tag["color"] == COL_BAD:
                new["text"] = new["text"].replace(ICON_VOLUME_HIGH,
                                                  ICON_VOLUME_MUTE) \
                                         .replace(ICON_VOLUME_MEDIUM,
                                                  ICON_VOLUME_MUTE) \
                                         .replace(ICON_VOLUME_LOW,
                                                  ICON_VOLUME_MUTE)
                new["color_bg"] = COLOR_STATUS_VOL_MUTE_BG
        elif new["name"] == "backlight":
            pass
        # Urgency overrides any module-specific coloring chosen above.
        if "urgent" in tag.keys() and tag["urgent"]:
            new["color_fg"] = COLOR_STATUS_URGENT_FG
            new["color_bg"] = COLOR_STATUS_URGENT_BG
        return new
def __workspace_filter(self, ws):
new = self.__tag("workspace",
" " + STRIP_EXP.sub("\g<1>", ws.name) + " ",
actions=["i3|change-ws|" + ws.name.replace(":", "_"),
None, None],
color_fg=COLOR_WORKSPACE_INACTIVE_FG,
color_bg=COLOR_WORKSPACE_INACTIVE_BG)
new["output"] = ws.output
if ws.focused:
new["color_fg"] = COLOR_WORKSPACE_ACTIVE_FG
new["color_bg"] = COLOR_WORKSPACE_ACTIVE_BG
elif ws.urgent:
new["color_fg"] = COLOR_WORKSPACE_URGENT_FG
new["color_bg"] = COLOR_WORKSPACE_URGENT_BG
return new
def __title_filter(self, win):
new = self.__tag("title", " ")
if "name" in win.keys() and win["name"] is not None:
l = len(win["name"])
new["text"] += win["name"][:48] + \
("… " if l > 48 else " ")
if "urgent" in win.keys() and win["urgent"]:
new["color_fg"] = COLOR_TITLE_URGENT_FG
new["color_bg"] = COLOR_TITLE_URGENT_BG
return new
def __tag(self, name, text, output=None, actions=None,
color_fg=COL_DEFAULT_FG, color_bg=COL_DEFAULT_BG):
actions = actions if actions is not None else [None, None, None]
return {"name": name, "text": text,
"output": output, "actions": actions,
"color_fg": color_fg, "color_bg": color_bg}
    def __escape_icon(self, match):
        """Regex-sub callback mapping an ``:<NAME>_IC:`` placeholder to a glyph.

        For battery/volume placeholders the surrounding text (the full
        matched string) is inspected to pick a state-specific icon.
        """
        if match is None:
            return ""
        mode = match.group(1)
        if mode == "BAT":
            # Battery state word (Bat/Chr/Full/Empty) chooses the icon.
            m = BAT_EXP.search(match.string)
            if m is None:
                return ICON_BATTERY_HALF
            state = m.group(1)
            if state == "Full":
                return ICON_BATTERY_FULL
            elif state == "Empty":
                return ICON_BATTERY_EMPTY
            elif state == "Chr":
                return ICON_BATTERY_CHARGE
            else:
                return ICON_BATTERY_HALF
        elif mode == "VOL":
            # Volume percentage chooses low/medium/high.
            m = VOL_EXP.search(match.string)
            if m is None:
                return ICON_VOLUME_HIGH
            state = int(m.group(1))
            if state < 10:
                return ICON_VOLUME_LOW
            elif state < 50:
                return ICON_VOLUME_MEDIUM
            else:
                return ICON_VOLUME_HIGH
        elif mode == "LIGHT":
            return ICON_BACKLIGHT
        elif mode[:3] == "NET":
            # Placeholder form "NET_<IFACE>"; the interface class picks the icon.
            iface = mode[4:]
            if iface == "WIFI":
                return ICON_NETWORK_WIFI
            elif iface == "USB":
                return ICON_NETWORK_USB
            else:
                return ICON_NETWORK_ETHER
        elif mode == "TIME":
            return ICON_TIME
        elif mode == "CPU":
            return ICON_CPU
        elif mode == "RAM":
            return ICON_MEM
        elif mode == "TEMP":
            return ICON_TEMP
        else:
            # Unknown placeholders are simply removed.
            return ""
def __escape_color(self, fg=None, bg=None):
output = ""
if fg is not None:
output += "%{F" + fg + "}"
if bg is | |
Ensure that no other process is performing this procedure simultaneously. Thus, an inactive item selected will not become active unless it is done by this thread itself.
get = [item[1] for item in sorted([ (item["priority"],item) for item in self._queue if item["active"]==False and args["nick"] in item["nick"] ])] # Select inactive items which this peer can provide, decreasing priority
rebuild = []; # A list of items that have already been downloaded, and must be rebuilt.
while len(get)>0: # For each item in the list, check to see if it is viable for download
if self.transfer_verify(get[0]): # Return true if we can start the download.
# get[0]["active"] = True;
break # Activate this download, break out of the loop.
rebuild.append(get[0]); get = get[1:] # Move this item out of the download queue, into the rebuild queue.
self._download["lock"].release() # Release the lock ASAP, so that rebuilding doesnt block other threads.
for item in rebuild: self.transfer_rebuild(item) # Try to rebuild each file that is found to be completely downloaded.
self.debug("Checking for items that can be downloaded from "+args["nick"]+" : "+str(get))
if len(get)>0:
get = get[0]
get["active"] = True
self._download["downslots"]+=1
else: get = None
return get # The above loop will break when there are no items to download, or if an item has been selected.
    def transfer_filename(self,item): # Calculates the filename & location for a queue item; INCOMPLETE : Verify permissions
        """Return the destination path for queue *item*.

        Falls back to the configured downloads directory when the item's own
        location is missing or not a directory; splits off the extension so a
        numeric " (n)" suffix can be inserted before it when needed.
        """
        location = (self._dir["downloads"] if (item["location"] is None or not os.path.isdir(item["location"])) else item["location"]) # Calculate location based to availability and accessibility
        if item["name"].count(".")>0: # Count the number of dots to determine if there is an extension for this file.
            extn = "."+item["name"].split(".")[-1]; item["name"] = ".".join(item["name"].split(".")[:-1]); # Isolate the extension which is available after the last dot
        else: extn = "" # No extension for a file with no dots
        suffix=0; filename = location+os.sep+item["name"]+extn; # Initializing the suffix and starting with a base filename
        # NOTE(review): suffixing only runs when config["overwrite"] is true,
        # so an enabled "overwrite" setting actually AVOIDS clobbering existing
        # files — confirm whether this condition should be inverted.
        if self._config["overwrite"]:
            while os.path.isfile(filename): # Repeat until a free filename is obtained
                suffix+= 1; filename = location+os.sep+item["name"]+" ("+str(suffix)+")"+extn; # Add the incremented suffix to the base filename
        # NOTICE : In Linux, also need to ensure that we have the required permissions in the target location.
        return filename # This file is guaranteed to be accessible and writable.
def transfer_rebuild(self,get): # Called when all parts of a file are downloaded, to join them and make a whole file
    """Join the downloaded ".partN" files of *get* into the final file.

    If other parts of the same file are still queued, or any part on disk
    is missing or has the wrong size, nothing is joined; instead the bad
    parts are re-queued (resuming from whatever bytes are already present).
    """
    more = False # True when another part of this same file is still in the download queue
    for item in self._queue: # Match other queue entries by temp-file id and display name
        if item["incomplete"]==get["incomplete"] and item["name"]==get["name"]: more = True # Found another pending part
    all = True # Stays True only if every part exists with the expected size (shadows builtin all())
    tempname = self._dir["incomplete"]+os.sep+get["incomplete"]; # Base path of the temporary ".partN" files
    if not more: # Audit the on-disk parts only once nothing for this file remains queued
        residue = ((get["size"]+self._config["segment_size"]-1)%self._config["segment_size"]+1) # Size of the final block: size % segment_size, but a full segment when it divides evenly
        for i in range(get["parts"]):
            filesize = os.path.getsize(tempname+".part"+str(i)) if os.path.isfile(tempname+".part"+str(i)) else -1 # -1 marks a missing part
        if get["type"]=="file":
                pass # Leave filelists alone as they are always assumed to be one block.
        elif filesize==-1: # Part missing entirely: queue a full re-download of this segment
                all = False
                redownload = copy.deepcopy(get); redownload["part"] = i; redownload["offset"] = i*self._config["segment_size"];
                redownload["length"] = self._config["segment_size"] if i<get["parts"]-1 else residue; self._queue.append(redownload);
        elif i<get["parts"]-1 and filesize!=self._config["segment_size"]: # Non-final part has the wrong size
                all = False;
                if filesize>self._config["segment_size"]: # Oversized part is corrupt: discard and restart it
                    os.remove(tempname+".part"+str(i)); filesize = 0;
                redownload = copy.deepcopy(get); redownload["part"] = i; redownload["offset"] = i*self._config["segment_size"]+filesize; # Resume after the bytes already on disk
                redownload["length"] = self._config["segment_size"]-filesize; self._queue.append(redownload);
        elif i==get["parts"]-1 and filesize!=residue: # Final part has the wrong size
                all = False;
                if filesize>residue:
                    os.remove(tempname+".part"+str(i)); filesize = 0;
                redownload = copy.deepcopy(get); redownload["part"] = i; redownload["offset"] = i*self._config["segment_size"]+filesize;
                redownload["length"] = residue-filesize; self._queue.append(redownload);
    if not more and all: # All parts present and complete: assemble the final file
        filename = self.transfer_filename(get) # Destination path (see transfer_filename)
        get["filename"] = filename
        handle_dest = open(filename,"wb") # Concatenate every part into the destination manually
        for i in range(get["parts"]):
            handle_src = open(tempname+".part"+str(i),"rb")
            blocksize = 1024*1024 # Copy in 1 MB chunks
            while True: # Transfer data from source to destination
                datablock = handle_src.read(blocksize)
                if datablock=="": break # NOTE(review): Python 2 idiom; under Python 3 read() returns b"" so this would never break
                handle_dest.write(datablock)
            handle_src.close()
        handle_dest.close()
        for i in range(get["parts"]): # Delete all source parts
            while True: # Retry until the OS releases the file handle
                try:
                    os.remove(tempname+".part"+str(i))
                    break
                except WindowsError: time.sleep(1) # NOTE(review): WindowsError is undefined off Windows; OSError would be portable
        filesize = os.path.getsize(filename)
        self.debug("Download complete : "+filename+" (FileSize: "+self.filesize(filesize)+")")
        if get["location"]==self._dir["filelist"] and get["id"]==self._config["filelist"]: # Filelists arrive bz2-compressed
            if self.bz2_compress(filename,False): os.remove(filename) # Decompress, then drop the compressed copy
def transfer_request(self,args,info): # Make the actual download request
    """Build the "$ADCGET ...|" command for the currently selected queue item.

    If a matching ".partN" temp file already exists (left over from an
    interrupted download), the request's offset/length are adjusted so the
    transfer resumes where it left off instead of restarting.

    :param args: per-connection state; args["get"] is the queue item dict.
    :param info: connection info dict (host/port used here for logging only).
    :returns: the protocol command string to send to the peer.
    """
    self.debug("Requesting "+str(args["get"])+" from "+args["nick"]+" ("+info["host"]+":"+str(info["port"])+") ...")
    tempname = self._dir["incomplete"]+os.sep+self.escape_filename(args["get"]["incomplete"])+".part"+str(args["get"]["part"])
    try: # Resume: skip over the bytes already on disk from an interrupted download.
        if os.path.isfile(tempname):
            filesize = os.path.getsize(tempname)
            args["get"]["offset"] += filesize
            args["get"]["length"] -= filesize
    except OSError: pass # was a bare except; only the isfile/getsize filesystem race needs ignoring
    # Filelists are requested by name, everything else by TTH root; append " ZL1" when the peer advertises compressed transfers.
    return "$ADCGET "+("file" if args["get"]["type"]=="tth" else args["get"]["type"])+" "+("TTH/" if args["get"]["id"]!=self._config["filelist"] else "")+args["get"]["id"]+" "+str(args["get"]["offset"])+" "+str(args["get"]["length"])+(" ZL1" if "ZLIG" in args["support"] else "")+"|"
def transfer_download(self,args,info): # Read the connection buffer for new binary data, and save it.
    """Drain binary data from the connection buffer into the open part file.

    Consumes at most args["more"] bytes from args["buffer"].  When the
    expected byte count reaches zero the part is finished: its queue entry
    is removed, a rebuild is attempted, the optional success callback is
    fired, and either the next queued item is requested on this connection
    or the connection is closed and the download slot returned.
    """
    length = min(len(args["buffer"]),args["more"]) # Never consume more than the bytes still expected for this part
    args["handle"].write(args["buffer"][:length])
    args["handle"].flush()
    args["buffer"] = args["buffer"][length:] # Leave any surplus (the next protocol command) in the buffer
    args["more"]-=length
    if args["more"]==0: # This part is complete
        self.debug("Download complete : "+str(args["get"])+" from "+info["host"]+":"+str(info["port"])+".")
        args["binary"] = False; args["handle"].close(); # Back to command mode; close the part file
        x = [item for item in self._queue if (item["id"]==args["get"]["id"] and item["incomplete"]==args["get"]["incomplete"] and item["part"]==args["get"]["part"])] # Isolate the queue entry with a matching signature
        if len(x)==1 and x[0] in self._queue: self._queue.remove(x[0]) # Remove item from queue
        self.transfer_rebuild(args["get"]) # Join the file if this was the last outstanding part
        if args["get"]["success_callback"]!=None: # Optional user callback; NOTE(review): assumes transfer_rebuild set args["get"]["filename"] -- confirm for multi-part files
            try:
                if args["get"]["success_callback_args"]!=None: args["get"]["success_callback"](args["get"]["filename"], args["get"]["success_callback_args"])
                else: args["get"]["success_callback"](args["get"]["filename"])
            except: # Never let a user callback kill the transfer thread; log the traceback instead
                self.debug("Success Callback Function Error : "+str(args["get"]))
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exception(exc_type, exc_value, exc_traceback, limit=10, file=(sys.stdout))
        del args["get"] # Destroy the last reference to that queue item
        args["get"] = self.transfer_next(args,info) # Try and select the next item to download
        if args["get"] is not None: info["send"](self.transfer_request(args,info) ) # If there is such an item, start the download
        else:
            self._download["downslots"]-=1 # Return the allotted download slot for general use again
            info["close"]() # Or else, terminate connection.
    return args, info
def transfer_upload(self,args,info,x): # Response to an ADCGET Request;
    """Serve an $ADCGET request (filelist or TTH-addressed file).

    *x* is the tokenized "$ADCGET" command.  Sends "$ADCSND" followed by
    the raw file contents in segment_size chunks, or an "$Error ..." when
    no upload slot is free, the file is unknown, or it cannot be read.

    Fixes vs. original: the file-access "$Error" is now "|"-terminated like
    every other NMDC command; "except WindowsError" became the portable
    OSError (its superclass on Windows); "print w" became a parenthesized
    print valid under both Python 2 and 3.
    """
    if self._download["upslots"]==self._download["maxupslots"]: # All upload slots busy
        info["send"]("$Error All download slots already taken.|")
        return args,info
    group = self.group_find(args["nick"]) # The peer's share group selects which filelist/share to serve from
    if x[1]=="file" and x[2]==self._config["filelist"]: # Filelist request
        target = self._dir["filelist"]+os.sep+"#"+self.escape_filename(group,True)+".xml.bz2" # Pre-generated per-group filelist
    elif x[1]=="file" and x[2].startswith("TTH/"): # File addressed by TTH root
        filelist = self._shared[group].getElementsByTagName("FileListing")[0] # Search this group's share
        result = self.search_result_recursive(filelist,(None,None,"F","F",0,9,x[2][4:]),os.sep) # <ip>/<hub>, <port>/<nick>, isSizeRestricted, isMaxSize, size, fileType, searchTerm
        if len(result)==0: # No Results
            info["send"]("$Error File not found.|"); return args,info
        else: # TTH Results were found.
            target = result[0][0] # File Name relative to the paths added to the filelist
            for path in self._filelist[group]: # Map the share-relative name back to a real path on disk
                if os.path.isdir(path): # If the path is a shared directory
                    if path.endswith(os.sep): path = path[:-1] # Remove the trailing slash
                    z = os.sep.join(path.split(os.sep)[:-1])+os.sep+target # Drop the folder name from the base path, as it appears in the final path too.
                    if os.path.isfile(z): # Verify that the file indeed exists
                        target = z; break # Target it and break out of the loop
                elif os.path.isfile(path) and path.endswith(target): # If the file was directly shared,
                    target = path; break # Target and break out
    else:
        info["send"]("$Error Unsupported Request|")
        return args,info
    try: filesize = os.path.getsize(target)
    except OSError as w: # was WindowsError: undefined on non-Windows platforms
        print(w)
        info["send"]("$Error File Access Error : "+target+"|") # was missing the "|" command terminator
        return args,info
    x[4] = str(filesize) # Patch the real size into the echoed request
    info["send"]("$ADCSND "+" ".join(x[1:])+"|")
    self._download["upslots"]+=1 # Occupy an upload slot for the duration of the transfer
    handle = open(target,"rb")
    args["binary"] = True # Suppress command parsing while raw data is on the wire
    for i in range(int( math.ceil(float(filesize)/self._config["segment_size"]) )):
        info["send"](handle.read(self._config["segment_size"]))
    args["binary"] = False
    handle.close()
    self._download["upslots"]-=1
    return args,info
def transfer_handler(self,data,info,args): # Client-to-Client Handshake: Responds to data from remote host
if data is None:
if "host" not in args["transfer"]: args["transfer"]["host"]=info["host"]
if "port" not in args["transfer"]: args["transfer"]["port"]=info["port"]
if "buffer" not in args: # Initializations to be done when a TCP connection has just been set up.
if args["role"]=="client": info["send"]("$MyNick "+self.escape(self._config["nick"])+"|")
args = {"buffer":"", "binary":False, "support":[], "role":args["role"], "transfer":args["transfer"], "get":None, "error":False }
else: # Destructor
if args["get"] is not None and not args["error"]:
self.spawn("RemoteConnection:"+args["nick"],self.connect_remote,(args["nick"],True))
info["kill"]() # Release slots and kill server
return args
args["buffer"]+=data
if args["binary"]: # Binary Data Transfer Mode, placed before command interpretation because they may arrive immediately after transfers
args,info = self.transfer_download(args,info)
while not args["binary"]: # Exchange of commands
restart = False
for iteration in range(args["buffer"].count("|")):
length = args["buffer"].index("|")
if length==0:
args["buffer"] = args["buffer"][1:]
continue
data = args["buffer"][0:length]
args["buffer"] = args["buffer"][length+1:]
x = data.split()
if x[0]=="$MyNick":
args["nick"] = x[1]
self._userips[args["nick"]] = info["host"]
args["transfer"]["nick"] = x[1] # Save the nick in the transfer object for direct access
if args["role"]=="server":
info["send"]("$MyNick "+self.escape(self._config["nick"])+"|$Lock "+self._config["lock"]+" Pk="+self._config["signature"]+"|")
elif x[0]=="$Lock":
args["lock"] = x[1]
if args["role"]=="client": info["send"]("$Lock "+self._config["lock"]+" Pk="+self._config["signature"]+"|")
elif args["role"]=="server":
args["get"] = self.transfer_next(args,info)
args["rand1"] = 32766 # random.randint(0,32767)
info["send"]("$Supports "+self._config["support"]+"|$Direction "+("Download" if args["get"] is not None else "Upload")+" "+str(args["rand1"])+"|$Key "+self.lock2key(args["lock"])+"|")
elif x[0]=="$Supports":
args["support"] = x[1:]
elif x[0]=="$Direction":
args["dir"] = x[1]
args["rand2"] = int(x[2])
elif x[0]=="$Key":
args["key"] = " ".join(x[1:])
if self._config["key"]!=args["key"]:
info["close"](); continue;
while args["role"]=="client":
args["rand1"] = random.randint(0,32767)
if args["rand1"]!=args["rand2"]: break
if args["role"] =="client":
args["get"] = self.transfer_next(args,info)
info["send"]("$Supports "+self._config["support"]+"|$Direction "+("Download" if args["get"] is not None else "Upload")+" "+str(args["rand1"])+"|$Key "+self.lock2key(args["lock"])+"|")
if args["get"] is not None and (args["dir"]=="Upload" or args["rand1"]>args["rand2"]): # If peer doest want to download, or if its random number is smaller, we can download
info["send"](self.transfer_request(args,info))
if args["get"] is not None and args["dir"]=="Upload": info["kill"]() # Neither side wants to download, so break the connection
elif x[0]=="$ADCGET":
# args,info = self.transfer_upload(args,info,x) # All uploads currently disabled.
info["send"]("$Error You | |
key=lambda s: s[1]):
if not isinstance(name, tuple):
# one of our special columns, ignore:
continue
visible = columns_visible[column_index]
visible_item = QtGui.QStandardItem()
visible_item.setCheckable(True)
if visible:
visible_item.setCheckState(QtCore.Qt.Checked)
visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
else:
visible_item.setCheckState(QtCore.Qt.Unchecked)
visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
name_as_string = ', '.join(name).strip(', ')
name_item = QtGui.QStandardItem(name_as_string)
name_item.setData(name_as_string, self.ROLE_SORT_DATA)
self.model.appendRow([visible_item, name_item])
self.column_indices[self.model.rowCount() - 1] = column_index
self.ui.treeView.resizeColumnToContents(self.COL_NAME)
self.update_select_all_checkstate()
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
def on_treeView_context_menu_requested(self, point):
    """Pop up the show/hide context menu at the current cursor position."""
    context_menu = QtGui.QMenu(self.ui)
    for action in (self.action_set_selected_visible,
                   self.action_set_selected_hidden):
        context_menu.addAction(action)
    context_menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, visible):
    """Set every selected column's visibility to *visible* and sync the filebox."""
    source_rows = {
        self.proxy_model.mapToSource(index).row()
        for index in self.ui.treeView.selectedIndexes()
    }
    for source_row in source_rows:
        self.update_visible_state(
            self.model.item(source_row, self.COL_VISIBLE), visible)
    self.update_select_all_checkstate()
    self.do_sort()
    self.filebox.set_columns_visible(self.columns_visible)
def on_filter_text_edited(self, text):
    """Filter the column list as the user types, using wildcard matching."""
    self.proxy_model.setFilterWildcard(text)
def on_select_all_state_changed(self, state):
    """Toggle every column's visibility to match the select-all checkbox."""
    with self.select_all_checkbox_state_changed_disconnected:
        # Never allow the box to be toggled *into* the partial state:
        self.select_all_checkbox.setTristate(False)
        new_state = self.select_all_checkbox.checkState()
        for row in range(self.model.rowCount()):
            self.update_visible_state(
                self.model.item(row, self.COL_VISIBLE), new_state)
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
def update_visible_state(self, item, state):
    """Record *state* on a visibility item and mirror it into columns_visible."""
    assert item.column() == self.COL_VISIBLE, "unexpected column"
    with self.model_item_changed_disconnected:
        item.setCheckState(state)
        item.setData(state, self.ROLE_SORT_DATA)
        column_index = self.column_indices[item.row()]
        self.columns_visible[column_index] = (state == QtCore.Qt.Checked)
def update_select_all_checkstate(self):
    """Make the select-all checkbox summarize the check state of all rows."""
    with self.select_all_checkbox_state_changed_disconnected:
        states = [self.model.item(row, self.COL_VISIBLE).checkState()
                  for row in range(self.model.rowCount())]
        if all(s == QtCore.Qt.Checked for s in states):
            summary = QtCore.Qt.Checked
        elif all(s == QtCore.Qt.Unchecked for s in states):
            summary = QtCore.Qt.Unchecked
        else:
            summary = QtCore.Qt.PartiallyChecked
        self.select_all_checkbox.setCheckState(summary)
def on_model_item_changed(self, item):
    """React to a checkbox toggled directly in the model view."""
    self.update_visible_state(item, item.checkState())
    self.update_select_all_checkstate()
    self.do_sort()
    self.filebox.set_columns_visible(self.columns_visible)
def do_sort(self):
    """Re-apply whatever sort the header currently indicates."""
    header = self.ui.treeView.header()
    self.ui.treeView.sortByColumn(
        header.sortIndicatorSection(), header.sortIndicatorOrder())
def update_columns(self, column_names, columns_visible):
    """Adopt a new index/name mapping and refresh the model.

    Column indices may have been reshuffled, so the snapshot of pre-edit
    visibility is first keyed by *name*, then rebuilt against the new
    indices.  Columns never seen before default to visible so that
    cancelling the edit does not hide them.
    """
    prev_visible_by_name = {
        self.column_names[index]: visible
        for index, visible in self.old_columns_visible.items()
    }
    self.columns_visible = columns_visible.copy()
    self.column_names = column_names.copy()
    # Re-key the pre-edit snapshot by the new indices; unknown names are new
    # columns, which should stay visible if editing is cancelled:
    self.old_columns_visible = {
        index: prev_visible_by_name.get(name, True)
        for index, name in self.column_names.items()
    }
    self.populate_model(column_names, self.columns_visible)
def show(self):
    """Show the dialog, snapshotting visibility so cancel can restore it."""
    self.old_columns_visible = self.columns_visible.copy()
    self.ui.show()
def close(self):
    """Revert to the pre-edit visibility snapshot and hide the dialog."""
    self.columns_visible = self.old_columns_visible.copy()
    self.filebox.set_columns_visible(self.columns_visible)
    self.populate_model(self.column_names, self.columns_visible)
    self.ui.hide()
def cancel(self):
    """Discard edits; closing the ui routes through close(), restoring state."""
    self.ui.close()
def make_it_so(self):
    """Accept the current visibility settings and dismiss the dialog."""
    self.ui.hide()
class ItemDelegate(QtGui.QStyledItemDelegate):
    """An item delegate with a fixed height and a progress bar in one column"""
    EXTRA_ROW_HEIGHT = 2 # extra pixels added to the font height for every row
    def __init__(self, view, model, col_status, role_status_percent):
        """Remember the view/model and which column+role carry progress data."""
        self.view = view
        self.model = model
        self.COL_STATUS = col_status
        self.ROLE_STATUS_PERCENT = role_status_percent
        QtGui.QStyledItemDelegate.__init__(self)
    def sizeHint(self, *args):
        """Fix every row's height to the view's font height plus padding."""
        fontmetrics = QtGui.QFontMetrics(self.view.font())
        text_height = fontmetrics.height()
        row_height = text_height + self.EXTRA_ROW_HEIGHT
        size = QtGui.QStyledItemDelegate.sizeHint(self, *args)
        return QtCore.QSize(size.width(), row_height)
    def paint(self, painter, option, index):
        """Draw the status column as a progress bar until it reaches 100%."""
        if index.column() == self.COL_STATUS:
            status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
            if status_percent == 100:
                # Render as a normal item - this shows whatever icon is set instead of a progress bar.
                return QtGui.QStyledItemDelegate.paint(self, painter, option, index)
            else:
                # Method of rendering a progress bar into the view copied from
                # Qt's 'network-torrent' example:
                # http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
                # Set up a QStyleOptionProgressBar to precisely mimic the
                # environment of a progress bar.
                progress_bar_option = QtGui.QStyleOptionProgressBarV2()
                progress_bar_option.state = QtGui.QStyle.State_Enabled
                progress_bar_option.direction = qapplication.layoutDirection()
                progress_bar_option.rect = option.rect
                progress_bar_option.fontMetrics = qapplication.fontMetrics()
                progress_bar_option.minimum = 0
                progress_bar_option.maximum = 100
                progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
                progress_bar_option.textVisible = True
                # Set the progress and text values of the style option.
                progress_bar_option.progress = status_percent
                progress_bar_option.text = '%d%%' % status_percent
                # Draw the progress bar onto the view.
                qapplication.style().drawControl(QtGui.QStyle.CE_ProgressBar, progress_bar_option, painter)
        else:
            # Any other column paints normally.
            return QtGui.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
    """A QStandardItemModel whose items can never be edited in place."""
    def flags(self, index):
        """Return the default flags with ItemIsEditable always cleared."""
        default_flags = QtGui.QStandardItemModel.flags(self, index)
        return default_flags & ~QtCore.Qt.ItemIsEditable
class TableView(QtGui.QTableView):
    """A QTableView that emits a custom signal leftClicked(index) after a left
    click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click. Multiple inheritance of QObjects is not possible, so we
    are forced to duplicate code instead of sharing code with the extremely
    similar TreeView class in this module"""
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)
    def __init__(self, *args):
        QtGui.QTableView.__init__(self, *args)
        # Index the mouse went down on; the signal fires only if the release
        # lands on this same index:
        self._pressed_index = None
        # Whether the pending release is the second click of a double click:
        self._double_click = False
    def mousePressEvent(self, event):
        """Remember which valid index a left-button press landed on."""
        result = QtGui.QTableView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result
    def leaveEvent(self, event):
        """Abort any pending click when the pointer leaves the widget."""
        result = QtGui.QTableView.leaveEvent(self, event)
        self._pressed_index = None
        self._double_click = False
        return result
    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtGui.QTableView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result
    def mouseReleaseEvent(self, event):
        """Emit leftClicked (and doubleLeftClicked) when the release matches the press."""
        result = QtGui.QTableView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        # Reset click tracking regardless of whether a signal was emitted:
        self._pressed_index = None
        self._double_click = False
        return result
class DataFrameModel(QtCore.QObject):
COL_STATUS = 0
COL_FILEPATH = 1
ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
columns_changed = Signal()
def __init__(self, view, exp_config):
    """Build the Qt model, headers and delegate behind the shot table *view*."""
    QtCore.QObject.__init__(self)
    self._view = view
    self.exp_config = exp_config # lab configuration; used for HDF5 viewer lookup
    self._model = UneditableModel()
    # Small-font stylesheet shared by both headers:
    headerview_style = """
                       QHeaderView {
                         font-size: 8pt;
                         color: black;
                       }
                       QHeaderView::section{
                         font-size: 8pt;
                         color: black;
                       }
                       """
    self._header = HorizontalHeaderViewWithWidgets(self._model)
    self._vertheader = QtGui.QHeaderView(QtCore.Qt.Vertical)
    self._vertheader.setResizeMode(QtGui.QHeaderView.Fixed)
    self._vertheader.setStyleSheet(headerview_style)
    self._header.setStyleSheet(headerview_style)
    # Allow the user to select/highlight whole rows via the vertical header:
    self._vertheader.setHighlightSections(True)
    self._vertheader.setClickable(True)
    self._view.setModel(self._model)
    self._view.setHorizontalHeader(self._header)
    self._view.setVerticalHeader(self._vertheader)
    # Custom delegate draws the status column as a progress bar:
    self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
    self._view.setItemDelegate(self._delegate)
    self._view.setSelectionBehavior(QtGui.QTableView.SelectRows)
    self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    # This dataframe will contain all the scalar data
    # from the shot files that are currently open:
    index = pandas.MultiIndex.from_tuples([('filepath', '')])
    self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
    # How many levels the dataframe's multiindex has:
    self.nlevels = self.dataframe.columns.nlevels
    # Status column header: icon only, no text:
    status_item = QtGui.QStandardItem()
    status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
    status_item.setToolTip('status/progress of single-shot analysis')
    self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
    filepath_item = QtGui.QStandardItem('filepath')
    filepath_item.setToolTip('filepath')
    self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
    self._view.setColumnWidth(self.COL_STATUS, 70)
    self._view.setColumnWidth(self.COL_FILEPATH, 100)
    # Column indices to names and vice versa for fast lookup:
    self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
    self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
    self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
    # Whether or not a deleted column was visible at the time it was deleted (by name):
    self.deleted_columns_visible = {}
    # Make the actions for the context menu:
    self.action_remove_selected = QtGui.QAction(
        QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
    self.connect_signals()
def connect_signals(self):
    """Hook up the context-menu plumbing for the shot table view."""
    view = self._view
    view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
    self.action_remove_selected.triggered.connect(self.on_remove_selection)
def get_model_row_by_filepath(self, filepath):
    """Return the model row holding *filepath*.

    Raises LookupError if the filepath is absent or appears more than once.
    """
    # The filepath column name is padded with empty strings to the current
    # multiindex depth:
    colname = ('filepath',) + ('',) * (self.nlevels - 1)
    matches = self._model.findItems(filepath, column=self.column_indices[colname])
    if not matches:
        raise LookupError('No item found')
    if len(matches) > 1:
        raise LookupError('Multiple items found')
    return matches[0].index().row()
def on_remove_selection(self, confirm=True):
    """Remove the selected shots from both the dataframe and the Qt model.

    :param confirm: when True, ask the user before deleting anything.
    """
    selection_model = self._view.selectionModel()
    selected_indexes = selection_model.selectedRows()
    selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
    if not selected_name_items:
        return
    if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
        return
    # Remove from DataFrame first:
    # NOTE(review): drop() is given model row numbers as index labels; this is
    # only valid while the dataframe keeps a default RangeIndex in sync with
    # the model rows -- confirm that invariant is maintained elsewhere.
    self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
    self.dataframe.index = pandas.Index(range(len(self.dataframe)))
    # Delete one at a time from Qt model:
    # (row() is re-queried per item, so row shifts from earlier removals are safe)
    for name_item in selected_name_items:
        row = name_item.row()
        self._model.removeRow(row)
def mark_selection_not_done(self):
    """Reset the analysis progress of every selected shot to zero percent."""
    rows = {index.row() for index in self._view.selectedIndexes()}
    for row in rows:
        self._model.item(row, self.COL_STATUS).setData(0, self.ROLE_STATUS_PERCENT)
def on_view_context_menu_requested(self, point):
    """Show the right-click menu for the shot table."""
    context_menu = QtGui.QMenu(self._view)
    context_menu.addAction(self.action_remove_selected)
    context_menu.exec_(QtGui.QCursor.pos())
def on_double_click(self, index):
    """Open the double-clicked shot's HDF5 file in the configured viewer.

    Fix vs. original: when no viewer is configured we now return after
    showing the error dialog instead of falling through and attempting to
    launch an empty program path.
    """
    filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
    shot_filepath = filepath_item.text()
    # Viewer executable and argument template come from the lab config:
    viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
    viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
    if not viewer_path:
        error_dialog("No hdf5 viewer specified in the labconfig.")
        return  # was missing: previously fell through and ran Popen with no viewer
    if '{file}' in viewer_args:
        # Split the args on spaces into a list, replacing {file} with the shot file:
        viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
    else:
        # Otherwise if {file} isn't already in there, append it to the other args:
        viewer_args = [shot_filepath] + viewer_args.split()
    try:
        subprocess.Popen([viewer_path] + viewer_args)
    except Exception as e:
        error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
                     (self.exp_config.config_path, str(e)))
def set_columns_visible(self, columns_visible):
    """Store the visibility mapping and apply it to the table view."""
    self.columns_visible = columns_visible
    for column_index, shown in columns_visible.items():
        self._view.setColumnHidden(column_index, not shown)
def update_column_levels(self):
"""Pads the keys and values of our lists of column names so that
they still match those in the dataframe after the number of
levels in its multiindex has increased"""
extra_levels = self.dataframe.columns.nlevels - self.nlevels
if extra_levels > 0:
self.nlevels = self.dataframe.columns.nlevels
column_indices = {}
column_names = {}
for column_name in self.column_indices:
if not isinstance(column_name, tuple):
# It's one of our special columns
new_column_name = column_name
else:
new_column_name = column_name + ('',) * extra_levels
column_index = self.column_indices[column_name]
column_indices[new_column_name] = column_index
column_names[column_index] = new_column_name
self.column_indices | |
counts by position.
def one_row(r):
digits = [x for x in r.Protein_Change if x.isdigit()]
if not digits:
return np.nan
return int(''.join(digits))
df4['Protein_Position'] = df4.apply(one_row, axis=1)
df4 = df4.dropna(subset=['Protein_Position'])
df4 = df4.sort_values(['Protein_Position'])
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
for i, nonsyn_name in enumerate(NONSYN_NAMES):
temp = df4[df4.Variant_Classification == nonsyn_name]
color = NONSYN_COLORS[i]
ax.vlines(temp.Protein_Position, ymin=0, ymax=temp.Count,
alpha=alpha, color=color)
ax.plot(temp.Protein_Position, temp.Count, 'o', alpha=alpha,
color=color, label=nonsyn_name)
ax.set_xlabel('Position')
ax.set_ylabel('Count')
if legend:
ax.legend()
return ax
def plot_mutated(
    self, af=None, group_col=None, group_order=None, genes=None,
    count=10, ax=None, figsize=None, **kwargs
):
    """
    Create a bar plot visualizing the mutation prevalence of top
    mutated genes.

    Parameters
    ----------
    af : AnnFrame, optional
        AnnFrame containing sample annotation data.
    group_col : str, optional
        AnnFrame column containing sample group information.
    group_order : list, optional
        List of sample group names.
    genes : list, optional
        Genes to display. When absent, top mutated genes (``count``) will
        be used.
    count : int, default: 10
        Number of top mutated genes to display. Ignored if ``genes`` is
        specified.
    ax : matplotlib.axes.Axes, optional
        Pre-existing axes for the plot. Otherwise, create a new one.
    figsize : tuple, optional
        Width, height in inches. Format: (float, float).
    kwargs
        Other keyword arguments will be passed down to
        :meth:`seaborn.barplot`.

    Returns
    -------
    matplotlib.axes.Axes
        The matplotlib axes containing the plot.

    Examples
    --------
    Below is a simple example:

    .. plot::
        :context: close-figs

        >>> import matplotlib.pyplot as plt
        >>> import seaborn as sns
        >>> from fuc import common, pymaf
        >>> common.load_dataset('tcga-laml')
        >>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
        >>> mf = pymaf.MafFrame.from_file(maf_file)
        >>> mf.plot_mutated()
        >>> plt.tight_layout()

    We can create a grouped bar plot based on FAB classification:

    .. plot::
        :context: close-figs

        >>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
        >>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
        >>> mf.plot_mutated(af=af,
        ...                 group_col='FAB_classification',
        ...                 group_order=['M0', 'M1', 'M2'])
        >>> plt.tight_layout()
    """
    df = self.matrix_prevalence()
    # Determine which genes to display.
    if genes is None:
        genes = self.matrix_genes(count=count).index.to_list()
    df = df.loc[genes]
    # Reduce per-sample mutation counts to mutated-or-not booleans:
    df = df.applymap(bool)
    if group_col is None:
        # Prevalence across the whole cohort:
        df = (df.sum(axis=1) / df.shape[1]).to_frame().reset_index()
        df.columns.values[1] = 'Prevalence'
    else:
        # Prevalence per sample group from the annotation frame:
        df = df.T
        df = pd.merge(df, af.df[group_col], left_index=True, right_index=True)
        df = df.groupby([group_col]).mean().reset_index()
        df = df.melt(id_vars=[group_col])
        df.columns = [group_col, 'Hugo_Symbol', 'Prevalence']
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    # Fix vs. original: **kwargs are now forwarded to seaborn.barplot as the
    # docstring always promised (matching plot_mutated_matched).
    sns.barplot(
        x='Hugo_Symbol', y='Prevalence', data=df, hue=group_col,
        hue_order=group_order, ax=ax, **kwargs
    )
    ax.set_xlabel('')
    return ax
def plot_mutated_matched(
    self, af, patient_col, group_col, group_order, ax=None, figsize=None,
    **kwargs
):
    """
    Create a bar plot visualizing the mutation prevalence of top
    mutated genes.

    Parameters
    ----------
    af : AnnFrame
        AnnFrame containing sample annotation data.
    patient_col : str
        AnnFrame column containing patient information.
    group_col : str
        AnnFrame column containing sample group information.
    group_order : list
        List of sample group names.
    ax : matplotlib.axes.Axes, optional
        Pre-existing axes for the plot. Otherwise, create a new one.
    figsize : tuple, optional
        Width, height in inches. Format: (float, float).
    kwargs
        Other keyword arguments will be passed down to
        :meth:`seaborn.barplot`.

    Returns
    -------
    matplotlib.axes.Axes
        The matplotlib axes containing the plot.
    """
    matrix = self.matrix_waterfall_matched(af, patient_col, group_col, group_order)
    # 1 where a patient/group has any mutation in the gene, 0 otherwise:
    mutated = matrix.applymap(lambda x: 0 if x == 'None' else 1)
    percent = mutated.sum(axis=1) / len(mutated.columns) * 100
    percent.name = 'Count'
    data = percent.to_frame().reset_index()
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    sns.barplot(
        x='Count', y='Gene', hue='Group', data=data, hue_order=group_order,
        orient='h', ax=ax, **kwargs
    )
    ax.set_xlabel('Patients (%)')
    ax.set_ylabel('')
    return ax
def plot_rainfall(
    self, sample, palette=None, legend='auto', ax=None, figsize=None,
    **kwargs
):
    """
    Create a rainfall plot visualizing inter-variant distance on a linear
    genomic scale for single sample.

    Parameters
    ----------
    sample : str
        Name of the sample.
    palette : str, optional
        Name of the seaborn palette. See the :ref:`tutorials:Control plot
        colors` tutorial for details.
    legend : {'auto', 'brief', 'full', False}, default: 'auto'
        Display setting of the legend according to
        :meth:`seaborn.scatterplot`.
    ax : matplotlib.axes.Axes, optional
        Pre-existing axes for the plot. Otherwise, create a new one.
    figsize : tuple, optional
        Width, height in inches. Format: (float, float).
    kwargs
        Other keyword arguments will be passed down to
        :meth:`seaborn.scatterplot`.

    Returns
    -------
    matplotlib.axes.Axes
        The matplotlib axes containing the plot.

    Examples
    --------
    .. plot::

        >>> import matplotlib.pyplot as plt
        >>> import seaborn as sns
        >>> from fuc import common, pymaf
        >>> common.load_dataset('brca')
        >>> maf_file = '~/fuc-data/brca/brca.maf.gz'
        >>> mf = pymaf.MafFrame.from_file(maf_file)
        >>> mf.plot_rainfall('TCGA-A8-A08B',
        ...                  figsize=(14, 7),
        ...                  palette=sns.color_palette('Set2')[:6])
        >>> plt.tight_layout()
    """
    # Select variants from the sample.
    df = self.df[self.df.Tumor_Sample_Barcode == sample]
    # Remove indels.
    df = df[df.Variant_Type == 'SNP']
    # Raise an error if there are no SNVs to plot.
    if df.empty:
        message = (
            'There are no SNVs to be drawn '
            f"for the sample: '{sample}'."
        )
        raise ValueError(message)
    # Get SNV class for each variant (e.g. C>T) via the SNV_CLASSES table.
    def one_row(r):
        change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
        return SNV_CLASSES[change]['class']
    df['SNV_Class'] = df.apply(one_row, axis=1)
    # Convert string chromosomes to integers for ordering (X -> 23, Y -> 24).
    def one_row(r):
        r.Chromosome = int(r.Chromosome.replace(
            'chr', '').replace('X', '23').replace('Y', '24'))
        return r
    df = df.apply(one_row, axis=1)
    df = df[['Chromosome', 'Start_Position', 'SNV_Class']]
    df = df.sort_values(['Chromosome', 'Start_Position'])
    # Update positions as if all chromosomes are one long molecule, by
    # offsetting each variant by the summed lengths of earlier chromosomes.
    def one_row(r):
        if r.Chromosome == 1:
            return r
        r.Start_Position += sum(CHROM_LENGTHS['hg19'][:r.Chromosome-1])
        return r
    df = df.apply(one_row, axis=1)
    # Log10 distance to the previous variant (first variant gets distance 0).
    s = np.diff(df.Start_Position)
    s = np.insert(s, 0, 0)
    s = np.log10(s + 1)
    df['Interevent_Distance'] = s
    df = df.reset_index(drop=True)
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    # Chromosome boundaries: the row index of each chromosome's last variant.
    bounds = [0] + df.drop_duplicates(subset=['Chromosome'],
                                      keep='last').index.to_list()
    # Tick positions: midpoint of each chromosome's span of rows.
    xticks = []
    for i, bound in enumerate(bounds):
        if i == 0:
            continue
        elif i == 1:
            xticks.append(bound / 2)
        else:
            xticks.append(bounds[i-1] + (bound - bounds[i-1]) / 2)
    for bound in bounds:
        ax.axvline(x=bound, color='lightgray', zorder=1)
    sns.scatterplot(
        x=df.index, y='Interevent_Distance', data=df, hue='SNV_Class',
        hue_order=SNV_CLASS_ORDER, palette=palette, ax=ax, legend=legend,
        zorder=2, **kwargs
    )
    ax.set_xlabel('Chromosomes')
    ax.set_ylabel('Interevent distance')
    ax.set_xticks(xticks)
    # Map the integer codes back to display names for X and Y.
    ax.set_xticklabels(['X' if x == 23 else 'Y' if x == 24 else x
                        for x in df.Chromosome.unique()])
    return ax
def plot_snvclsc(
self, af=None, group_col=None, group_order=None, palette=None,
flip=False, ax=None, figsize=None, **kwargs
):
"""
Create a bar plot summarizing the count distrubtions of the six
:ref:`glossary:SNV classes` for all samples.
A grouped bar plot can be created with ``group_col`` (requires an AnnFrame).
Parameters
----------
af : AnnFrame, optional
AnnFrame containing sample annotation data.
group_col : str, optional
AnnFrame column containing sample group information.
group_order : list, optional
List of sample group names.
palette : str, optional
Name of the seaborn palette. See the :ref:`tutorials:Control plot
colors` tutorial for details.
flip : bool, default: False
If True, flip the x and y axes.
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, crete a new one.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
kwargs
Other keyword arguments will be passed down to
:meth:`seaborn.barplot`.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the plot.
See Also
--------
MafFrame.plot_snvclsp
Create a box plot summarizing the proportion distrubtions of
the six :ref:`glossary:SNV classes` for all sample.
MafFrame.plot_snvclss
Create a bar plot showing the proportions of the six
:ref:`glossary:SNV classes` for individual samples.
Examples
--------
Below is a simple example:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns
>>> from fuc import common, pymaf
>>> common.load_dataset('tcga-laml')
>>> maf_file = '~/fuc-data/tcga-laml/tcga_laml.maf.gz'
>>> mf = pymaf.MafFrame.from_file(maf_file)
>>> mf.plot_snvclsc(palette=sns.color_palette('Dark2'))
>>> plt.tight_layout()
We can create a grouped bar plot based on FAB classification:
.. plot::
:context: close-figs
>>> annot_file = '~/fuc-data/tcga-laml/tcga_laml_annot.tsv'
>>> af = common.AnnFrame.from_file(annot_file, sample_col=0)
>>> mf.plot_snvclsc(af=af,
... group_col='FAB_classification',
... group_order=['M0', 'M1', 'M2'])
>>> plt.tight_layout()
"""
# Add the SNV_Class column.
df = self.df[self.df.Variant_Type == 'SNP']
def one_row(r):
change = r.Reference_Allele + '>' + r.Tumor_Seq_Allele2
return SNV_CLASSES[change]['class']
s = df.apply(one_row, axis=1)
s.name = 'SNV_Class'
df = pd.concat([df, s], axis=1)
# Count the occurance of each SNV class.
if group_col is not None:
df = pd.merge(df, af.df[group_col], left_on='Tumor_Sample_Barcode',
right_index=True)
s = df.groupby([group_col]).SNV_Class.value_counts()
df = s.to_frame().rename(columns={'SNV_Class': 'Count'}
).reset_index()
else:
s = df.SNV_Class.value_counts()
df = s.to_frame().reset_index()
df.columns = ['SNV_Class', 'Count']
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
if flip:
x, y = 'Count', 'SNV_Class'
xlabel, ylabel = 'Count', ''
else:
x, y = 'SNV_Class', 'Count'
xlabel, ylabel = '', 'Count'
sns.barplot(
x=x, y=y, data=df, ax=ax, hue=group_col, hue_order=group_order,
palette=palette, order=SNV_CLASS_ORDER, **kwargs
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_snvclsp(
self, af=None, group_col=None, | |
<filename>pret/redtools/gbz80disasm.py
from __future__ import print_function
from __future__ import absolute_import
#author: <NAME> <<EMAIL>>
#date: 2012-01-09
from . import extract_maps
import os
import json
from copy import copy, deepcopy
from .pretty_map_headers import random_hash, map_name_cleaner
from ctypes import c_int8
import sys
spacing = "\t"
temp_opt_table = [
[ "ADC A", 0x8f, 0 ],
[ "ADC B", 0x88, 0 ],
[ "ADC C", 0x89, 0 ],
[ "ADC D", 0x8a, 0 ],
[ "ADC E", 0x8b, 0 ],
[ "ADC H", 0x8c, 0 ],
[ "ADC [HL]", 0x8e, 0 ],
[ "ADC L", 0x8d, 0 ],
[ "ADC x", 0xce, 1 ],
[ "ADD A", 0x87, 0 ],
[ "ADD B", 0x80, 0 ],
[ "ADD C", 0x81, 0 ],
[ "ADD D", 0x82, 0 ],
[ "ADD E", 0x83, 0 ],
[ "ADD H", 0x84, 0 ],
[ "ADD [HL]", 0x86, 0 ],
[ "ADD HL, BC", 0x9, 0 ],
[ "ADD HL, DE", 0x19, 0 ],
[ "ADD HL, HL", 0x29, 0 ],
[ "ADD HL, SP", 0x39, 0 ],
[ "ADD L", 0x85, 0 ],
[ "ADD SP, x", 0xe8, 1 ],
[ "ADD x", 0xc6, 1 ],
[ "AND A", 0xa7, 0 ],
[ "AND B", 0xa0, 0 ],
[ "AND C", 0xa1, 0 ],
[ "AND D", 0xa2, 0 ],
[ "AND E", 0xa3, 0 ],
[ "AND H", 0xa4, 0 ],
[ "AND [HL]", 0xa6, 0 ],
[ "AND L", 0xa5, 0 ],
[ "AND x", 0xe6, 1 ],
[ "BIT 0, A", 0x47cb, 3 ],
[ "BIT 0, B", 0x40cb, 3 ],
[ "BIT 0, C", 0x41cb, 3 ],
[ "BIT 0, D", 0x42cb, 3 ],
[ "BIT 0, E", 0x43cb, 3 ],
[ "BIT 0, H", 0x44cb, 3 ],
[ "BIT 0, [HL]", 0x46cb, 3 ],
[ "BIT 0, L", 0x45cb, 3 ],
[ "BIT 1, A", 0x4fcb, 3 ],
[ "BIT 1, B", 0x48cb, 3 ],
[ "BIT 1, C", 0x49cb, 3 ],
[ "BIT 1, D", 0x4acb, 3 ],
[ "BIT 1, E", 0x4bcb, 3 ],
[ "BIT 1, H", 0x4ccb, 3 ],
[ "BIT 1, [HL]", 0x4ecb, 3 ],
[ "BIT 1, L", 0x4dcb, 3 ],
[ "BIT 2, A", 0x57cb, 3 ],
[ "BIT 2, B", 0x50cb, 3 ],
[ "BIT 2, C", 0x51cb, 3 ],
[ "BIT 2, D", 0x52cb, 3 ],
[ "BIT 2, E", 0x53cb, 3 ],
[ "BIT 2, H", 0x54cb, 3 ],
[ "BIT 2, [HL]", 0x56cb, 3 ],
[ "BIT 2, L", 0x55cb, 3 ],
[ "BIT 3, A", 0x5fcb, 3 ],
[ "BIT 3, B", 0x58cb, 3 ],
[ "BIT 3, C", 0x59cb, 3 ],
[ "BIT 3, D", 0x5acb, 3 ],
[ "BIT 3, E", 0x5bcb, 3 ],
[ "BIT 3, H", 0x5ccb, 3 ],
[ "BIT 3, [HL]", 0x5ecb, 3 ],
[ "BIT 3, L", 0x5dcb, 3 ],
[ "BIT 4, A", 0x67cb, 3 ],
[ "BIT 4, B", 0x60cb, 3 ],
[ "BIT 4, C", 0x61cb, 3 ],
[ "BIT 4, D", 0x62cb, 3 ],
[ "BIT 4, E", 0x63cb, 3 ],
[ "BIT 4, H", 0x64cb, 3 ],
[ "BIT 4, [HL]", 0x66cb, 3 ],
[ "BIT 4, L", 0x65cb, 3 ],
[ "BIT 5, A", 0x6fcb, 3 ],
[ "BIT 5, B", 0x68cb, 3 ],
[ "BIT 5, C", 0x69cb, 3 ],
[ "BIT 5, D", 0x6acb, 3 ],
[ "BIT 5, E", 0x6bcb, 3 ],
[ "BIT 5, H", 0x6ccb, 3 ],
[ "BIT 5, [HL]", 0x6ecb, 3 ],
[ "BIT 5, L", 0x6dcb, 3 ],
[ "BIT 6, A", 0x77cb, 3 ],
[ "BIT 6, B", 0x70cb, 3 ],
[ "BIT 6, C", 0x71cb, 3 ],
[ "BIT 6, D", 0x72cb, 3 ],
[ "BIT 6, E", 0x73cb, 3 ],
[ "BIT 6, H", 0x74cb, 3 ],
[ "BIT 6, [HL]", 0x76cb, 3 ],
[ "BIT 6, L", 0x75cb, 3 ],
[ "BIT 7, A", 0x7fcb, 3 ],
[ "BIT 7, B", 0x78cb, 3 ],
[ "BIT 7, C", 0x79cb, 3 ],
[ "BIT 7, D", 0x7acb, 3 ],
[ "BIT 7, E", 0x7bcb, 3 ],
[ "BIT 7, H", 0x7ccb, 3 ],
[ "BIT 7, [HL]", 0x7ecb, 3 ],
[ "BIT 7, L", 0x7dcb, 3 ],
[ "CALL C, ?", 0xdc, 2 ],
[ "CALL NC, ?", 0xd4, 2 ],
[ "CALL NZ, ?", 0xc4, 2 ],
[ "CALL Z, ?", 0xcc, 2 ],
[ "CALL ?", 0xcd, 2 ],
[ "CCF", 0x3f, 0 ],
[ "CP A", 0xbf, 0 ],
[ "CP B", 0xb8, 0 ],
[ "CP C", 0xb9, 0 ],
[ "CP D", 0xba, 0 ],
[ "CP E", 0xbb, 0 ],
[ "CP H", 0xbc, 0 ],
[ "CP [HL]", 0xbe, 0 ],
[ "CPL", 0x2f, 0 ],
[ "CP L", 0xbd, 0 ],
[ "CP x", 0xfe, 1 ],
[ "DAA", 0x27, 0 ],
[ "DEBUG", 0xed, 0 ],
[ "DEC A", 0x3d, 0 ],
[ "DEC B", 0x5, 0 ],
[ "DEC BC", 0xb, 0 ],
[ "DEC C", 0xd, 0 ],
[ "DEC D", 0x15, 0 ],
[ "DEC DE", 0x1b, 0 ],
[ "DEC E", 0x1d, 0 ],
[ "DEC H", 0x25, 0 ],
[ "DEC HL", 0x2b, 0 ],
[ "DEC [HL]", 0x35, 0 ],
[ "DEC L", 0x2d, 0 ],
[ "DEC SP", 0x3b, 0 ],
[ "DI", 0xf3, 0 ],
[ "EI", 0xfb, 0 ],
[ "HALT", 0x76, 0 ],
[ "INC A", 0x3c, 0 ],
[ "INC B", 0x4, 0 ],
[ "INC BC", 0x3, 0 ],
[ "INC C", 0xc, 0 ],
[ "INC D", 0x14, 0 ],
[ "INC DE", 0x13, 0 ],
[ "INC E", 0x1c, 0 ],
[ "INC H", 0x24, 0 ],
[ "INC HL", 0x23, 0 ],
[ "INC [HL]", 0x34, 0 ],
[ "INC L", 0x2c, 0 ],
[ "INC SP", 0x33, 0 ],
[ "JP C, ?", 0xda, 2 ],
[ "JP HL", 0xe9, 0 ],
[ "JP NC, ?", 0xd2, 2 ],
[ "JP NZ, ?", 0xc2, 2 ],
[ "JP Z, ?", 0xca, 2 ],
[ "JP ?", 0xc3, 2 ],
[ "JR C, x", 0x38, 1 ],
[ "JR NC, x", 0x30, 1 ],
[ "JR NZ, x", 0x20, 1 ],
[ "JR Z, x", 0x28, 1 ],
[ "JR x", 0x18, 1 ],
[ "LD A, A", 0x7f, 0 ],
[ "LD A, B", 0x78, 0 ],
[ "LD A, C", 0x79, 0 ],
[ "LD A, D", 0x7a, 0 ],
[ "LD A, E", 0x7b, 0 ],
[ "LD A, H", 0x7c, 0 ],
[ "LD A, L", 0x7d, 0 ],
[ "LD A, [$FF00+C]", 0xf2, 0 ],
[ "LD A, [$FF00+x]", 0xf0, 1 ],
# [ "LDH A, [x]", 0xf0, 1 ], #rgbds has trouble with this one?
[ "LD A, [BC]", 0xa, 0 ],
[ "LD A, [DE]", 0x1a, 0 ],
# [ "LD A, [HL+]", 0x2a, 0 ],
# [ "LD A, [HL-]", 0x3a, 0 ],
[ "LD A, [HL]", 0x7e, 0 ],
[ "LD A, [HLD]", 0x3a, 0 ],
[ "LD A, [HLI]", 0x2a, 0 ],
[ "LD A, [?]", 0xfa, 2 ],
[ "LD A, x", 0x3e, 1 ],
[ "LD B, A", 0x47, 0 ],
[ "LD B, B", 0x40, 0 ],
[ "LD B, C", 0x41, 0 ],
[ "LD [BC], A", 0x2, 0 ],
[ "LD B, D", 0x42, 0 ],
[ "LD B, E", 0x43, 0 ],
[ "LD B, H", 0x44, 0 ],
[ "LD B, [HL]", 0x46, 0 ],
[ "LD B, L", 0x45, 0 ],
[ "LD B, x", 0x6, 1 ],
[ "LD C, A", 0x4f, 0 ],
[ "LD C, B", 0x48, 0 ],
[ "LD C, C", 0x49, 0 ],
[ "LD C, D", 0x4a, 0 ],
[ "LD C, E", 0x4b, 0 ],
[ "LD C, H", 0x4c, 0 ],
[ "LD C, [HL]", 0x4e, 0 ],
[ "LD C, L", 0x4d, | |
# Starts from OS Mastermap base map and:
# 1. Assigns CEH Landcover map (LCM) definition of either Arable or Improved grassland to agricultural land polygons
# 2. Assigns Rural Payments Agency CROME Crop map data (input must be dissolved by land use code and joined to description
# and simplified description (Arable, Improved grassland, Short-rotation coppice)
# 3. Assigns Natural England Priority Habitat data.
# Set up to loop through a set of Local Authority Districts
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
# --- Environment setup -------------------------------------------------------
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
arcpy.env.overwriteOutput = True  # Overwrites files
arcpy.env.qualifiedFieldNames = False  # Joined fields will be exported without the join table name
arcpy.env.XYTolerance = "0.001 Meters"

# --- Run configuration -------------------------------------------------------
# Pick the study region and interpretation method by (un)commenting below.
# region = "Oxon"
region = "Arc"
# method = "HLU"
method = "CROME_LCM_PHI"

if method == "CROME_LCM_PHI":
    folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\OxCamArc"
    if region == "Arc":
        LADs_included = ["Bedfordshire", "Buckinghamshire", "Cambridgeshire", "Northamptonshire"]
        Hab_field = "Interpreted_habitat"
    elif region == "Oxon":
        LADs_included = ["Oxfordshire"]
        # NOTE(review): capital "H" here vs lower-case "habitat" in the Arc
        # branch above — confirm the field really differs between datasets.
        Hab_field = "Interpreted_Habitat"
    # "Data\Data.gdb" is not a raw string, but "\D" is not an escape sequence
    # so the backslash survives intact on Windows.
    data_gdb = os.path.join(folder, "Data\Data.gdb")
    LAD_table = os.path.join(data_gdb, "Arc_LADs")
    CROME_data = os.path.join(data_gdb, "CROME_Arc_dissolve")
elif region == "Oxon" and method == "HLU":
    # Operate in the Oxon_county folder
    folder = r"C:\Users\cenv0389\Documents\Oxon_GIS\Oxon_county\Data"
    data_gdb = os.path.join(folder, "Data.gdb")
    LAD_table = os.path.join(folder, "Data.gdb", "Oxon_LADs")
    CROME_data = os.path.join(data_gdb, "CROME_Oxon_dissolve")
    Hab_field = "BAP_Habitat"
else:
    # Any other region/method combination is unsupported.
    print("ERROR: you cannot combine region " + region + " with method " + method)
    exit()

LAD_names = []  # Populated later from the LAD table for the included counties.
# OS MasterMap attribute fields that must be retained on the working copy.
needed_fields = ["TOID", "Theme", "DescriptiveGroup", "DescriptiveTerm", "Make", "OSMM_hab"]

# What method are we using to create the base map? Merge or intersect? This affects the processing stages used.
# merge_or_intersect = "intersect"
merge_or_intersect = "merge"

# Which stages of the code do we want to run? Depends on whether we are using merge or intersect to create the base map,
# as the merge is a two-stage process in which this script is called twice. Also useful for debugging or updates.
if merge_or_intersect == "intersect":
    process_LCM = False
    process_CROME = True
    process_PHI = True
    delete_landform = False
    intersect_PHI = False
    interpret_PHI = True
    out_fc = "OSMM_LCM_PHI_intersect"
elif merge_or_intersect == "merge":
    # Change step = 1 to step = 2 after running Merge_into_base_map to merge OSMM_LCM with PHI
    step = 2
    if step == 1:
        process_LCM = True
        process_CROME = True
        process_PHI = True
        delete_landform = True
        intersect_PHI = False
        interpret_PHI = False
    elif step == 2:
        process_LCM = False
        process_CROME = False
        process_PHI = True
        delete_landform = False
        intersect_PHI = False
        interpret_PHI = True
    out_fc = "OSMM_LCM_PHI_merge"
# Build the list of LAD (Local Authority District) names to process, keeping
# only districts whose county is in LADs_included. Spaces are stripped so the
# names match the per-LAD geodatabase file names (e.g. "AylesburyVale.gdb").
arcpy.env.workspace = data_gdb
# NOTE(review): LAD_table is already a full path, so os.path.join(data_gdb,
# LAD_table) returns LAD_table unchanged — harmless but redundant; confirm.
LADs = arcpy.SearchCursor(os.path.join(data_gdb, LAD_table))
for LAD in LADs:
    LAD_full_name = LAD.getValue("desc_")
    LAD_county = LAD.getValue("county")
    if LAD_county in LADs_included:
        LAD_name = LAD_full_name.replace(" ", "")
        LAD_names.append(LAD_name)
# Now process each LAD gdb
# Use CEH LCM to determine whether OSMM 'Agricultural land' is arable or improved grassland.
if process_LCM:
    for LAD in LAD_names:
        print ("Processing " + LAD)
        # Each LAD has its own geodatabase named after it.
        arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
        print("Copying OSMM to OSMM_LCM")
        arcpy.CopyFeatures_management("OSMM", "OSMM_LCM")
        print ("Adding LCM farmland interpretation to " + LAD)
        # Strip all attribute fields except the OSMM ones we need.
        MyFunctions.delete_fields("OSMM_LCM", needed_fields, "")
        print (" Adding habitat fields")
        MyFunctions.check_and_add_field("OSMM_LCM", "LCM_farmland", "TEXT", 100)
        MyFunctions.check_and_add_field("OSMM_LCM", Hab_field, "TEXT", 100)
        # Seed the interpreted habitat with the raw OSMM habitat value.
        arcpy.CalculateField_management("OSMM_LCM", Hab_field, "!OSMM_hab!", "PYTHON_9.3")
        print (" Identifying arable land")
        # Agricultural/natural-surface polygons whose centre falls inside an
        # LCM arable polygon are flagged (and interpreted) as Arable.
        arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr")
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
        arcpy.SelectLayerByLocation_management("ag_lyr", "HAVE_THEIR_CENTER_IN", "LCM_arable", selection_type="SUBSET_SELECTION")
        arcpy.CalculateField_management("ag_lyr","LCM_farmland", "'Arable'", "PYTHON_9.3")
        arcpy.CalculateField_management("ag_lyr", Hab_field, "'Arable'", "PYTHON_9.3")
        arcpy.Delete_management("ag_lyr")
        print (" Identifying improved grassland")
        # Same centre-in test against the LCM improved grassland layer; only
        # LCM_farmland is set here — the interpreted field is refined below.
        arcpy.MakeFeatureLayer_management("OSMM_LCM", "ag_lyr2")
        arcpy.SelectLayerByAttribute_management("ag_lyr2", where_clause="OSMM_hab = 'Agricultural land' OR OSMM_hab = 'Natural surface'")
        arcpy.SelectLayerByLocation_management("ag_lyr2", "HAVE_THEIR_CENTER_IN", "LCM_improved_grassland",
                                               selection_type="SUBSET_SELECTION")
        arcpy.CalculateField_management("ag_lyr2", "LCM_farmland", "'Improved grassland'", "PYTHON_9.3")
        arcpy.Delete_management("ag_lyr2")
        # Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
        # unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
        expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Improved grassland'")
        expression = "LCM_farmland = 'Improved grassland' AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
        MyFunctions.select_and_copy("OSMM_LCM", Hab_field, expression, "'Amenity grassland'")
        print(''.join(["## Finished on : ", time.ctime()]))
# Add crop type from CROME map, but only for agricultural land. This is probably better data then LCM and is freely available.
# This assumes we are adding CROME after adding LCM (so the Interpreted habitat field is already added and populated in the process_LCM
# step above), but in fact it is probably best just to use CROME (once we have tested vs LCM), so need to modify this step to include
# adding the interpreted habitat field
if process_CROME:
    for LAD in LAD_names:
        print ("Processing " + LAD)
        arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
        in_map = "OSMM_LCM"
        out_map = in_map + "_CROME"
        print("Copying " + in_map + " to " + out_map)
        arcpy.CopyFeatures_management(in_map, out_map)
        print ("Adding CROME farmland interpretation to " + LAD)
        print (" Adding habitat fields")
        MyFunctions.check_and_add_field(out_map, "CROME_farmland", "TEXT", 50)
        print(" Copying OBJECTID for base map")
        # Stable copy of OBJECTID so rows can be re-identified after the
        # tabulate-intersection and join steps below.
        MyFunctions.check_and_add_field(out_map, "BaseID_CROME", "LONG", 0)
        arcpy.CalculateField_management(out_map, "BaseID_CROME", "!OBJECTID!", "PYTHON_9.3")
        print (" Identifying farmland")
        arcpy.MakeFeatureLayer_management(out_map, "ag_lyr")
        # NOTE(review): this where-clause hard-codes 'Interpreted_hab', while
        # the configured habitat field is Hab_field ('Interpreted_habitat' /
        # 'Interpreted_Habitat') — confirm the field name is correct here.
        expression = "Interpreted_hab IN ('Agricultural land', 'Natural surface') OR Interpreted_hab LIKE 'Arable%'"
        expression = expression + " OR Interpreted_hab LIKE 'Improved grassland%'"
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause=expression)
        print(" Calculating percentage of farmland features within CROME polygons")
        # Tabulate how much of each base-map polygon falls in each CROME
        # polygon; 'field' is the simplified CROME description column.
        arcpy.TabulateIntersection_analysis(CROME_data, ["LUCODE", "Land Use Description", "field", "Shape_Area"],
                                            "ag_lyr", "CROME_TI", ["BaseID_CROME", Hab_field, "Shape_Area"])
        print(" Sorting TI table by size so that larger intersections are first in the list")
        arcpy.Sort_management("CROME_TI", "CROME_TI_sort", [["AREA", "ASCENDING"]])
        print (" Adding fields for CROME data")
        MyFunctions.check_and_add_field(out_map, "CROME_desc", "TEXT", 50)
        MyFunctions.check_and_add_field(out_map, "CROME_simple", "TEXT", 30)
        print (" Joining CROME info for base map polygons that are >50% inside CROME polygons")
        arcpy.AddJoin_management("ag_lyr", "BaseID_CROME", "CROME_TI_sort", "BaseID_CROME", "KEEP_ALL")
        print(" Copying CROME data")
        # Only accept a CROME attribution when the overlap is a majority
        # (>50%) of the base polygon's area.
        arcpy.SelectLayerByAttribute_management("ag_lyr", where_clause="CROME_TI_sort.PERCENTAGE > 50")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_desc", "!CROME_TI_sort.Land Use Description!", "PYTHON_9.3")
        arcpy.CalculateField_management("ag_lyr", out_map + ".CROME_simple", "!CROME_TI_sort.field!", "PYTHON_9.3")
        # Remove the join
        arcpy.RemoveJoin_management("ag_lyr", "CROME_TI_sort")
        arcpy.Delete_management("ag_lyr")
        # Set interpreted habitat to Improved grassland if this is 'agricultural land'or Amenity grassland if this is 'Natural surface'
        # unless it is railside (do not want to flag this as amenity grassland because it is not generally accessible)
        expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " IN ('Agricultural land', 'Arable')"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Improved grassland'")
        expression = "CROME_desc IN ('Grass', 'Fallow Land') AND " + Hab_field + " = 'Natural surface' AND DescriptiveGroup <> 'Rail'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Amenity grassland'")
        expression = "CROME_desc = 'Arable' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
        expression = "CROME_desc = 'Short Rotation Coppice' AND " + Hab_field + " = 'Agricultural land'"
        MyFunctions.select_and_copy(out_map, Hab_field, expression, "'Arable'")
        print(''.join(["## Finished on : ", time.ctime()]))
if process_PHI:
for LAD in LAD_names:
arcpy.env.workspace = os.path.join(folder, LAD + ".gdb")
if delete_landform:
print(" Deleting overlapping 'Landform' and 'Pylon' from OSMM for " + LAD)
arcpy.MakeFeatureLayer_management("OSMM_LCM", "OSMM_layer")
expression = "DescriptiveGroup LIKE '%Landform%' OR DescriptiveTerm IN ('Cliff','Slope','Pylon')"
arcpy.SelectLayerByAttribute_management("OSMM_layer", where_clause=expression)
arcpy.DeleteFeatures_management("OSMM_layer")
arcpy.Delete_management("OSMM_layer")
if intersect_PHI:
print ("Intersecting " + LAD)
arcpy.Identity_analysis("OSMM_LCM", "PHI", out_fc, "NO_FID")
if interpret_PHI:
print ("Interpreting " + LAD)
# Copy PHI habitat across, but not for manmade, gardens, water, unidentified PHI, wood pasture or OMHD (dealt with later)
expression = "Make = 'Natural' AND DescriptiveGroup NOT LIKE '%water%' AND DescriptiveGroup NOT LIKE '%Water%' AND " \
"OSMM_hab <> 'Roadside - unknown surface' AND OSMM_hab <> 'Track' AND OSMM_hab <> 'Standing water' "
expression2 = expression + " AND PHI IS NOT NULL AND PHI <> '' AND PHI NOT LIKE 'No main%' AND " \
"PHI NOT LIKE 'Wood-pasture%' AND PHI NOT LIKE 'Open Mosaic%'"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression2, "!PHI!")
# Correction for traditional orchards in large gardens
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "PHI = 'Traditional orchard' AND OSMM_hab = 'Garden'",
"'Traditional orchards'")
# Other corrections / consolidations
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Deciduous woodland'",
"'Broadleaved woodland - semi-natural'")
expression3 = "Interpreted_habitat LIKE '%grazing marsh%' OR Interpreted_habitat LIKE 'Purple moor grass%'"
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", expression3, "'Marshy grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%semi-improved grassland%'",
"'Semi-natural grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%meadow%'",
"'Neutral grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Traditional orchard'",
"'Traditional orchards'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat LIKE '%alcareous%'",
"'Calcareous grassland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Lowland heathland'",
"'Heathland'")
MyFunctions.select_and_copy(out_fc, "Interpreted_habitat", "Interpreted_habitat = 'Reedbeds'",
"'Reedbed'")
# Copy over OMHD only if the habitat is fairly generic (OMHD dataset covers areas of | |
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto.sns
import grizzly_util
import eventlet
from eventlet.timeout import Timeout
from eventlet.green.urllib import request
from eventlet.green.urllib import error
import eventlet.green.ssl as ssl
import json
import _thread
import time
import string
import urllib.parse
from datetime import datetime
from collections import Counter
import random
class GrizzlyConfiguration():
'''
This class is called to configure and conduct an application layer
DoS test.
More information on how to configure the tool can be found on:
https://github.com/netflix-skunkworks/repulsive-grizzly
'''
def __init__(self):
# Read in config file
self.conf = ""
with open("commands.json") as config:
self.conf = json.loads(config.read())
self.status_code = []
# If setup to use Kraken, we should ensure sns_region and sns_topic
try:
if self.conf["use_with_kraken"]:
self.use_kraken = True
self.sns = boto.sns.connect_to_region(self.conf["sns_region"])
self.topic = self.conf["sns_topic"]
else:
self.use_kraken = False
except:
print("Could not set sns_region or sns_topic, did you specify them?")
exit(1)
# Check if we should perform a sanity check
try:
if self.conf["perform_sanity_check"]:
self.perform_sanity_check = True
else:
self.perform_sanity_check = False
except:
print("Could not determine if we should do sanity check")
exit(1)
# If setup to use Kraken, we should ensure sns_region and sns_topic
try:
if self.conf["use_with_kraken"]:
self.instance_id = grizzly_util.get_node_number("all")
else:
self.instance_id = 1
except:
print("Could not set instance, do you have AWS credentials "
"on the host you are running Repulsive Grizzly?")
exit(1)
self.cookie_list = []
self.headers_list = []
def payload_generator(self, size=50, chars=string.ascii_uppercase + string.digits):
'''
Payload generator can be used by supplying a placehodler $$1$$
and overwritten for your specific use case
NOTE: This is not currently used or implemented
'''
return ''.join(random.choice(chars) for _ in range(size))
def load_commands(self, command_file):
'''
Loads all commands into self object
'''
# Make sure there is a hostname defined, otherwise we can't set header
try:
self.verb = self.conf["verb"]
except:
print("Could not resolve HTTP Verb for attack, exiting")
exit(1)
# Configure proxy if enabled
try:
if self.conf["proxy"]:
self.proxy = True
self.proxy_config = self.conf["proxy_config"]
else:
self.proxy = False
except:
print("Proxy should be set to True/False in the commands.json")
exit(1)
# Grab the sanity check url
try:
self.sanity_check_url = self.conf["sanity_check_url"]
except:
print("No sanity check url provided, how do we know we are healthy?")
exit(1)
# Make sure there is a hostname defined, otherwise we can't set header
try:
self.host = self.conf["hostname"]
except:
print("Could not resolve hostname for attack, exiting")
exit(1)
# Load post data if provided and verb is either post, put or patch
try:
if self.verb.lower() in ["post", "put", "patch"] and self.conf["post_data"]:
if self.conf["post_data"]:
with open("post_data/{}".format(str(self.conf["post_data"]))) as post_data:
self.post_data = post_data.read().replace('\n', '')
else:
self.post_data = ""
except:
print("Could not resolve post data, did you specify the correct filename?")
raise
# If configured to use cookies, load the cookies from json into string?
try:
if self.conf["use_auth"]:
self.auth_store_name = self.conf["auth_store_name"]
with open("./authentication/{}".format(self.auth_store_name)) as auth_objects:
self.auth_objects = json.loads(auth_objects.read())
else:
self.auth_objects = []
except Exception as e:
print("Could not resolve cookie store for attack, exiting")
print(e)
exit(1)
# You can set one_url_per_agent to true to have each agent
# hit all URLs or moduls to fix one URL per attack agent.
# Otherwise this defaults to all urls per each agent
try:
if self.conf["urls"] and self.conf["one_url_per_agent"]:
self.urls = [self.conf["urls"][int(self.instance_id) % len(self.conf["urls"])]]
elif self.conf["urls"]:
self.urls = self.conf["urls"]
except Exception as e:
print("Could not assign one url per agent, exiting!")
print(e)
exit(1)
# Load headers into a dict object
if self.conf["headers"]:
self.header_store_name = self.conf["headers"]
with open("./headers/{}".format(self.header_store_name)) as config:
self.headers = json.loads(config.read())
else:
print("no headers specified, using default headers.")
with open("./headers/{}".format("default")) as config:
self.headers = json.loads(config.read())
# If we need to replace auth objects, let's load them and build a map
if len(self.auth_objects) > 0:
# This method generates a random sample with a deterministic seed
# to ensure each instances uses the same cookies
try:
random_sample = random
random_sample.seed(self.conf["auth_store_count"])
if len(self.auth_objects) != 0:
self.auth_objects = random_sample.sample(self.auth_objects, (self.conf["auth_store_count"]))
else:
self.auth_objects = []
except:
print("Did you specify the number of objects (auth_store_count) "
"for your authentication store?")
exit(1)
# The following code blocks compute all possible requests depending
# on how many auth objects were provided.
self.computed_requests = {}
self.computed_requests["urls"] = []
self.computed_requests["headers"] = []
self.computed_requests["post_data"] = []
temp_hash = {}
# Compute a list of URLs with associated auth objects if identified
for url in self.urls:
if "$$AUTH$$" in url:
for auth_object in self.auth_objects:
self.computed_requests["urls"].append(url.replace("$$AUTH$$", auth_object))
else:
self.computed_requests["urls"].append(url)
# Compute a list of headers with associated auth objects if identified
auth_headers = False
for header in self.headers.values():
if "$$AUTH$$" in header:
auth_headers = True
if auth_headers:
for i in range(len(self.auth_objects)):
print(i)
temp_hash = {}
for key, value in self.headers.items():
if "$$AUTH$$" in value:
temp_hash.update({key: value.replace("$$AUTH$$", self.auth_objects[i])})
else:
temp_hash.update({key: value})
self.computed_requests["headers"].append(temp_hash)
else:
self.computed_requests["headers"] = [self.headers]
# Compute a list of post_data samples with associated auth objects if identified
if self.post_data:
if "$$AUTH$$" in self.post_data:
auth_headers = True
if auth_headers:
for i in range(len(self.auth_objects)):
self.computed_requests["post_data"].append(self.post_data.replace("$$AUTH$$", self.auth_objects[i]))
else:
self.computed_requests["post_data"] = [self.post_data]
else:
self.computed_requests = {}
self.computed_requests["urls"] = []
self.computed_requests["headers"] = []
self.computed_requests["post_data"] = []
temp_hash = {}
self.computed_requests["urls"] = self.urls
self.computed_requests["headers"] = [self.headers]
self.computed_requests["post_data"] = [self.post_data]
def generate_request(self, verb, url, headers, post_data=None):
try:
# import pdb; pdb.set_trace()
req = request.Request(url,
data=post_data.encode("utf-8") if post_data is not None else None,
headers=headers,
method=verb)
if self.proxy:
req.set_proxy(self.proxy_config, urllib.parse.urlparse(url).scheme)
response = request.urlopen(req, timeout=60, context=self.create_ctx())
else:
response = request.urlopen(req, timeout=60, context=self.create_ctx())
self.status_code.append(int(response.code))
except error.HTTPError as e:
self.status_code.append(int(e.code))
except error.URLError as e:
self.sns_logger(status_codes={}, exception=str(e.reason), subject="Grizzly Error")
except Exception:
import traceback
self.sns_logger(status_codes={}, exception=str(traceback.format_exc()), subject="Grizzly Error")
print(('generic exception: ' + traceback.format_exc()))
def countdown(self, start_time):
'''
This method sleeps until the start_time is triggered.
This is used to keep attack agents in sync so they start
their tests at the same time.
'''
print(("Executing Test on "
"{} with {} threads "
"via {} url(s) for "
"{} seconds".format(self.conf["hostname"],
str(self.conf["threads"]),
self.urls,
str(self.conf["ttl"]))))
now = datetime.now()
timestamp = start_time.split(':')
start_attack = now.replace(hour=int(timestamp[0]), minute=int(
timestamp[1]), second=int(timestamp[2]))
t = int((start_attack - now).total_seconds())
print(("Attack starts at: {} in {} seconds".format(start_time, t)))
while start_attack > now:
now = datetime.now()
timestamp = start_time.split(':')
start_attack = now.replace(hour=int(timestamp[0]), minute=int(
timestamp[1]), second=int(timestamp[2]))
mins, secs = divmod(t, 60)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
print(timeformat)
time.sleep(1)
t -= 1
print('Attack Executing!\n\n')
def sns_logger(self, status_codes={}, exception=None, subject="Grizzly Log", url=""):
'''
This method logs messages to an SNS queue and/or prints them to console.
'''
timestamp = '%s' % datetime.now()
agent = self.instance_id
if url == "":
url = self.urls
if status_codes:
message = json.dumps({"agent": agent,
"timestamp": timestamp,
"status_codes": status_codes,
"elb": url})
if self.use_kraken:
self.sns.publish(message=message, subject=subject, topic=self.topic)
print(message)
# I am not handling exceptions yet, but this is for future
if exception:
message = json.dumps({"agent": agent,
"timestamp": timestamp,
"url": url,
"exception": exception})
if self.use_kraken:
self.sns.publish(message=message, subject=subject, topic=self.topic)
print(message)
def status_counter(self, thread_name):
'''
This provides status updates to the SNS queue every 5 seconds
'''
while True:
time.sleep(5)
status_codes = Counter(self.status_code)
self.sns_logger(status_codes)
self.status_code = []
def create_ctx(self):
'''
This method sets the right ssl context to disable hostname checking
and certificate validation.
'''
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return ctx
def sanity_check(self, client, computed_requests):
    '''
    Check that the sanity-check URL returns a 200 status code.
    If the check fails, the application exits via the raised error.

    :param client: URL to probe.
    :param computed_requests: unused; headers are read from
        ``self.computed_requests`` (presumably the same object) — TODO confirm.
    :raises RuntimeError: when the response status is not 200.
    '''
    req = request.Request(client, headers=self.computed_requests["headers"][0])
    response = request.urlopen(req, timeout=60, context=self.create_ctx())
    if response.code != 200:
        self.sns_logger(status_codes={},
                        exception=str(response.code),
                        subject="Grizzly Sanity Check Failed",
                        url=client)
        # A bare ``raise`` here had no active exception and itself failed
        # with an argument-less RuntimeError; raise an explicit, descriptive
        # RuntimeError instead (same exception type for callers).
        raise RuntimeError(
            "Sanity check failed: {} returned {}".format(client, response.code))
    else:
        self.sns_logger(status_codes={},
                        exception=str(response.code),
                        subject="Grizzly Sanity Check Passed",
                        url=client)
        print('Sanity check passed: 200 OK')
        return True
if __name__ == "__main__":
# Initialize class and load command file
grizzly_config = GrizzlyConfiguration()
grizzly_config.load_commands("commands.json")
# Set threadpool
pool = eventlet.GreenPool(int(grizzly_config.conf["threads"]))
# Start time is when to start in seconds from time
try:
# First publish a | |
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_left_alar_out_tip:
:type nose_left_alar_out_tip:
~azure.cognitiveservices.vision.face.models.Coordinate
:param nose_right_alar_out_tip:
:type nose_right_alar_out_tip:
~azure.cognitiveservices.vision.face.models.Coordinate
:param upper_lip_top:
:type upper_lip_top:
~azure.cognitiveservices.vision.face.models.Coordinate
:param upper_lip_bottom:
:type upper_lip_bottom:
~azure.cognitiveservices.vision.face.models.Coordinate
:param under_lip_top:
:type under_lip_top:
~azure.cognitiveservices.vision.face.models.Coordinate
:param under_lip_bottom:
:type under_lip_bottom:
~azure.cognitiveservices.vision.face.models.Coordinate
"""
# Wire-format mapping consumed by the msrest serializer:
# python attribute name -> JSON key name and declared type.
_attribute_map = {
    'pupil_left': {'key': 'pupilLeft', 'type': 'Coordinate'},
    'pupil_right': {'key': 'pupilRight', 'type': 'Coordinate'},
    'nose_tip': {'key': 'noseTip', 'type': 'Coordinate'},
    'mouth_left': {'key': 'mouthLeft', 'type': 'Coordinate'},
    'mouth_right': {'key': 'mouthRight', 'type': 'Coordinate'},
    'eyebrow_left_outer': {'key': 'eyebrowLeftOuter', 'type': 'Coordinate'},
    'eyebrow_left_inner': {'key': 'eyebrowLeftInner', 'type': 'Coordinate'},
    'eye_left_outer': {'key': 'eyeLeftOuter', 'type': 'Coordinate'},
    'eye_left_top': {'key': 'eyeLeftTop', 'type': 'Coordinate'},
    'eye_left_bottom': {'key': 'eyeLeftBottom', 'type': 'Coordinate'},
    'eye_left_inner': {'key': 'eyeLeftInner', 'type': 'Coordinate'},
    'eyebrow_right_inner': {'key': 'eyebrowRightInner', 'type': 'Coordinate'},
    'eyebrow_right_outer': {'key': 'eyebrowRightOuter', 'type': 'Coordinate'},
    'eye_right_inner': {'key': 'eyeRightInner', 'type': 'Coordinate'},
    'eye_right_top': {'key': 'eyeRightTop', 'type': 'Coordinate'},
    'eye_right_bottom': {'key': 'eyeRightBottom', 'type': 'Coordinate'},
    'eye_right_outer': {'key': 'eyeRightOuter', 'type': 'Coordinate'},
    'nose_root_left': {'key': 'noseRootLeft', 'type': 'Coordinate'},
    'nose_root_right': {'key': 'noseRootRight', 'type': 'Coordinate'},
    'nose_left_alar_top': {'key': 'noseLeftAlarTop', 'type': 'Coordinate'},
    'nose_right_alar_top': {'key': 'noseRightAlarTop', 'type': 'Coordinate'},
    'nose_left_alar_out_tip': {'key': 'noseLeftAlarOutTip', 'type': 'Coordinate'},
    'nose_right_alar_out_tip': {'key': 'noseRightAlarOutTip', 'type': 'Coordinate'},
    'upper_lip_top': {'key': 'upperLipTop', 'type': 'Coordinate'},
    'upper_lip_bottom': {'key': 'upperLipBottom', 'type': 'Coordinate'},
    'under_lip_top': {'key': 'underLipTop', 'type': 'Coordinate'},
    'under_lip_bottom': {'key': 'underLipBottom', 'type': 'Coordinate'},
}
def __init__(self, **kwargs):
    """Populate every landmark coordinate from keyword arguments.

    Each attribute defaults to None when the corresponding key is absent.
    """
    super(FaceLandmarks, self).__init__(**kwargs)
    # Assign all 27 landmark attributes uniformly; the tuple order matches
    # the declaration order in _attribute_map.
    for _name in (
            'pupil_left', 'pupil_right', 'nose_tip', 'mouth_left',
            'mouth_right', 'eyebrow_left_outer', 'eyebrow_left_inner',
            'eye_left_outer', 'eye_left_top', 'eye_left_bottom',
            'eye_left_inner', 'eyebrow_right_inner', 'eyebrow_right_outer',
            'eye_right_inner', 'eye_right_top', 'eye_right_bottom',
            'eye_right_outer', 'nose_root_left', 'nose_root_right',
            'nose_left_alar_top', 'nose_right_alar_top',
            'nose_left_alar_out_tip', 'nose_right_alar_out_tip',
            'upper_lip_top', 'upper_lip_bottom', 'under_lip_top',
            'under_lip_bottom'):
        setattr(self, _name, kwargs.get(_name, None))
class NameAndUserDataContract(Model):
    """A user-assigned name plus free-form user data.

    Shared by person, largePersonGroup/personGroup and
    largeFaceList/faceList resources.

    :param name: User defined name, maximum length is 128.
    :type name: str
    :param user_data: User specified data. Length should not exceed 16KB.
    :type user_data: str
    """

    # Constraints enforced by the msrest serializer.
    _validation = {
        'name': {'max_length': 128},
        'user_data': {'max_length': 16384},
    }

    # Python attribute -> wire key/type mapping used by the serializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'user_data': {'key': 'userData', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NameAndUserDataContract, self).__init__(**kwargs)
        # Both fields are optional; missing keys become None.
        self.name = kwargs.get('name')
        self.user_data = kwargs.get('user_data')
class MetaDataContract(NameAndUserDataContract):
    """Name, user data and recognition model name for
    largePersonGroup/personGroup and largeFaceList/faceList.

    :param name: User defined name, maximum length is 128.
    :type name: str
    :param user_data: User specified data. Length should not exceed 16KB.
    :type user_data: str
    :param recognition_model: Possible values include: 'recognition_01',
     'recognition_02', 'recognition_03'. Default value: "recognition_01" .
    :type recognition_model: str or
     ~azure.cognitiveservices.vision.face.models.RecognitionModel
    """

    _validation = {
        'name': {'max_length': 128},
        'user_data': {'max_length': 16384},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'user_data': {'key': 'userData', 'type': 'str'},
        'recognition_model': {'key': 'recognitionModel', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MetaDataContract, self).__init__(**kwargs)
        # The service default model is recognition_01.
        model = kwargs.get('recognition_model', "recognition_01")
        self.recognition_model = model
class FaceList(MetaDataContract):
    """A face list resource.

    All required parameters must be populated in order to send to Azure.

    :param name: User defined name, maximum length is 128.
    :type name: str
    :param user_data: User specified data. Length should not exceed 16KB.
    :type user_data: str
    :param recognition_model: Possible values include: 'recognition_01',
     'recognition_02', 'recognition_03'. Default value: "recognition_01" .
    :type recognition_model: str or
     ~azure.cognitiveservices.vision.face.models.RecognitionModel
    :param face_list_id: Required. FaceListId of the target face list.
    :type face_list_id: str
    :param persisted_faces: Persisted faces within the face list.
    :type persisted_faces:
     list[~azure.cognitiveservices.vision.face.models.PersistedFace]
    """

    _validation = {
        'name': {'max_length': 128},
        'user_data': {'max_length': 16384},
        'face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'user_data': {'key': 'userData', 'type': 'str'},
        'recognition_model': {'key': 'recognitionModel', 'type': 'str'},
        'face_list_id': {'key': 'faceListId', 'type': 'str'},
        'persisted_faces': {'key': 'persistedFaces', 'type': '[PersistedFace]'},
    }

    def __init__(self, **kwargs):
        super(FaceList, self).__init__(**kwargs)
        # face_list_id is required by the service but optional at construction.
        self.face_list_id = kwargs.get('face_list_id')
        self.persisted_faces = kwargs.get('persisted_faces')
class FaceRectangle(Model):
    """A rectangle within which a face can be found.

    All required parameters must be populated in order to send to Azure.

    :param width: Required. The width of the rectangle, in pixels.
    :type width: int
    :param height: Required. The height of the rectangle, in pixels.
    :type height: int
    :param left: Required. The distance from the left edge of the image to
     the left edge of the rectangle, in pixels.
    :type left: int
    :param top: Required. The distance from the top edge of the image to
     the top edge of the rectangle, in pixels.
    :type top: int
    """

    _validation = {
        'width': {'required': True},
        'height': {'required': True},
        'left': {'required': True},
        'top': {'required': True},
    }

    _attribute_map = {
        'width': {'key': 'width', 'type': 'int'},
        'height': {'key': 'height', 'type': 'int'},
        'left': {'key': 'left', 'type': 'int'},
        'top': {'key': 'top', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(FaceRectangle, self).__init__(**kwargs)
        # All four edges are required by the service; missing keys become None.
        for _field in ('width', 'height', 'left', 'top'):
            setattr(self, _field, kwargs.get(_field))
class FacialHair(Model):
    """Confidence scores describing facial hair attributes.

    :param moustache:
    :type moustache: float
    :param beard:
    :type beard: float
    :param sideburns:
    :type sideburns: float
    """

    _attribute_map = {
        'moustache': {'key': 'moustache', 'type': 'float'},
        'beard': {'key': 'beard', 'type': 'float'},
        'sideburns': {'key': 'sideburns', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(FacialHair, self).__init__(**kwargs)
        # Each score defaults to None when not supplied.
        for _field in ('moustache', 'beard', 'sideburns'):
            setattr(self, _field, kwargs.get(_field))
class FindSimilarRequest(Model):
    """Request body for the find-similar operation.

    All required parameters must be populated in order to send to Azure.

    :param face_id: Required. FaceId of the query face. User needs to call
     Face - Detect first to get a valid faceId. Note that this faceId is not
     persisted and will expire 24 hours after the detection call
    :type face_id: str
    :param face_list_id: An existing user-specified unique candidate face
     list, created in Face List - Create a Face List. Contains persisted
     faceIds which never expire.
    :type face_list_id: str
    :param large_face_list_id: An existing user-specified unique candidate
     large face list, created in LargeFaceList - Create. Contains persisted
     faceIds which never expire.
    :type large_face_list_id: str
    :param face_ids: An array of candidate faceIds created by Face - Detect;
     they expire 24 hours after the detection call. Limited to 1000 entries.
    :type face_ids: list[str]
    :param max_num_of_candidates_returned: The number of top similar faces
     returned. The valid range is [1, 1000]. Default value: 20 .
    :type max_num_of_candidates_returned: int
    :param mode: Similar face searching mode. It can be "matchPerson" or
     "matchFace". Possible values include: 'matchPerson', 'matchFace'.
     Default value: "matchPerson" .
    :type mode: str or
     ~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode

    Note: faceListId, largeFaceListId and faceIds should not be provided at
    the same time.
    """

    _validation = {
        'face_id': {'required': True},
        'face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
        'large_face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
        'face_ids': {'max_items': 1000},
        'max_num_of_candidates_returned': {'maximum': 1000, 'minimum': 1},
    }

    _attribute_map = {
        'face_id': {'key': 'faceId', 'type': 'str'},
        'face_list_id': {'key': 'faceListId', 'type': 'str'},
        'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'},
        'face_ids': {'key': 'faceIds', 'type': '[str]'},
        'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'},
        'mode': {'key': 'mode', 'type': 'FindSimilarMatchMode'},
    }

    def __init__(self, **kwargs):
        super(FindSimilarRequest, self).__init__(**kwargs)
        # The three candidate sources are mutually exclusive server-side;
        # all default to None here.
        for _name in ('face_id', 'face_list_id', 'large_face_list_id', 'face_ids'):
            setattr(self, _name, kwargs.get(_name))
        self.max_num_of_candidates_returned = kwargs.get('max_num_of_candidates_returned', 20)
        self.mode = kwargs.get('mode', "matchPerson")
class GroupRequest(Model):
    """Request body for the group operation.

    All required parameters must be populated in order to send to Azure.

    :param face_ids: Required. Array of candidate faceId created by Face -
     Detect. The maximum is 1000 faces
    :type face_ids: list[str]
    """

    _validation = {
        'face_ids': {'required': True, 'max_items': 1000},
    }

    _attribute_map = {
        'face_ids': {'key': 'faceIds', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(GroupRequest, self).__init__(**kwargs)
        # Required by the service, but construction tolerates its absence.
        self.face_ids = kwargs.get('face_ids')
class GroupResult(Model):
"""An array of face groups based on face similarity.
All required parameters must be populated in order to send to Azure.
:param groups: Required. A partition of the original faces based on face
similarity. Groups are ranked by number of faces
:type groups: list[list[str]]
:param messy_group: Face ids array of faces | |
# Copyright 2021 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import trac.rt.impl.util as util
import trac.rt.exec.actors as actors
import unittest
class ActorSystemTest(unittest.TestCase):
    """Tests for the trac actor system: lifecycle, failures, child actors."""

    @classmethod
    def setUpClass(cls) -> None:
        # Configure logging once for the whole test class.
        util.configure_logging()
def test_actor_lifecycle(self):
    """Start one actor, deliver one message, and observe a clean stop."""
    # The most basic test case:
    # Start one actor, it processes a single message and stops
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            results.append("on_start")

        def on_stop(self):
            results.append("on_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.send("sample_message", 1)
    system.wait_for_shutdown()

    self.assertEqual(["on_start", "sample_message", 1, "on_stop"], results)

    # Make sure the system went down cleanly
    self.assertEqual(0, system.shutdown_code())
def test_unknown_actor_ignored(self):
    """Messages to an unknown actor ID are dropped without failing the system."""
    # Messages sent to an unknown actor ID are silently dropped (there is a warning in the logs)
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            results.append("on_start")
            # First send goes nowhere; the self-send below must still arrive.
            self.actors().send("/nonexistent/actor", "sample_message", 1)
            self.actors().send(self.actors().id, "sample_message", 1)

        def on_stop(self):
            results.append("on_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual(["on_start", "sample_message", 1, "on_stop"], results)
    self.assertEqual(0, system.shutdown_code())
def test_bad_message_params(self):
    """Bad message names/signatures raise at the send() call site, not later."""
    errors = []

    class TargetActor(actors.Actor):

        @actors.Message
        def sample_message(self, value: int):
            pass

        @actors.Message
        def sample_message_2(self, value: int, other: str = ''):
            pass

        @actors.Message
        def sample_with_default(self, value: int = 0):
            pass

    class TestActor(actors.Actor):

        def on_start(self):
            target_id = self.actors().spawn(TargetActor)
            # Each bad send is expected to raise immediately; record which did.
            try:
                self.actors().send(target_id, "unknown_message")
            except Exception:  # noqa
                errors.append("unknown_message")
            try:
                self.actors().send(target_id, "sample_message_2", 1, unknown=2)
            except Exception:  # noqa
                errors.append("unknown_param")
            try:
                self.actors().send(target_id, "sample_message")
            except Exception:  # noqa
                errors.append("missing_param")
            try:
                self.actors().send(target_id, "sample_message", 1, 2)
            except Exception:  # noqa
                errors.append("extra_param")
            try:
                self.actors().send(target_id, "sample_message", "wrong_param_type")
            except Exception:  # noqa
                errors.append("wrong_param_type")
            try:
                self.actors().send(target_id, "sample_message", value="wrong_kw_param_type")
            except Exception:  # noqa
                errors.append("wrong_kw_param_type")
            # Should not error, parameter 'value' should take the default
            self.actors().send(target_id, "sample_with_default")
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual([
        "unknown_message", "unknown_param",
        "missing_param", "extra_param",
        "wrong_param_type", "wrong_kw_param_type"],
        errors)

    # System should have gone down cleanly, errors are caught where they occur
    self.assertEqual(0, system.shutdown_code())
def test_explicit_signals_not_allowed(self):
    """Explicitly sending an 'actor:'-prefixed signal raises at the call site."""
    # Actors cannot explicitly send signals (signals are system messages prefixed 'actor:')
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            try:
                self.actors().send("/nonexistent/actor", "actor:any_signal")
            except Exception:  # noqa
                results.append("explicit_signal_failed")
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual(["explicit_signal_failed"], results)
    self.assertEqual(0, system.shutdown_code())
def test_actor_failure_1(self):
    """An error raised while handling a message brings the system down with that error."""
    # Actor throws an error while processing a message
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            results.append("on_start")

        def on_stop(self):
            results.append("on_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            raise RuntimeError("err_code_1")

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.send("sample_message", 1)
    system.wait_for_shutdown()

    # Actor should receive on_stop after raising the error
    self.assertEqual(["on_start", "sample_message", 1, "on_stop"], results)

    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_1", error.args[0])

def test_actor_failure_2(self):
    """An error raised in on_start skips on_stop and fails the shutdown."""
    # Actor throws an error during on_start
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            results.append("on_start")
            raise RuntimeError("err_code_2")

        def on_stop(self):
            results.append("on_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.send("sample_message", 1)
    system.wait_for_shutdown()

    # Actor should not receive on_stop if on_start fails
    self.assertEqual(["on_start"], results)

    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_2", error.args[0])

def test_actor_failure_3(self):
    """An error raised in on_stop is still reported as the shutdown error."""
    # Actor throws an error during on_stop
    results = []

    class TestActor(actors.Actor):

        def on_start(self):
            results.append("on_start")

        def on_stop(self):
            results.append("on_stop")
            raise RuntimeError("err_code_3")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            self.actors().stop()

    root = TestActor()
    system = actors.ActorSystem(root)
    system.start()
    system.send("sample_message", 1)
    system.wait_for_shutdown()

    self.assertEqual(["on_start", "sample_message", 1, "on_stop"], results)

    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_3", error.args[0])
def test_child_lifecycle(self):
    """A parent observes its child's stop via the 'actor:stopped' signal."""
    # Parent creates one child and sends it a message
    # Child processes one message and stops, child.on_stop should be called
    # child stopped signal should be received in the parent
    results = []

    class ChildActor(actors.Actor):

        def on_start(self):
            results.append("child_start")
            self.actors().send_parent("child_started", self.actors().id)

        def on_stop(self):
            results.append("child_stop")

    class ParentActor(actors.Actor):

        def __init__(self):
            super().__init__()
            self.child_id = None

        def on_start(self):
            results.append("parent_start")
            self.child_id = self.actors().spawn(ChildActor)

        def on_stop(self):
            results.append("parent_stop")

        def on_signal(self, signal: actors.Signal) -> bool:
            if signal.sender == self.child_id:
                results.append("parent_signal")
                results.append(signal.message)
                self.actors().stop()
                return True

        @actors.Message
        def child_started(self, child_id):
            results.append("child_started")
            self.actors().stop(child_id)

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual([
        "parent_start",
        "child_start", "child_started",
        "child_stop", "parent_signal", "actor:stopped",
        "parent_stop"],
        results)

    # Make sure the system went down cleanly
    self.assertEqual(0, system.shutdown_code())
def test_multiple_children(self):
    """Spawn three children in sequence; all are stopped when the parent stops."""
    results = []

    class ChildActor(actors.Actor):

        def on_start(self):
            results.append("child_start")
            self.actors().send_parent("new_child")

        def on_stop(self):
            results.append("child_stop")

    class ParentActor(actors.Actor):

        def __init__(self):
            super().__init__()
            self.child_count = 0

        def on_start(self):
            results.append("parent_start")
            self.actors().spawn(ChildActor)

        def on_stop(self):
            results.append("parent_stop")

        @actors.Message
        def new_child(self):
            results.append("new_child")
            self.child_count += 1
            # Spawn the next child until three exist, then shut down.
            if self.child_count < 3:
                self.actors().spawn(ChildActor)
            else:
                self.actors().stop()

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual([
        "parent_start",
        "child_start", "new_child",
        "child_start", "new_child",
        "child_start", "new_child",
        "child_stop", "child_stop", "child_stop",
        "parent_stop"],
        results)

    # Make sure the system went down cleanly
    self.assertEqual(0, system.shutdown_code())
def test_child_shutdown_order(self):
    """Shutdown proceeds bottom-up: grandchild, then child, then parent."""
    results = []

    class Grandchild(actors.Actor):

        def __init__(self, root_id: actors.ActorId):
            super().__init__()
            self.root_id = root_id

        def on_start(self):
            results.append("grandchild_start")
            self.actors().send(self.root_id, "grandchild_started")

        def on_stop(self):
            results.append("grandchild_stop")

    class Child(actors.Actor):

        def on_start(self):
            results.append("child_start")
            # Pass the root's ID down so the grandchild can notify it directly.
            self.actors().spawn(Grandchild, self.actors().parent)

        def on_stop(self):
            results.append("child_stop")

    class Parent(actors.Actor):

        def on_start(self):
            results.append("parent_start")
            self.actors().spawn(Child)

        def on_stop(self):
            results.append("parent_stop")

        @actors.Message
        def grandchild_started(self):
            results.append("grandchild_started")
            self.actors().stop()

    root = Parent()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual([
        "parent_start", "child_start", "grandchild_start",
        "grandchild_started",
        "grandchild_stop", "child_stop", "parent_stop"],
        results)

    # Make sure the system went down cleanly
    self.assertEqual(0, system.shutdown_code())
def test_stop_sibling_not_allowed(self):
    """Stopping a sibling raises; only self and direct children may be stopped."""
    # Actors are only allowed to stop themselves or their direct children
    results = []

    class ChildActor(actors.Actor):

        def __init__(self, other_id):
            super().__init__()
            self.other_id = other_id

        def on_start(self):
            # Second child receives the first child's ID and tries to stop it.
            if self.other_id:
                try:
                    self.actors().stop(self.other_id)
                except Exception:  # noqa
                    results.append("stop_other_failed")
            self.actors().send_parent("child_up")

    class ParentActor(actors.Actor):

        def __init__(self):
            super().__init__()
            self.child_count = 0

        def on_start(self):
            child1 = self.actors().spawn(ChildActor, None)
            self.actors().spawn(ChildActor, child1)

        @actors.Message
        def child_up(self):
            self.child_count += 1
            if self.child_count == 2:
                self.actors().stop()

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    self.assertEqual(["stop_other_failed"], results)
    self.assertEqual(0, system.shutdown_code())
def test_child_failure_1(self):
    """A child's message-handler error stops both child and parent and propagates up."""
    # Child throws an error while processing a message
    results = []

    class ChildActor(actors.Actor):

        def on_start(self):
            results.append("child_start")

        def on_stop(self):
            results.append("child_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            raise RuntimeError("err_code_1")

    class ParentActor(actors.Actor):

        def on_start(self):
            results.append("parent_start")
            child_id = self.actors().spawn(ChildActor)
            self.actors().send(child_id, "sample_message", 1)

        def on_stop(self):
            results.append("parent_stop")

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    # Both parent and child should receive stop after child errors
    self.assertEqual([
        "parent_start", "child_start",
        "sample_message", 1,
        "child_stop", "parent_stop"], results)

    # Error info should propagate up
    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_1", error.args[0])

def test_child_failure_2(self):
    """A child failing in on_start never gets on_stop; the parent still stops."""
    # Child throws an error in on_start
    results = []

    class ChildActor(actors.Actor):

        def on_start(self):
            results.append("child_start")
            raise RuntimeError("err_code_2")

        def on_stop(self):
            results.append("child_stop")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)

    class ParentActor(actors.Actor):

        def on_start(self):
            results.append("parent_start")
            child_id = self.actors().spawn(ChildActor)
            self.actors().send(child_id, "sample_message", 1)

        def on_stop(self):
            results.append("parent_stop")

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    # Child does not receive a stop message because it did not start successfully
    # Parent still receives stop, because it does not handle failure of the child
    self.assertEqual([
        "parent_start", "child_start",
        "parent_stop"], results)

    # Error info should propagate up
    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_2", error.args[0])

def test_child_failure_3(self):
    """A child failing in on_stop raises a failure signal the parent does not handle."""
    # Child throws an error in on_stop
    results = []

    class ChildActor(actors.Actor):

        def on_start(self):
            results.append("child_start")

        def on_stop(self):
            results.append("child_stop")
            raise RuntimeError("err_code_3")

        @actors.Message
        def sample_message(self, value):
            results.append("sample_message")
            results.append(value)
            self.actors().send_parent("stop_child")

    class ParentActor(actors.Actor):

        def __init__(self):
            super().__init__()
            self.child_id = None

        def on_start(self):
            results.append("parent_start")
            self.child_id = self.actors().spawn(ChildActor)
            self.actors().send(self.child_id, "sample_message", 1)

        def on_stop(self):
            results.append("parent_stop")

        @actors.Message
        def stop_child(self):
            results.append("stop_child")
            self.actors().stop(self.child_id)

    root = ParentActor()
    system = actors.ActorSystem(root)
    system.start()
    system.wait_for_shutdown()

    # Parent still receives a stop signal
    # Child failure in on_stop still raises a failure signal, which is not handled in the parent
    self.assertEqual([
        "parent_start", "child_start",
        "sample_message", 1, "stop_child",
        "child_stop", "parent_stop"], results)

    # Error info should propagate up
    code = system.shutdown_code()
    error = system.shutdown_error()

    self.assertNotEqual(0, code)
    self.assertIsInstance(error, RuntimeError)
    self.assertEqual("err_code_3", error.args[0])
def test_child_failure_signals(self):
results = []
class ChildActor(actors.Actor):
def on_start(self):
results.append("child_start")
def on_stop(self):
results.append("child_stop")
@actors.Message
def sample_message(self, value):
results.append("sample_message")
results.append(value)
raise RuntimeError("expected_error")
class ParentActor(actors.Actor):
def __init__(self):
super().__init__()
self.child_id = None
def on_start(self):
results.append("parent_start")
self.child_id = self.actors().spawn(ChildActor)
self.actors().send(self.child_id, "sample_message", 1)
def on_stop(self):
results.append("parent_stop")
def on_signal(self, signal: actors.Signal):
if signal.sender == self.child_id:
results.append("child_signal")
results.append(signal.message)
self.actors().stop()
# Intercept the signal - prevents propagation
| |
# m700.py
# coding: utf-8
'''
Communicate with a Mitsubishi Electric CNC M700-series controller via EZSocket.
The targets are machining-center-type Mitsubishi CNCs: M700/M700V/M70/M70V.
'''
from enum import Enum
import threading
import pythoncom
import win32com.client
from win32com.client import VARIANT
class M700():
    # Reuse the same instance for the same host within one thread.
    # Connections are kept per-thread because sharing COM objects across
    # threads is complicated.
    __connections = {}

    @classmethod
    def get_connection(cls, host):
        """Return the per-thread cached M700 connection for ``host``, creating it on first use."""
        key = str(threading.current_thread().ident) + "_" + host
        if key not in cls.__connections:
            cls.__connections[key] = M700(host)
        return cls.__connections[key]
# Allocation table for the unique EZSocket unit numbers 1-255 (True = in use).
__uno_list = [False] * 255

@classmethod
def alloc_unitno(cls):
    """Return an unused EZSocket unit number and mark it as in use.

    Returns:
        int: unit number (1-255)

    Raises:
        Exception: when all 255 unit numbers are allocated.
    """
    for i, in_use in enumerate(cls.__uno_list):
        # Idiomatic truthiness test (was ``if v == False:``).
        if not in_use:
            cls.__uno_list[i] = True
            return i + 1
    raise Exception("ユニット番号が255を超えました。同時接続数が多すぎます")
@classmethod
def release_unitno(cls, uno):
    """Mark unit number ``uno`` (1-255) as free again."""
    cls.__uno_list[uno-1] = False
# --- Enumerations used inside this class ---
class RunStatus(Enum):
    '''Run status (values correspond to what the M700 returns).'''
    NOT_AUTO_RUN = 0
    AUTO_RUN = 1

class Position(Enum):
    '''X, Y, Z axis selector (values correspond to what the M700 returns).'''
    X = 1
    Y = 2
    Z = 3

class ProgramType(Enum):
    '''Main or sub program (values correspond to what the M700 returns).'''
    MAIN = 0
    SUB = 1

class NCProgramFileOpenMode(Enum):
    '''Mode specified when opening a program file on the NC.'''
    READ = 1
    WRITE = 2
    OVER_WRITE = 3
# Connection state; class-level defaults, set per instance in __init__/__open.
__ip = None
__port = None
__isopen = False
__ezcom = None
# NOTE(review): declared at class level, so this RLock is shared by every
# M700 instance — all EZSocket access is serialized process-wide. Confirm
# whether a per-instance lock was intended.
__lock = threading.RLock()
def __init__(self, host):
    '''
    Args:
        host: "IP address:port" string
    '''
    # COM must be initialized per thread when running on multiple threads.
    pythoncom.CoInitialize()
    self.__ip, self.__port = host.split(':')
def __str__(self):
    """Human-readable connection summary, e.g. ``192.168.1.1:683 Open``."""
    state = "Open" if self.__isopen else "Close"
    return "{}:{} {}".format(self.__ip, self.__port, state)
def __open(self):
    '''Open a connection to the stored IP using a freshly allocated unit number.
    Does nothing when the connection is already open.'''
    if not self.__isopen:
        self.__ezcom = win32com.client.Dispatch('EZNcAut.DispEZNcCommunication')
        errcd = self.__ezcom.SetTCPIPProtocol(self.__ip, int(self.__port))
        self.__unitno = M700.alloc_unitno()
        # NOTE(review): if this raises, the unit number allocated above is
        # never released (close() is not reached) — potential slow leak.
        self.__raise_error(errcd)
        # Args: machine type (fixed), unit number, timeout, COM host name.
        # Machine type 6 = EZNC_SYS_MELDAS700M (machining-center Mitsubishi
        # CNC M700/M700V/M70/M70V).
        # The unit number must be unique within 1-255.
        # NOTE(review): the original comment says "timeout 100 ms" while 30
        # is passed — the unit of the third argument is unconfirmed.
        errcd = self.__ezcom.Open2(6, self.__unitno, 30, 'EZNC_LOCALHOST')
        self.__raise_error(errcd)
        self.__isopen = True
def close(self):
    '''Close the connection.

    Best-effort: internal errors are never propagated to the caller.
    '''
    try:
        M700.release_unitno(self.__unitno)  # free the unit number
        self.__isopen = False
        self.__ezcom.Close()
    except Exception:
        # Was a bare ``except``, which also swallowed KeyboardInterrupt and
        # SystemExit; ``Exception`` keeps the best-effort semantics.
        pass
    try:
        self.__ezcom.Release()
    except Exception:
        pass
def is_open(self):
    '''Try to open the connection (if needed) and report whether it is open.

    Return:
        bool: True when the connection is open.
    '''
    with self.__lock:
        try:
            self.__open()
        except Exception:
            # Best-effort probe: a failed open simply leaves __isopen False.
            # (Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.)
            pass
        return self.__isopen
# --- NC information retrieval ---
def get_drive_infomation(self):
    '''Return the available drive name(s).
    (Method name typo "infomation" kept for caller compatibility.)

    Note: the raw format is "drive:CRLFdrive:CRLF...drive:CRLF\\0", so when
    multiple drives exist the result would need to be split.

    Return:
        str: drive information (first 4 characters of the raw string)
    '''
    with self.__lock:
        self.__open()
        errcd, drive_info = self.__ezcom.File_GetDriveInformation()
        self.__raise_error(errcd)
        # NOTE(review): only the first drive entry ("X:" + CRLF) is returned.
        return drive_info[0:4]
def get_version(self):
    '''Return the NC version.

    Return:
        str: version information
    '''
    with self.__lock:
        self.__open()
        errcd, version = self.__ezcom.System_GetVersion(1, 0)
        self.__raise_error(errcd)
        return version
def get_current_position(self, axisno):
    '''Return the current coordinate of one axis.

    Args:
        axisno (M700.Position): axis selector (X, Y or Z).
    Return:
        float: current position.
    Raises:
        Exception: if axisno is not an M700.Position member.
    '''
    with self.__lock:
        if not isinstance(axisno, M700.Position):
            raise Exception('列挙体[M700.Position.*]を指定してください。')
        # arg: axis to read (1=X, 2=Y, 3=Z); pos: current position.
        self.__open()
        errcd, pos = self.__ezcom.Position_GetCurrentPosition(axisno.value)
        self.__raise_error(errcd)
        return pos
def get_run_status(self):
    '''Return the operation status.

    Return:
        M700.RunStatus: AUTO_RUN while in automatic operation, otherwise
        NOT_AUTO_RUN.
    '''
    with self.__lock:
        # arg 1 = query "in automatic operation?".
        # status: 0 = not in automatic operation, 1 = in automatic operation.
        self.__open()
        errcd, status = self.__ezcom.Status_GetRunStatus(1)
        self.__raise_error(errcd)
        if M700.RunStatus.AUTO_RUN.value == status:
            return M700.RunStatus.AUTO_RUN
        else:
            return M700.RunStatus.NOT_AUTO_RUN
def get_rpm(self):
    '''Return the spindle rotation speed (0.. [rpm]).

    Return:
        int: rotation speed.
    '''
    with self.__lock:
        # arg1: spindle parameter number; 2 = spindle (SR/SF) rotation
        #       speed, 0.. [rpm].
        # arg2: spindle number.
        # data: spindle state; info: spindle info as a Unicode string.
        self.__open()
        errcd, data, info = self.__ezcom.Monitor_GetSpindleMonitor(2, 1)
        self.__raise_error(errcd)
        return data
def get_load(self):
    '''Return the spindle motor load (0.. [%]).

    Return:
        int: load.
    '''
    with self.__lock:
        # arg1: spindle parameter number; 3 = load (spindle motor load),
        #       0.. [%].
        # arg2: spindle number.
        # data: spindle state; info: spindle info as a Unicode string.
        self.__open()
        errcd, data, info = self.__ezcom.Monitor_GetSpindleMonitor(3, 1)
        self.__raise_error(errcd)
        return data
def get_mgn_size(self):
    '''Return the magazine size.

    Return:
        int: total number of magazine pot sets, 0-360 (max).
    '''
    with self.__lock:
        self.__open()
        errcd, size = self.__ezcom.ATC_GetMGNSize()
        self.__raise_error(errcd)
        return size
def get_mgn_ready(self):
    '''Return the number of the currently mounted tool.

    Return:
        int: tool number, 1..99999999 (max).
    '''
    with self.__lock:
        # arg1: magazine number, 1-2 (ignored on M700/M800 series).
        # arg2: standby state; 0 = mounted tool number, 1 = standby-1 tool
        #       number (2, 3, 4 behave like 1).
        self.__open()
        errcd, toolno = self.__ezcom.ATC_GetMGNReady2(1, 0)
        self.__raise_error(errcd)
        return toolno
def get_toolset_size(self):
    '''Return the tool-set table size.

    A "tool set" is an offset-compensation number slot.

    Return:
        int: tool-set size (e.g. 200 sets).
    '''
    with self.__lock:
        self.__open()
        errcd, size = self.__ezcom.Tool_GetToolSetSize()
        self.__raise_error(errcd)
        return size
def get_tool_offset_h(self, toolset_no):
    '''Return the length (H) offset of a tool-set number.

    Args:
        toolset_no (int): tool-set number.
    Return:
        float: length offset.
    '''
    with self.__lock:
        # lType: tool-offset type; 4 = machining-center type II.
        # lKind: offset kind; 0=length, 1=length wear, 2=radius, 3=radius wear.
        # lToolSetNo: tool-set number.
        # pdOffset (out): offset amount.
        # plNo (out): virtual tool-nose point number.
        self.__open()
        errcd, h, plno = self.__ezcom.Tool_GetOffset2(4, 0, toolset_no)
        self.__raise_error(errcd)
        return h
def get_tool_offset_d(self, toolset_no):
    '''Return the radius (D) offset of a tool-set number.

    Args:
        toolset_no (int): tool-set number.
    Return:
        float: radius offset.
    '''
    with self.__lock:
        # Same Tool_GetOffset2 call as get_tool_offset_h, but kind 2 = radius.
        self.__open()
        errcd, d, plno = self.__ezcom.Tool_GetOffset2(4, 2, toolset_no)
        self.__raise_error(errcd)
        return d
def set_tool_offset_h(self, toolset_no, h):
    '''Set the length (H) offset of a tool-set number.

    Args:
        toolset_no (int): tool-set number.
        h (float): length offset value.
    '''
    with self.__lock:
        # lType: tool-offset type; 4 = machining-center type II.
        # lKind: offset kind; 0=length, 1=length wear, 2=radius, 3=radius wear.
        # Args: type, kind, tool-set number, offset amount, virtual
        # tool-nose point number.
        self.__open()
        errcd = self.__ezcom.Tool_SetOffset(4, 0, toolset_no, h, 0)
        self.__raise_error(errcd)
        # BUG FIX: the original additionally called
        # Tool_SetOffset(4, 2, toolset_no, d, 0) here with an undefined
        # name `d` (copy/paste from set_tool_offset_d), raising NameError
        # right after the H write. The radius offset is handled by
        # set_tool_offset_d.
def set_tool_offset_d(self, toolset_no, d):
    '''Set the radius (D) offset of a tool-set number.

    Args:
        toolset_no (int): tool-set number.
        d (float): radius offset value.
    '''
    with self.__lock:
        self.__open()
        # type 4 = machining-center type II, kind 2 = radius.
        errcd = self.__ezcom.Tool_SetOffset(4, 2, toolset_no, d, 0)
        self.__raise_error(errcd)
def get_program_number(self, progtype):
    '''Return the number of the searched or currently auto-running program.

    Args:
        progtype (M700.ProgramType): MAIN or SUB.
    Return:
        str: program number.
    Raises:
        Exception: if progtype is not an M700.ProgramType member.
    '''
    with self.__lock:
        if not isinstance(progtype, M700.ProgramType):
            raise Exception('列挙体[M700.ProgramType.*]を指定してください。')
        # arg: 0 = main program, 1 = sub program.
        self.__open()
        errcd, msg = self.__ezcom.Program_GetProgramNumber2(progtype.value)
        self.__raise_error(errcd)
        return msg
def get_alerm(self):
    '''Return active alarm message(s). (Method name kept as-is, including
    the typo, for existing callers.)

    Return:
        str: alarm/error message text.
    '''
    with self.__lock:
        # arg1: number of message lines to fetch, 1-10 (max).
        # arg2: alarm kind to fetch.
        # msg: error message.
        self.__open()
        errcd, msg = self.__ezcom.System_GetAlarm2(3, 0)
        self.__raise_error(errcd)
        return msg
# --- NCプログラムファイル操作関連 ---
def read_file(self, path):
    '''Read a file from the NC.

    Args:
        path (str): absolute path, e.g. "M01:\\PRG\\USER\\100".
    Return:
        bytes: the bytes read.
    '''
    with self.__lock:
        self.__open()
        try:
            errcd = self.__ezcom.File_OpenFile3(path, M700.NCProgramFileOpenMode.READ.value)
            self.__raise_error(errcd)
            result = b''
            while True:
                errcd, data = self.__ezcom.File_ReadFile2(256)  # bytes to read per call
                self.__raise_error(errcd)
                result += data  # bytes arrive as a VARIANT byte array
                if len(data) < 256:
                    # short read => end of file
                    break
            return result
        finally:
            # Always try to close; ignore close failures so the original
            # error (if any) propagates.
            try:
                self.__ezcom.File_CloseFile2()
            except:
                pass
def write_file(self, path, data):
    '''Write a file to the NC (overwriting an existing one).

    Args:
        path (str): absolute path, e.g. "M01:\\PRG\\USER\\100".
        data (bytes): bytes to write.
    '''
    with self.__lock:
        self.__open()
        try:
            errcd = self.__ezcom.File_OpenFile3(path, M700.NCProgramFileOpenMode.OVER_WRITE.value)
            self.__raise_error(errcd)
            errcd = self.__ezcom.File_WriteFile(memoryview(data))  # passed as a byte-array VARIANT
            self.__raise_error(errcd)
        finally:
            # Always try to close; ignore close failures so the original
            # error (if any) propagates.
            try:
                self.__ezcom.File_CloseFile2()
            except:
                pass
def delete_file(self, path):
    '''Delete a file on the NC by path.

    Args:
        path (str): absolute path, e.g. "M01:\\PRG\\USER\\100".
    '''
    with self.__lock:
        self.__open()
        errcd = self.__ezcom.File_Delete2(path)
        self.__raise_error(errcd)
# --- NCディレクトリ操作関連 --
def find_dir(self, path):
    '''List directories and files under a path on the NC.

    Args:
        path (str): directory path, e.g. "M01:\\PRG\\USER\\".
    Return:
        list: one dict per entry, e.g.
            [{'type': 'file', 'name': '100', 'size': '19',
              'comment': 'BY IKEHARA'}, ...]
    '''
    with self.__lock:
        result = []
        try:
            self.__open()
            # "M01" -> "M" + this connection's unit number in hex.
            path = path.replace("M01", "M{:02X}".format(self.__unitno))
            # First pass: directory entries (-1 => "name\tsize" strings).
            errcd, info = self.__ezcom.File_FindDir2(path, -1)
            self.__raise_error(errcd)
            while True:
                # errcd > 1 means another directory entry was returned.
                if errcd > 1:
                    dir_info = info.split('\t')
                    data = {
                        'type': 'folder',
                        'name': dir_info[0],
                        'size': '{:,}'.format(int(dir_info[1])),
                        'comment': None
                    }
                    result.append(data)
                else:
                    break
                errcd, info = self.__ezcom.File_FindNextDir2()
                self.__raise_error(errcd)
            # Reset the directory cursor before the second pass.
            errcd = self.__ezcom.File_ResetDir()
            self.__raise_error(errcd)
            # Second pass: file entries (5 => "name\tsize\tcomment" strings).
            errcd, info = self.__ezcom.File_FindDir2(path, 5)
            self.__raise_error(errcd)
            while True:
                # errcd > 1 means another file entry was returned.
                if errcd > 1:
                    dir_info = info.split('\t')
                    data = {
                        'type': 'file',
                        'name': dir_info[0],
                        'size': '{:,}'.format(int(dir_info[1])),
                        'comment': dir_info[2]
                    }
                    result.append(data)
                else:
                    break
                errcd, info = self.__ezcom.File_FindNextDir2()
                self.__raise_error(errcd)
        finally:
            # Best-effort cursor reset on the way out.
            try:
                errcd = self.__ezcom.File_ResetDir()
                self.__raise_error(errcd)
            except:
                pass
        return result
# --- NCデバイス操作関連 ---
def __setting_dev(self, dev, data=0):
    '''Register one device for a subsequent Device_Read/Device_Write.

    Args:
        dev (str): device name, e.g. "M810", "D10".
        data (int): value for writes (1/0 to set/clear a bit device);
            for read_dev the value is a dummy.
    Raises:
        Exception: if dev is neither an M device nor a D device.
    '''
    data_type = 0  # 1 or 4 or 8, e.g. M=1 (bit, 1 bit), D=4 (word, 16 bit)
    if dev[0] == 'M':
        data_type = 1
    elif dev[0] == 'D':
        data_type = 4
    else:
        # BUG FIX: the original built this Exception without `raise`,
        # silently continuing with data_type = 0.
        raise Exception('Mデバイス、又はDデバイスを設定して下さい。')
    # arg1: device name strings (VARIANT array).
    # arg2: data types.
    # arg3: device values.
    vDevice = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_BSTR, [dev])
    vDataType = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_I4, [data_type])
    vValue = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_I4, [data])  # numeric values only for now
    errcd = self.__ezcom.Device_SetDevice(vDevice, vDataType, vValue)
    self.__raise_error(errcd)
def __delall_dev(self):
    '''Remove every device registration made via __setting_dev.'''
    errcd = self.__ezcom.Device_DeleteAll()
    self.__raise_error(errcd)
def read_dev(self, dev):
    '''Read the value of a device registered via __setting_dev.

    Args:
        dev (str): device name, e.g. "M900".
    Return:
        int: value read.
    '''
    with self.__lock:
        self.__open()
        self.__setting_dev(dev)
        errcd, value = self.__ezcom.Device_Read()  # value: array of device values
        self.__raise_error(errcd)
        self.__delall_dev()
        return value[0]
def write_dev(self, dev, data):
    '''Write a value to a device registered via __setting_dev.

    Args:
        dev (str): device name, e.g. "M900".
        data (int): value to write.
    '''
    with self.__lock:
        self.__open()
        self.__setting_dev(dev, data)
        errcd = self.__ezcom.Device_Write()
        # Clear the registration list before raising so a failed write
        # leaves no stale device registrations behind.
        self.__delall_dev()
        self.__raise_error(errcd)
# --- エラー出力関連 ---
def __raise_error(self, errcd):
'''エラーコードから、エラーの内容をExceptionとして返す。
エラーがない場合(エラーコードが0)は何もしない。
エラーの内容は、辞書で {'16進数エラーコード': 'error detail message'} の形で登録。
Raises:
Exception: エラーメッセージ
'''
__errmap = {
"0x80a00101" : "通信回線がオープンされていません",
"0x80a00104" : "2重オープンエラー",
"0x80a00105" : "引数のデータタイプが不正",
"0x80a00106" : "引数のデータ範囲が不正",
"0x80a00107" : "サポートしていない",
"0x80a00109" : "通信回線がオープンできません",
"0x80a0010a" : "引数がnullポインタです。",
"0x80a0010b" : "引数のデータ不正",
"0x80a0010c" : "COMMポートハンドルエラー",
"0x80b00101" : "メモリの確保ができない",
"0x80b00102" : "EZSocketPcのエラーが取得できない",
"0x80b00201" : "モード指定不正",
"0x80b00202" : "未ファイルオープン",
"0x80b00203" : "ファイルが既に存在する",
"0x80b00204" : "既にファイルオープンしている",
"0x80b00205" : "テンポラリファイルを作成できない",
"0x80b00206" : "書き込みモード指定でファイルオープンしていない",
"0x80b00207" : "書き込みデータサイズ不正",
"0x80b00208" : "書き込みできない状態",
"0x80b00209" : "読み出しモード指定でファイルオープンしていない",
"0x80b0020a" : "読み出しできない状態",
"0x80b0020b" : "テンポラリファイルを作成できない",
"0x80b0020c" : "ファイルが存在しない(readモード)",
"0x80b0020d" : "ファイルがオープンできない",
"0x80b0020e" : "ファイルのパスが不正",
"0x80b0020f" : "読み出しファイルが不正",
"0x80b00210" : "書き込みファイルが不正",
"0x80b00301" : "オートメーション呼び出しでローカル接続時のホスト名が不正",
"0x80b00302" : "TCP/IP通信が設定されていない",
"0x80b00303" : "既に通信中なので設定できない",
"0x80b00304" : "下位モジュールがない",
"0x80b00305" : "EZSocketPcオブジェクトが生成できない",
"0x80b00401" : "データが存在しない",
"0x80b00402" : "データ重複",
"0x80b00501" : "パラメータ情報ファイルがない",
"0x80020190" : "NCカード番号不正",
"0x80020102" : "デバイスがオープンされていない",
"0x80020132" : "コマンド不正",
"0x80020133" : "通信パラメータデータ範囲不正",
"0x80030143" : "ファイルシステムに異常がある",
"0x80030191" : "ディレクトリが存在しない",
"0x8003019b" : "ドライブが存在しない",
"0x800301a2" : "ディレクトリが存在しない",
"0x800301a8" : "ドライブが存在しない",
"0x80050d90" : "系統、軸指定が不正",
"0x80050d02" : "アラーム種類が不正",
"0x80050d03" : "NCとPC間の通信データにエラーがある",
"0x80041194" : "寿命管理データの種類指定不正",
"0x80041195" : "設定データ範囲オーバ",
"0x80041196" : "設定工具番号不一致",
"0x80041197" : "指定工具番号が仕様外",
"0x80040190" : "系統、軸指定が不正",
"0x80040191" : "大区分番号不正",
"0x80040192" : "小区分番号不正",
"0x80040196" : "アプリケーションが用意したバッファに入りきらない",
"0x80040197" : "データタイプ不正",
"0x8004019d" : "データが読み出せない状態にある",
"0x8004019f" : "書き込み専用データ",
"0x800401a0" : "軸指定不正",
"0x800401a1" : "データ番号不正",
"0x800401a3" : "読み出しデータなし",
"0x8004019a" : "読み出しデータ範囲不正",
"0x80040290" : "系統、軸指定が不正",
"0x80040291" : "大区分番号不正",
"0x80040292" : "小区分番号不正",
"0x80040296" : "アプリケーションが用意したバッファに入りきらない",
| |
= self.screen.width - len(minion.name) - 18
self.screen.write_body(row, 3 + len(minion.name) + 1, "." * dots_len,
CursesScreen.COLOR_MINION, False, selected, True)
timer_str = "({})".format(self.ftime(
(minion.end_time if minion.finished() else now) - minion.begin_time))
if not minion.finished():
color = CursesScreen.COLOR_MINION
self.screen.write_body(row, 3 + len(minion.name) + 1 + dots_len + 2,
self.loading.loading_string(), color, False, selected,
True)
color = CursesScreen.COLOR_MINION if selected else CursesScreen.COLOR_MARKER
self.screen.write_body(row, self.screen.body_width - len(timer_str), timer_str,
color, False, selected, True)
else:
color = CursesScreen.COLOR_MINION if selected else (
CursesScreen.COLOR_WARNING if minion.warnings else (
CursesScreen.COLOR_SUCCESS if minion.success else CursesScreen.COLOR_ERROR))
icon = "⚠" if minion.warnings else ("✓" if minion.success else "╳")
self.screen.write_body(row, 3 + len(minion.name) + 1 + dots_len + 2,
icon, color, False, selected,
True)
color = CursesScreen.COLOR_MINION if selected else CursesScreen.COLOR_MARKER
self.screen.write_body(row, self.screen.body_width - len(timer_str), timer_str,
color, False, selected, True)
def _render_stage_row(self, stage, row, col, now):
    '''Draw one stage line: description, dot leader, status icon (or
    spinner while running) and a right-aligned elapsed-time column.'''
    self.screen.write_body(row, col, stage.desc, CursesScreen.COLOR_STAGE)
    # 20 columns are reserved on the right for the icon and the timer.
    dots_len = self.screen.body_width - len(stage.desc) - 20
    self.screen.write_body(row, col + len(stage.desc) + 1, "." * dots_len,
                           CursesScreen.COLOR_STAGE)
    if stage.begin_time:
        # Elapsed time: frozen at end_time once finished, live otherwise.
        timer_str = "({})".format(self.ftime(
            (stage.end_time if stage.finished() else now) - stage.begin_time))
    else:
        timer_str = " "
    if stage.finished():
        # Icon priority: warning > success > error.
        color = CursesScreen.COLOR_WARNING if stage.warning else (
            CursesScreen.COLOR_SUCCESS if stage.success else CursesScreen.COLOR_ERROR)
        icon = "⚠" if stage.warning else ("✓" if stage.success else "╳")
        self.screen.write_body(row, col + len(stage.desc) + 1 + dots_len + 2,
                               icon, color)
    else:
        self.screen.write_body(row, col + len(stage.desc) + 1 + dots_len + 2,
                               self.loading.loading_string(), CursesScreen.COLOR_STAGE)
    self.screen.write_body(row, self.screen.body_width - len(timer_str), timer_str,
                           CursesScreen.COLOR_MARKER)
def _render_step_row(self, step, row, col, now):
    '''Draw one step line; returns the number of rows consumed (1, plus
    the failure report rows when the step carries a failure).'''
    self.screen.write_body(row, col, step.desc, CursesScreen.COLOR_STEP)
    # NOTE(review): this uses screen.width while _render_stage_row uses
    # screen.body_width for the same computation — confirm the asymmetry
    # is intentional.
    dots_len = self.screen.width - len(step.desc) - 24
    self.screen.write_body(row, col + len(step.desc) + 1, "." * dots_len,
                           CursesScreen.COLOR_STEP)
    if step.begin_time:
        # Elapsed time: frozen at end_time once finished, live otherwise.
        timer_str = "({})".format(self.ftime(
            (step.end_time if step.finished() else now) - step.begin_time))
    else:
        timer_str = " "
    if step.finished():
        color = CursesScreen.COLOR_SUCCESS if step.success else CursesScreen.COLOR_ERROR
        self.screen.write_body(row, col + len(step.desc) + 1 + dots_len + 2,
                               "✓" if step.success else "╳", color)
    else:
        self.screen.write_body(row, col + len(step.desc) + 1 + dots_len + 2,
                               self.loading.loading_string(), CursesScreen.COLOR_STEP)
    self.screen.write_body(row, self.screen.body_width - len(timer_str), timer_str,
                           CursesScreen.COLOR_MARKER)
    if step.failure:
        # Failure details go on the following rows, indented under "|_".
        self.screen.write_body(row + 1, col, "|_", CursesScreen.COLOR_MARKER)
        return self._render_failure(step.failure, row + 1, col + 3) + 1
    return 1
@staticmethod
def break_lines(desc, width):
"""
Breaks the string into an array of strings of max length width
"""
desc = desc.replace("\n", "")
# list of characters that we will use to split a string in order of
# preference
split_chars = (' ', '|', ',', ')', '(', '/')
def find_split_idx(text, reverse=True):
for ch in split_chars:
if reverse:
idx = text.rfind(ch)
else:
idx = text.find(ch)
if idx != -1:
return idx
return -1
result = []
while len(desc) > width:
idx = find_split_idx(desc[1:width])
if idx != -1:
idx += 1
result.append(desc[0:idx])
desc = desc[idx:]
else:
idx = find_split_idx(desc[width:], False)
if idx != -1:
idx = idx + width
result.append(desc[:idx])
desc = desc[idx:]
else:
break
result.append(desc)
return result
def _render_failure(self, failure, row, col):
if isinstance(failure, str):
line_num = 0
for line in failure.split('\n'):
line_width = self.screen.body_width - col - 1
for sline in self.break_lines(line, line_width):
self.screen.write_body(row + line_num, col, sline, CursesScreen.COLOR_ERROR)
line_num += 1
return line_num
self.screen.write_body(row, col, failure['__id__'], CursesScreen.COLOR_ERROR)
col += 3
st_match = re.match(r'^(.+)_\|-.+_\|.+_\|-(.+)$', failure['state'].replace('\n', ''))
state = "{}.{}".format(st_match[1], st_match[2])
line_num = 1
self.screen.write_body(row + line_num, col, "SLS: ", CursesScreen.COLOR_MARKER)
self.screen.write_body(row + line_num, col + 5, failure['__sls__'],
CursesScreen.COLOR_ERROR)
line_num += 1
self.screen.write_body(row + line_num, col, "State: ", CursesScreen.COLOR_MARKER)
self.screen.write_body(row + line_num, col + 7, state, CursesScreen.COLOR_ERROR)
line_num += 1
if state == "cmd.run":
self.screen.write_body(row + line_num, col, "Command: ", CursesScreen.COLOR_MARKER)
line_width = self.screen.body_width - col - len("Command: ") - 1
for line in self.break_lines(
failure['name'].replace('\n', ' ').replace('\\', ' '), line_width):
self.screen.write_body(row + line_num, col + len("Command: "),
line.strip(), CursesScreen.COLOR_ERROR)
line_num += 1
self.screen.write_body(row + line_num, col, "Error Description: ",
CursesScreen.COLOR_MARKER)
line_num += 1
for line in failure['changes'].get('stderr', '').split('\n'):
line_width = self.screen.body_width - col - 4
for sline in self.break_lines(line, line_width):
self.screen.write_body(row + line_num, col + 3, sline,
CursesScreen.COLOR_ERROR)
line_num += 1
else:
self.screen.write_body(row + line_num, col, "Error Description: ",
CursesScreen.COLOR_MARKER)
line_num += 1
line_width = self.screen.body_width - col - 4
for line in failure.get('comment', '').split('\n'):
for sline in self.break_lines(line, line_width):
self.screen.write_body(row + line_num, col + 3, sline, CursesScreen.COLOR_ERROR)
line_num += 1
return line_num + 1
def _render_minion(self, minion, minion_id, row, selected, now):
    '''Render one minion entry and return the number of rows consumed.

    Collapsed minions show at most the current stage and step; expanded
    minions list every stage and step, including failure reports.
    '''
    self._render_minion_row(minion, minion_id, row, selected, now)
    if not self._is_minion_expanded(minion_id):
        if minion.finished():
            return 1
        stage = minion.last_stage
        if stage:
            self.screen.write_body(row + 1, 3, "|_", CursesScreen.COLOR_MARKER)
            self._render_stage_row(stage, row + 1, 6, now)
            step = stage.last_step
            if step:
                self.screen.write_body(row + 2, 6, "|_", CursesScreen.COLOR_MARKER)
                self._render_step_row(step, row + 2, 9, now)
                return 3
            return 2
        return 1
    idx = 1
    if minion.stages:
        self.screen.write_body(row + idx, 3, "|_", CursesScreen.COLOR_MARKER)
        for stage in minion.stages.values():
            if isinstance(stage, (dict, str)):
                # failure report
                idx += self._render_failure(stage, row + idx, 6)
                continue
            self._render_stage_row(stage, row + idx, 6, now)
            idx += 1
            if stage.steps:
                self.screen.write_body(row + idx, 6, "|_", CursesScreen.COLOR_MARKER)
                for step in stage.steps.values():
                    # BUG FIX: the original tested `isinstance(step, dict)
                    # or isinstance(stage, str)` — the second clause
                    # checked the wrong variable, so a string-typed step
                    # failure was never routed to _render_failure (cf. the
                    # parallel stage check above).
                    if isinstance(step, (dict, str)):
                        idx += self._render_failure(step, row + idx, 9)
                        continue
                    idx += self._render_step_row(step, row + idx, 9, now)
    return idx
def _update_screen(self):
    '''Redraw the header, footer and minion list, honoring pending
    "jump_to" requests so the selected minion is scrolled into view.'''
    with self._render_lock:
        now = datetime.datetime.utcnow()
        self._render_header(now)
        self._render_footer()
        self.screen.clear_body()
        row = 0
        for minion_id, minion in enumerate(self.model.minions_list()):
            num_rows = self._render_minion(minion, minion_id, row, self.selected == minion_id,
                                           now)
            if self.selected == minion_id and self.minions_ui[minion_id]['jump_to']:
                # Remember where the selected minion landed for make_visible.
                self.minions_ui[minion_id]['row'] = row
                self.minions_ui[minion_id]['lines'] = num_rows
            self.screen.clear_row(row + num_rows)
            row += num_rows + 1  # +1 blank separator row
        for minion in self.minions_ui.values():
            if minion['jump_to']:
                minion['jump_to'] = False
                self.screen.make_visible(minion['row'], minion['lines'])
        self.screen.refresh()
def _is_minion_expanded(self, idx):
    # True when the user has toggled this minion's detail view open.
    return self.minions_ui[idx]['expanded']
def up_key(self):
    """Move the selection one entry up (select the first entry when
    nothing is selected yet) and request a scroll-into-view."""
    if self.selected is None:
        self.selected = 0
    elif self.selected > 0:
        self.selected -= 1
    self.minions_ui[self.selected]['jump_to'] = True
def down_key(self):
    """Move the selection one entry down (select the first entry when
    nothing is selected yet) and request a scroll-into-view."""
    if self.selected is None:
        self.selected = 0
    elif self.selected + 1 < self.model.minions_total():
        self.selected += 1
    self.minions_ui[self.selected]['jump_to'] = True
def action_key(self):
    """Toggle the expanded/collapsed state of the selected minion."""
    if self.selected is None:
        return
    entry = self.minions_ui[self.selected]
    entry['expanded'] = not entry['expanded']
    entry['jump_to'] = True
def quit_key(self):
    # Quit is only honored once the whole run has finished.
    if self.model.finished():
        self.running = False
def pause_key(self):
    # Once the run is finished, pausing is meaningless: force-unpause.
    # Otherwise toggle — but only when pausing is enabled at all
    # (self.paused is None means "pause not available").
    if self.model.finished():
        self.paused = False
    elif self.paused is not None:
        self.paused = not self.paused
def _all_collapsed(self):
    """Return True when no minion entry is currently expanded."""
    return not any(entry['expanded'] for entry in self.minions_ui.values())
def collapse_expand_all_key(self):
    """Expand every entry if all are collapsed; otherwise collapse all."""
    target = self._all_collapsed()
    for entry in self.minions_ui.values():
        entry['expanded'] = target
@staticmethod
def ftime(tr):
if tr.seconds > 0:
return "{}s".format(int(round(tr.seconds + tr.microseconds / 1000000.0)))
return "{}s".format(round(tr.seconds + tr.microseconds / 1000000.0, 1))
def execution_stopped(self):
    # Nothing to do for the curses renderer; the final summary is
    # printed by run() after the UI shuts down.
    pass
def minion_failure(self, minion: str, failure: dict):
    """Auto-expand the UI entry of the minion that reported a failure."""
    target = next(
        (entry for entry in self.minions_ui.values() if entry['minion'] == minion),
        None)
    if target is not None:
        target['expanded'] = True
def run(self):
    '''Main render loop: drive the curses screen until the model has
    finished and the user quits, then print the final summary.'''
    self.loading.start()
    self.running = True
    self.paused = False
    has_failed = False
    try:
        self.screen.start()
        finished = False
        paused = False
        logger.info("started render loop")
        while self.running:
            # Latch the local flags back down if the model/user state
            # changed again (both start False, so these lines only matter
            # after the flags were raised below).
            finished = finished and self.model.finished()
            paused = paused and self.paused
            # Redraw on an input event, or continuously while not yet
            # finished; skip redrawing entirely while paused.
            if (self.screen.wait_for_event() or not finished) and not paused:
                if self.model.finished():
                    finished = True
                if self.paused:
                    paused = True
                self._update_screen()
        logger.info("finished render loop")
    except Exception as ex:  # pylint: disable=broad-except
        logger.exception(ex)
        has_failed = True
    self.screen.shutdown()
    self.loading.stop()
    if has_failed:
        PP.println("An error occurred in the UI, please check "
                   "'{}' for further details.".format(LoggingUtil.log_file))
    else:
        # Print any minion warnings, then the closing status line.
        if self.model.minions_with_warnings():
            PP.println()
            for minion in self.model.minions_list():
                for warning in minion.warnings:
                    PP.pl_orange('WARNING: {} - {}'.format(minion.name, warning))
            PP.println()
        PP.println("{}. Log file may be found at '{}'.".format(
            "Finished with warnings" if self.model.minions_with_warnings() else "Finished",
            LoggingUtil.log_file))
class TerminalRenderer(Renderer):
    """Plain-text (non-curses) renderer that logs progress lines to stdout."""
    def execution_started(self):
        PP.println("Starting the execution of: {}".format(self.cmd_str))
        PP.println()
    def minion_update(self, minion: str):
        # Print one progress line for the most recent event of this
        # minion: overall finish, or stage/step begin/end.
        minion = self.model.get_minion(minion)
        if minion.finished():
            PP.println("[{}] [{:<16}] Finished {}"
                       .format(minion.end_time, minion.name[:16],
                               "successfully" if minion.success else "with failures"))
            return
        stage = minion.last_stage
        if stage is not None:
            step = stage.last_step
            if stage.finished():
                PP.println("[{}] [{:<16}] [STAGE] [END ] {}"
                           .format(stage.end_time, minion.name[:16], stage.desc))
            elif step is not None and step.finished():
                PP.println("[{}] [{:<16}] [STEP ] [END ] {}"
                           .format(step.end_time, minion.name[:16], step.desc))
            elif step is not None:
                PP.println("[{}] [{:<16}] [STEP ] [BEGIN] {}"
                           .format(step.begin_time, minion.name[:16], step.desc))
            else:
                PP.println("[{}] [{:<16}] [STAGE] [BEGIN] {}"
                           .format(stage.begin_time, minion.name[:16], stage.desc))
    def minion_failure(self, minion, failure):
        # Dump the failure dict as YAML for readability.
        PP.println()
        PP.println("Failure in minion: {}".format(minion))
        PP.println(yaml.dump(failure, indent=2, default_flow_style=False))
        PP.println()
    def execution_stopped(self):
        # Final summary: warnings (if any) followed by success/failure counts.
        super(TerminalRenderer, self).execution_stopped()
        if self.model.minions_with_warnings():
            PP.println()
            for minion in self.model.minions_list():
                for warning in minion.warnings:
                    PP.println('WARNING: {} - {}'.format(minion.name, warning))
            PP.println()
        PP.println("Finished execution of {} formula".format(self.model.state))
        PP.println()
        PP.println("Summary: Total={} Succeeded={} Warnings={} Failed={}"
                   .format(self.model.minions_total(),
                           self.model.minions_succeeded(),
                           self.model.minions_with_warnings(),
                           self.model.minions_failed()))
class CephSaltExecutor:
def __init__(self, interactive, minion_id, state, pillar, prompt_proceed):
| |
test = {
'name': 'Problem 10',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> p0 = [2, 2, 3]
>>> p1 = [6, 1, 2]
>>> fastest_words(match(['What', 'great', 'luck'], [p0, p1]))
[['What'], ['great', 'luck']]
>>> p0 = [2, 2, 3]
>>> p1 = [6, 1, 3]
>>> fastest_words(match(['What', 'great', 'luck'], [p0, p1])) # with a tie, choose the first player
[['What', 'luck'], ['great']]
>>> p2 = [4, 3, 1]
>>> fastest_words(match(['What', 'great', 'luck'], [p0, p1, p2]))
[['What'], ['great'], ['luck']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p0 = [5, 1, 3]
>>> p1 = [4, 1, 6]
>>> fastest_words(match(['Just', 'have', 'fun'], [p0, p1]))
[['have', 'fun'], ['Just']]
>>> p0 # input lists should not be mutated
[5, 1, 3]
>>> p1
[4, 1, 6]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 4, 3, 5, 1]]
>>> fastest_words(match(['newsstand', 'stereochromy', 'quinaldine', 'invalidate', 'japingly'], p))
[['newsstand', 'stereochromy', 'quinaldine', 'invalidate', 'japingly']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 1, 1], [2, 5, 5]]
>>> fastest_words(match(['unstatesmanlike', 'median', 'cueca'], p))
[['median', 'cueca'], ['unstatesmanlike']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[1, 3, 2, 4, 3]]
>>> fastest_words(match(['introspectional', 'squamigerous', 'sair', 'heterodromy', 'butylene'], p))
[['introspectional', 'squamigerous', 'sair', 'heterodromy', 'butylene']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(match([], p))
[[], [], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 3, 5, 2, 1, 5], [3, 5, 3, 5, 4, 1], [2, 1, 3, 1, 2, 3]]
>>> fastest_words(match(['musiclike', 'nonregarding', 'oxypropionic', 'postvide', 'muncheel', 'reburial'], p))
[['musiclike', 'muncheel'], ['oxypropionic', 'reburial'], ['nonregarding', 'postvide']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 1, 1, 5, 2], [1, 4, 5, 4, 2], [5, 3, 2, 2, 3]]
>>> fastest_words(match(['nuggety', 'phlegmatous', 'doomsman', 'butterfingered', 'scouse'], p))
[['phlegmatous', 'doomsman', 'scouse'], ['nuggety'], ['butterfingered']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5], [3], [3]]
>>> fastest_words(match(['cixiid'], p))
[[], ['cixiid'], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4]]
>>> fastest_words(match(['accredit'], p))
[['accredit']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[1]]
>>> fastest_words(match(['electroextraction'], p))
[['electroextraction']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 2, 5, 4], [1, 3, 2, 1], [4, 2, 5, 1]]
>>> fastest_words(match(['termolecular', 'unbeatably', 'unamenable', 'ratio'], p))
[['unbeatably'], ['termolecular', 'unamenable', 'ratio'], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 1, 2, 3, 1], [2, 1, 3, 1, 5]]
>>> fastest_words(match(['interlardment', 'supercargo', 'inquilinity', 'mackenboy', 'trauma'], p))
[['interlardment', 'supercargo', 'inquilinity', 'trauma'], ['mackenboy']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], []]
>>> fastest_words(match([], p))
[[], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[1, 2, 5, 2, 3], [4, 3, 1, 1, 5], [3, 2, 4, 5, 4]]
>>> fastest_words(match(['chromo', 'casson', 'unpliableness', 'overweeningly', 'unsquandered'], p))
[['chromo', 'casson', 'unsquandered'], ['unpliableness', 'overweeningly'], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(match([], p))
[[], [], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5, 3, 1, 1]]
>>> fastest_words(match(['negotiatrix', 'attaintment', 'concurringly', 'glyoxaline'], p))
[['negotiatrix', 'attaintment', 'concurringly', 'glyoxaline']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 4, 2, 1, 3]]
>>> fastest_words(match(['marble', 'undeleted', 'subrogation', 'lownly', 'nebulosity'], p))
[['marble', 'undeleted', 'subrogation', 'lownly', 'nebulosity']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5, 2, 1, 1, 1, 3], [3, 5, 1, 2, 3, 3]]
>>> fastest_words(match(['pectous', 'kathal', 'supercargoship', 'keelblock', 'celiosalpingectomy', 'pronumber'], p))
[['kathal', 'supercargoship', 'keelblock', 'celiosalpingectomy', 'pronumber'], ['pectous']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5, 2, 2, 2, 1, 3], [3, 4, 4, 4, 2, 2]]
>>> fastest_words(match(['coalhole', 'osmotic', 'barnard', 'irreligiousness', 'nitrobacteria', 'cellarless'], p))
[['osmotic', 'barnard', 'irreligiousness', 'nitrobacteria'], ['coalhole', 'cellarless']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 3, 3], [1, 1, 3], [2, 3, 3]]
>>> fastest_words(match(['incendiarism', 'carbamide', 'families'], p))
[['families'], ['incendiarism', 'carbamide'], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[3, 1, 3, 2, 3, 3], [5, 1, 2, 4, 2, 5]]
>>> fastest_words(match(['heaps', 'kitling', 'workhouse', 'scriver', 'chilicothe', 'anteprandial'], p))
[['heaps', 'kitling', 'scriver', 'anteprandial'], ['workhouse', 'chilicothe']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[3, 1, 1, 3], [3, 4, 4, 1], [1, 2, 3, 3]]
>>> fastest_words(match(['brat', 'structureless', 'opacous', 'successfully'], p))
[['structureless', 'opacous'], ['successfully'], ['brat']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], []]
>>> fastest_words(match([], p))
[[], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 5, 1, 5], [3, 5, 1, 3]]
>>> fastest_words(match(['saponify', 'bakerless', 'nonluminous', 'zonesthesia'], p))
[['bakerless', 'nonluminous'], ['saponify', 'zonesthesia']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(match([], p))
[[], [], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 5, 4], [5, 4, 3], [4, 4, 4]]
>>> fastest_words(match(['uranophane', 'whereso', 'toolmaking'], p))
[['uranophane'], ['whereso', 'toolmaking'], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[3, 1, 5, 5, 2, 5]]
>>> fastest_words(match(['ali', 'indult', 'palmitic', 'carbon', 'scudder', 'novaculite'], p))
[['ali', 'indult', 'palmitic', 'carbon', 'scudder', 'novaculite']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[1, 5, 3, 2, 4, 2], [5, 1, 3, 4, 1, 3]]
>>> fastest_words(match(['telangiectasy', 'unratable', 'dissolvableness', 'redheadedly', 'recluse', 'galloon'], p))
[['telangiectasy', 'dissolvableness', 'redheadedly', 'galloon'], ['unratable', 'recluse']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[]]
>>> fastest_words(match([], p))
[[]]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5], [1]]
>>> fastest_words(match(['incorporable'], p))
[[], ['incorporable']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 1, 4], [2, 1, 2]]
>>> fastest_words(match(['accresce', 'during', 'unreproachableness'], p))
[['accresce', 'during'], ['unreproachableness']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[4, 2, 4, 2, 2], [2, 4, 3, 3, 5]]
>>> fastest_words(match(['counterprotection', 'karyolysis', 'contuse', 'esophagomalacia', 'investigatorial'], p))
[['karyolysis', 'esophagomalacia', 'investigatorial'], ['counterprotection', 'contuse']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[], [], []]
>>> fastest_words(match([], p))
[[], [], []]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[2, 4, 3, 2, 5, 4], [2, 4, 2, 3, 4, 1]]
>>> fastest_words(match(['driftpiece', 'archaic', 'oreotragine', 'nystagmic', 'refute', 'wellhole'], p))
[['driftpiece', 'archaic', 'nystagmic'], ['oreotragine', 'refute', 'wellhole']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[]]
>>> fastest_words(match([], p))
[[]]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[5, 4], [4, 3]]
>>> fastest_words(match(['colly', 'ransackle'], p))
[[], ['colly', 'ransackle']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[1, 2, 1, 4], [4, 1, 1, 2]]
>>> fastest_words(match(['clodpated', 'subcouncil', 'digestment', 'hierocratic'], p))
[['clodpated', 'digestment'], ['subcouncil', 'hierocratic']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[3, 3], [5, 2]]
>>> fastest_words(match(['swearingly', 'pimple'], p))
[['swearingly'], ['pimple']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
{
'code': r"""
>>> p = [[3, 4, 4]]
>>> fastest_words(match(['unbungling', 'rizzle', 'undistinguishableness'], p))
[['unbungling', 'rizzle', 'undistinguishableness']]
""",
'hidden': False,
'locked': False,
'multiline': False
},
| |
<gh_stars>10-100
import discord
import brawlstats
import box
import re
from discord.ext import commands
class BS(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.client = brawlstats.Client(
token=bot.config.bsapi,
session=bot.session,
is_async=True
)
def check_tag(self, tag):
return [char for char in tag if char.upper() not in '0289PYLQGRJCUV']
async def get_tag(self, id, position):
find = await self.bot.db.bstags.find_one({"id": id})
found_tag = None
try:
found_tag = find["tag"][position]
except:
pass
return found_tag
def sort_brawlers(self, brawlers):
brawler_list = []
for x in brawlers:
x.update({"sorted": False})
brawler_list.append(x)
print(brawler_list)
trophy_list = sorted([x['trophies'] for x in brawlers])
sorted_list = []
counter = 0
while counter < len(brawlers):
for brawler in brawler_list:
if trophy_list[counter] == brawler['trophies'] and not brawler["sorted"]:
sorted_list.append(brawler)
brawler_list[brawler_list.index(brawler)]["sorted"] = True
break
counter += 1
return reversed(sorted_list)
    def emoji(self, _id):
        # Convenience wrapper: resolve a custom Discord emoji object by its ID.
        return self.bot.get_emoji(_id)
    def brawler(self, name):
        """Return the custom emoji for brawler *name*, or a generic fallback emoji.

        The display name is normalised to the naming scheme used in the emoji
        guild: "8-Bit" -> "8bit", "Mr. P" -> "mrp", spaces removed, lower-cased.
        """
        name = name.replace("8-Bit", "8bit")
        name = name.replace("Mr. P", "mrp")
        name = name.replace(" ", "")
        name = name.lower()
        # 645624855580114965 is the dedicated emoji guild;
        # 650856644388847637 is the "unknown" fallback emoji.
        to_return = discord.utils.get(self.bot.get_guild(645624855580114965).emojis, name=name)
        if not to_return:
            to_return = self.bot.get_emoji(650856644388847637)
        return to_return
    def between(self, number, min, max):
        # Half-open interval membership test: min <= number < max.
        # NOTE(review): the parameters shadow the builtins `min`/`max`;
        # renaming them would be cleaner but could break keyword call sites.
        return min <= number < max
def get_event_emoji(self, name):
events = {
"Gemgrab": self.bot.get_emoji(650852285613604890),
"Gem Grab": self.bot.get_emoji(650852285613604890),
"Heist": self.bot.get_emoji(650852285794222138),
"Bounty": self.bot.get_emoji(650852285395632211),
"Siege": self.bot.get_emoji(650854441599107103),
"Brawlball": self.bot.get_emoji(650852285794091047),
"Brawl Ball": self.bot.get_emoji(650852285794091047),
"Lone Star": self.bot.get_emoji(650852284896641024),
"Takedown": self.bot.get_emoji(650852284934127657),
"Roborumble": self.bot.get_emoji(650852285131390980),
"Robo Rumble": self.bot.get_emoji(650852285131390980),
"Biggame": self.bot.get_emoji(650852285328523294),
"Big Game": self.bot.get_emoji(650852285328523294),
"Bossfight": self.bot.get_emoji(650852285056024597),
"Boss Fight": self.bot.get_emoji(650852285056024597),
"Showdown": self.bot.get_emoji(650852285160751135),
"Duo Showdown": self.bot.get_emoji(680934625228750925)
}
if name in events.keys():
return events[name]
else:
return self.bot.get_emoji(650856644388847637)
def fmt_time(self, time):
if time < 3600:
minutes = time // 60
seconds = time - minutes * 60
if 0 <= seconds <= 9:
seconds = "0" + str(seconds)
return f"{minutes}:{seconds}"
if time < 86400:
hours = int(time / 3600)
minutes = int(time / 60 - hours * 60)
return f"{hours} hrs, {minutes} mins"
else:
days = int(time/86400)
hours = int(time / 3600 - days * 24)
minutes = int(time / 60 - hours * 60 - days * 1440)
return f"{days} days, {hours} hrs, {minutes} mins"
def timestamp(self, time):
mins = time // 60
seconds = time % 60
return f"{mins}:{seconds}"
    async def cog_command_error(self, ctx, error):
        """Report Brawl Stars API request failures back to the invoking channel."""
        if isinstance(error, brawlstats.RequestError):
            em = discord.Embed(
                color=discord.Color.red(),
                title=f'{error.code} - {error.__class__.__name__}',
                description=error.error.split('\nURL')[0] # chop off the requested URL
            )
            await ctx.send(embed=em)
@commands.command()
async def bssave(self, ctx, tag):
"""Saves your Brawl Stars tag to your Discord account."""
tag = tag.strip("#").replace("O", "0")
invalid_chars = self.check_tag(tag)
if invalid_chars:
return await ctx.send(f"That's an invalid tag! {self.bot.get_emoji(468607278313111553)}")
match = await self.bot.db.bstags.find_one({"id": ctx.author.id})
accounts = 0
if not match:
await self.bot.db.bstags.update_one({"id": ctx.author.id}, {"$set": {"tag": [tag]}}, upsert=True)
accounts = 1
elif len(match['tag']) > 10:
return await ctx.send("You can have a maximum of 10 accounts saved on this bot.")
else:
tag_list = match['tag']
tag_list.append(tag)
accounts = len(tag_list)
await self.bot.db.bstags.update_one({"id": ctx.author.id}, {"$set": {"tag": tag_list}})
await ctx.send(f"Your Brawl Stars tag has been successfully saved. {self.emoji(484897652220362752)}\n\n**Number of accounts saved:** {accounts}")
@commands.command()
async def bslisttags(self, ctx):
"""Lists all the tags saved to your Discord account."""
match = await self.bot.db.bstags.find_one({"id": ctx.author.id})
if not match:
return await ctx.send("You don't have any accounts saved on this bot!")
em = discord.Embed(color=ctx.author.color, title="Brawl Stars Accounts")
count = 1
desc = f"**Total:** {len(match['tag'])}\n\n"
for tag in match['tag']:
profile = await self.client.get_player(tag)
desc += f"`#{count}:` {profile.name} (#{profile.tag})\n"
count += 1
em.description = desc
await ctx.send(embed=em)
@commands.command()
async def bsprofile(self, ctx, tag=None):
await ctx.trigger_typing()
if not tag:
tag = await self.get_tag(ctx.author.id, 0)
if not tag:
return await ctx.send("You didn't save a Brawl Stars tag to your profile. Time to get it saved!")
else:
if re.match("[0-9]+", tag):
tag = await self.get_tag(ctx.author.id, int(tag) - 1)
if not tag:
return await ctx.send("That Brawl Stars account does not exist on your profile. Run `uwu bslisttags` to see a list of the saved accounts. To use the multi-account feature, run `uwu bsprofile [number of account on list]`")
elif re.match("^<@!?[0-9]+>$", tag):
userid = tag.strip("<@!")
userid = userid.strip(">")
try:
userid = int(userid)
except:
return await ctx.send(f"Invalid user mention. Please either mention a user, provide a Brawl Stars tag, or don't provide anything if you have your tag saved. {self.bot.get_emoji(468607278313111553)}")
tag = await self.get_tag(userid, 0)
if not tag:
return await ctx.send("That user didn't save a Brawl Stars tag to their profile!")
else:
tag = tag.strip('#')
invalid_chars = self.check_tag(tag)
if invalid_chars:
return await ctx.send(f"Invalid characters: {', '.join(invalid_chars)}")
profile = await self.client.get_player(tag)
club = profile.club
em = discord.Embed(color=0x00ff00, title=f"{profile.name} ({profile.tag})")
em.add_field(name=f"Trophies {self.emoji(523919154630361088)}", value=f"{profile.trophies}")
em.add_field(name=f"Highest Trophies {self.emoji(523919154630361088)}", value=f"{profile.highest_trophies}")
em.add_field(name=f"Power Play Points {self.emoji(704746727156219924)}", value=f"{profile.power_play_points}")
em.add_field(name=f"Highest Power Play Points {self.emoji(704746727156219924)}", value=f"{profile.highest_power_play_points}")
em.add_field(name=f"XP Level {self.emoji(523924578314092544)}", value=f"{profile.exp_level}")
em.add_field(name=f"3v3 Victories {self.emoji(523919154751733762)}", value=f"{profile.x3vs3_victories}")
em.add_field(name=f"Solo Victories {self.emoji(523923170755870720)}", value=f"{profile.solo_victories}")
em.add_field(name=f"Duo Victories {self.emoji(523923170671984656)}", value=f"{profile.duo_victories}")
em.add_field(name=f"Best Time as Big Brawler {self.emoji(523923170970042378)}", value=f"{self.fmt_time(profile.best_time_as_big_brawler)}")
em.add_field(name=f"Best Robo Rumble Time {self.emoji(523926186620092426)}", value=f"{self.fmt_time(profile.best_robo_rumble_time)}")
em.add_field(name="Brawlers", value=f"{len(profile.brawlers)}/35")
if club:
em.add_field(name="Club", value=f"{club.name} ({club.tag})")
else:
em.add_field(name="Club", value=f"No club. {self.bot.get_emoji(522524669459431430)}")
#em.set_thumbnail(url=profile.avatar_url)
await ctx.send(embed=em)
@commands.command(aliases=["bsclan"])
async def bsclub(self, ctx, tag=None):
await ctx.trigger_typing()
if not tag:
tag = await self.get_tag(ctx.author.id, 0)
if not tag:
return await ctx.send("You didn't save a Brawl Stars tag to your profile. Time to get it saved!")
profile = await self.client.get_player(tag)
club = await self.client.get_club(profile.club.tag)
else:
if re.match("[0-9]+", tag):
tag = await self.get_tag(ctx.author.id, int(tag) - 1)
if not tag:
return await ctx.send("That Brawl Stars account does not exist on your profile. Run `uwu bslisttags` to see a list of the saved accounts. To use the multi-account feature, run `uwu bsprofile [number of account on list]`")
profile = await self.client.get_player(tag)
club = await self.client.get_club(profile.club.tag)
elif re.match("^<@!?[0-9]+>$", tag):
userid = tag.strip("<@!")
userid = userid.strip(">")
try:
userid = int(userid)
except:
return await ctx.send(f"Invalid user mention. Please either mention a user, provide a Brawl Stars tag, or don't provide anything if you have your tag saved. {self.bot.get_emoji(468607278313111553)}")
tag = await self.get_tag(userid, 0)
if not tag:
return await ctx.send("That user didn't save a Brawl Stars tag to their profile!")
profile = await self.client.get_player(tag)
club = await self.client.get_club(profile.club.tag)
else:
tag = tag.strip('#')
invalid_chars = self.check_tag(tag)
if invalid_chars:
return await ctx.send(f"Invalid characters: {', '.join(invalid_chars)}")
club = await self.client.get_club(tag)
em = discord.Embed(color=ctx.author.color, title=f"{club.name} (#{club.tag})")
em.description = club.description
em.add_field(name="Trophies", value=f"{club.trophies}")
em.add_field(name="Members", value=f"**{club.members_count}**/100")
em.add_field(name="Online Members", value=f"**{club.online_members}**/{club.members_count}")
em.add_field(name="Required Trophies", value=club.required_trophies)
em.add_field(name="Status", value=club.status)
em.set_thumbnail(url=club.badge_url)
await ctx.send(embed=em)
@commands.command()
async def bsbrawlers(self, ctx, tag=None):
await ctx.trigger_typing()
if not tag:
tag = await self.get_tag(ctx.author.id, 0)
if not tag:
return await ctx.send("You didn't save a Brawl Stars tag to your profile. Time to get it saved!")
else:
if re.match("[0-9]+", tag):
tag = await self.get_tag(ctx.author.id, int(tag) - 1)
if not tag:
return await ctx.send("That Brawl Stars account does not exist on your profile. Run `uwu bslisttags` to see a list of the saved accounts. To use the multi-account feature, run `uwu bsprofile [number of account on list]`")
elif re.match("^<@!?[0-9]+>$", tag):
userid = tag.strip("<@!")
userid = userid.strip(">")
try:
userid = int(userid)
except:
return await ctx.send(f"Invalid user mention. Please either mention a user, provide a Brawl Stars tag, or don't provide anything if you have your tag saved. {self.bot.get_emoji(468607278313111553)}")
tag = await self.get_tag(userid, 0)
if not tag:
return await ctx.send("That user didn't save a Brawl Stars tag to their profile!")
else:
tag = tag.strip('#')
invalid_chars = self.check_tag(tag)
if invalid_chars:
return await ctx.send(f"Invalid characters: {', '.join(invalid_chars)}")
profile = await self.client.get_player(tag)
em1 = discord.Embed(title=f"{profile.name} | #{tag}")
em2 = discord.Embed()
average = 0
counter = 0
brawlers = self.sort_brawlers(profile.brawlers)
for x in brawlers:
rank_emoji = discord.utils.get(self.bot.get_guild(523916552014397450).emojis, name=f"r{x['rank']}")
if counter < 25:
em1.add_field(name=f"{x['name'].title()} {self.brawler(x['name'].title())}", value=f"{rank_emoji} `{x['power']}` {self.bot.get_emoji(645739308711542828) if x['power'] < 10 else self.bot.get_emoji(645762041751273512)} {x['trophies']}/{x['highestTrophies']}")
else:
em2.add_field(name=f"{x['name'].title()} {self.brawler(x['name'].title())}", value=f"{rank_emoji} `{x['power']}` {self.bot.get_emoji(645739308711542828) if x['power'] < 10 else self.bot.get_emoji(645762041751273512)} {x['trophies']}/{x['highestTrophies']}")
average += x["trophies"]
counter += 1
em1.description = f"""
**Brawlers:** {len(profile.brawlers)}/35
**Average Trophies:** {int(average/len(profile.brawlers))}
"""
await ctx.send(embed=em1, edit=False)
if counter >= 25:
await ctx.send(embed=em2, edit=False)
@commands.command()
async def bsseason(self, ctx, tag=None):
"""Find your end-of-season rewards and trophy loss."""
await ctx.trigger_typing()
if not tag:
tag = await self.get_tag(ctx.author.id, 0)
if not tag:
return await ctx.send("You didn't save a Brawl Stars tag to your profile. Time to get it saved!")
else:
if re.match("[0-9]+", tag):
tag = await self.get_tag(ctx.author.id, int(tag) - 1)
if not tag:
return await ctx.send("That Brawl Stars account does not exist on your profile. Run `uwu bslisttags` to see a list of the | |
import os, sys, re
import pickle
import pandas as pd
import hashlib
class Drug(object):
"""
Class defining a Drug object
"""
    def __init__(self, drug_name):
        """
        @param: drug_name
        @pdef: Name of the drug. Any supported identifier works (plain name,
               DrugBankID, DCDB, ChEMBL or PubChem compound ID); the type is
               auto-detected by recognize_name(), which may also upper-case
               self.drug_name for ID-style inputs.
        @ptype: {String}
        """
        self.drug_name = drug_name.lower()
        self.type_name = self.recognize_name(drug_name.lower())  # detected identifier type
        self.targets = []              # target identifiers (kind recorded in self.target_type_id)
        self.targets_in_network = []   # subset of targets present in the analysed network
        self.pfams = []                # PFAM families of the targets
        self.smiles = []               # SMILES strings of the drug
        self.ATCs = []                 # ATC codes (all levels)
        self.level_to_ATCs = {'level1':[], 'level2':[], 'level3':[], 'level4':[], 'level5':[]}
        self.SEs = []                  # side effects
        self.target_type_id = None     # set by the obtain_targets_* methods
        # Maps a target identifier type to its BIANA table name.
        self.target_type_id_to_table = {
            'geneid' : 'externalEntityGeneID',
            'genesymbol' : 'externalEntityGeneSymbol',
            'uniprotentry' : 'externalEntityUniprotEntry',
            'uniprotaccession' : 'externalEntityUniprotAccession',
        }
        # Maps a drug identifier type to its BIANA table name.
        self.type_name_to_table = {
            'name' : 'externalEntityName',
            'drugbankid' : 'externalEntityDrugBankID',
            'dcdb' : 'externalEntityDCDB_drugID',
            'chemblid' : 'externalEntityCHEMBL',
            'pubchemcompound' : 'externalEntityPubChemCompound',
        }
###########
# METHODS #
###########
def obtain_targets_from_file(self, targets_file, target_type_id):
"""
Obtains the targets from an input file and stores them into a list.
The file must contain the names of the targets separated by new lines.
The type of ID of the targets must be specified.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
with open(targets_file, 'r') as targets_file_fd:
for line in targets_file_fd:
self.targets.append(line.strip())
# Check if the number of targets provided is sufficient for the analysis
if len(self.targets) < 1:
raise InsufficientTargets(self.targets)
return
def obtain_targets_from_pickle(self, drug2targets_file, target_type_id):
"""
Obtains the targets from an input pickle file and stores them into a list.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
drug2targets = pickle.load(open(drug2targets_file))
drug_id = self.drug_name.upper()
if drug_id in drug2targets:
self.targets = list(drug2targets[drug_id])
else:
raise InsufficientTargets(self.targets)
# Check if the number of targets provided is sufficient for the analysis
if len(self.targets) < 1:
raise InsufficientTargets(self.targets)
return
    def obtain_drugbankids_from_table(self, drug_mapping_file):
        """
        Obtains the drugbankids of a drug from an input table and stores them into a list.
        Usually, there is only one drugbankid, but there could be multiple ones in some occasions.

        :param drug_mapping_file: tab-separated table; the code reads the columns
            '#drugbankid', 'type_identifier', 'identifier' and 'type_name'.
        :return: the matching DrugBank IDs (a set, or a one-element list when
            self.drug_name already is a DrugBank ID).
        :raises DrugNameNotFound: when no row matches the drug identifier.
        """
        # Get DrugBankID for the input drug
        if self.type_name != 'drugbankid':
            drug_mapping_df = pd.read_csv(drug_mapping_file, sep='\t', index_col=None)
            if self.type_name == 'name':
                # Select drugbank ids with the input name
                drugnames_df = drug_mapping_df[(drug_mapping_df['type_identifier'] == 'name') & (drug_mapping_df['identifier'] == self.drug_name)]
                drugbankids = set(drugnames_df['#drugbankid'].tolist())
                if len(drugbankids) == 0:
                    raise DrugNameNotFound(self.drug_name, self.type_name)
                elif len(drugbankids) > 1:
                    # Check if the input name is unique
                    if 'unique' in drugnames_df['type_name'].tolist():
                        # Prefer rows flagged 'unique'; fall back to all matches
                        # if the 'unique' subset turns out to be empty.
                        drugbankids = set(drugnames_df.loc[drugnames_df['type_name'] == 'unique', '#drugbankid'].tolist())
                        if len(drugbankids) == 0:
                            drugbankids = set(drugnames_df['#drugbankid'].tolist())
            else:
                # Any other identifier type (dcdb, chemblid, pubchemcompound...)
                # is matched directly against its type_identifier rows.
                drugbankids = set(drug_mapping_df.loc[(drug_mapping_df['type_identifier'] == self.type_name) & (drug_mapping_df['identifier'] == self.drug_name), '#drugbankid'].tolist())
                if len(drugbankids) == 0:
                    raise DrugNameNotFound(self.drug_name, self.type_name)
        else:
            drugbankids = [self.drug_name.upper()]
        return drugbankids
def obtain_targets_from_table(self, drugbankids, drug_to_targets_file, target_type_id='geneid'):
"""
Obtains the targets from an input table and stores them into a list.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
# Get targets
targets = set()
drug_to_targets_df = pd.read_csv(drug_to_targets_file, sep='\t', index_col=None)
for group_targets in drug_to_targets_df.loc[drug_to_targets_df['#drugbankid'].isin(drugbankids), 'geneids'].tolist():
targets = targets | set(group_targets.split('; '))
# Check if the number of targets provided is sufficient for the analysis
if len(targets) < 1:
raise InsufficientTargets(self.targets)
else:
self.targets = targets
return
    def obtain_targets_from_BIANA(self, biana_cnx, target_type_id, unification_protocol):
        """
        Obtains the targets from BIANA database using as query the drug name.
        The type of ID of the targets must be specified.
        "biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.

        :raises DrugNameNotFound: when no external entity matches the drug name.
        :raises InsufficientTargets: when the query yields no targets.
        """
        self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
        target_type_id_table = self.return_targets_biana_table(self.target_type_id) # Obtain the table containing the type of ID introduced
        type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
        up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
        cursor = biana_cnx.cursor() # Start cursor to MySQL
        # Select the external entity ID of the DCDB drug
        query1 = (''' SELECT externalEntityID FROM {} WHERE value = %s
                 '''.format(type_name_table))
        # Select the geneID targets of the drug, only therapeutic ones, and only from DrugBank database!
        query2 = (''' SELECT G.value FROM externalEntity E1, {} U1, {} U2, externalEntity E2, externalEntityRelationParticipant R2, externalEntityRelationParticipant R3, externalEntityDrugBank_targetID T, {} U3, {} U4, externalEntityGeneID G
                      WHERE E1.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = E2.externalEntityID AND E2.type = 'drug'
                      AND E2.externalEntityID = R2.externalEntityID AND R2.externalEntityRelationID = R3.externalEntityRelationID AND R3.externalEntityID = T.externalEntityID AND T.targetType = "therapeutic"
                      AND R3.externalEntityID = U3.externalEntityID AND U3.userEntityID = U4.userEntityID AND U4.externalEntityID = G.externalEntityID AND E1.externalEntityID = %s
                 '''.format(up_table, up_table, up_table, up_table, target_type_id_table))
        cursor.execute(query1, (self.drug_name,))
        external_entities = set()
        geneids = set()
        # Search for the external entities corresponding to the name of the drug
        for items in cursor:
            for ee in items:
                external_entities.add(ee)
        # Search for the geneIDs interacting with the drug
        if len(external_entities) > 0:
            for ee in external_entities:
                cursor.execute(query2, (ee,))
                for items in cursor:
                    for geneid in items:
                        geneids.add(geneid)
        else:
            raise DrugNameNotFound(self.drug_name, self.type_name)
        # Why in two steps?
        # Because as the table "externalEntityName" is too large, one complex command can be very time-consuming.
        # It is better to split the search in two commands.
        cursor.close()
        self.targets = list(geneids)
        # Check if the number of targets provided is sufficient for the analysis
        if len(self.targets) < 1:
            raise InsufficientTargets(self.targets)
        return
def recognize_name(self, drug_name):
"""
Recognizes the type of name of the drug
(dcdb, drugbank or name)
"""
dcdb_pattern = re.compile('^dcc[0-9]{4}$')
drugbank_pattern = re.compile('^db[0-9]{5}$')
chembl_pattern = re.compile('^chembl[0-9]+$')
pubchem_pattern = re.compile('^[0-9]+$')
diana_pattern = re.compile('^diana_.*$')
if dcdb_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'dcdb'
elif drugbank_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'drugbankid'
elif chembl_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'chemblid'
elif pubchem_pattern.match(drug_name):
return 'pubchemcompound'
elif diana_pattern.match(drug_name):
return 'diana'
else:
return 'name'
def return_drug_biana_table(self, type_name):
"""
Returns the table in BIANA where the type of drug name
introduced is stored.
"""
if type_name in self.type_name_to_table:
return self.type_name_to_table[type_name]
def return_targets_biana_table(self, target_type_id):
"""
Returns the table in BIANA where the annotations of the type of ID
introduced are stored.
"""
if target_type_id in self.target_type_id_to_table:
return self.target_type_id_to_table[target_type_id]
else:
raise IncorrectTypeID(target_type_id, self.target_type_id_to_table)
def obtain_pfams_from_file(self, pfam_file):
"""
Obtains the pfams from an input file and stores them into a list.
The file must contain the names of the pfams separated by new lines.
"""
with open(pfam_file, 'r') as pfam_file_fd:
for line in pfam_file_fd:
self.pfams.append(line.strip())
return
def obtain_pfams_from_pickle(self, pfam_pickle_file, output_file):
"""
Obtains the pfams from an input pickle file and stores them into a list.
"""
geneid2pfam = pickle.load(open(pfam_pickle_file))
all_pfams = set()
for target in self.targets:
if target in geneid2pfam:
pfams = geneid2pfam[target]
for pfam in pfams:
all_pfams.add(pfam)
if len(all_pfams) > 0:
self.pfams = list(all_pfams)
with open(output_file, 'w') as pfam_fd:
for pfam in self.pfams:
pfam_fd.write('{}\n'.format(pfam))
else:
print('No PFAMS found for the targets introduced: {}.\n'.format(', '.join(self.targets)))
return
def obtain_pfams_from_geneid_target_table(self, geneids, geneid_target_mapping_file):
"""
Obtains the pfams of a list of targets (in gene ID) from an input table and stores them into a list.
"""
# Get pfams
geneid_mappings_df = pd.read_csv(geneid_target_mapping_file, sep='\t', index_col=None)
pfams_df = geneid_mappings_df[(geneid_mappings_df['#geneid'].isin(geneids)) & (geneid_mappings_df['type_identifier'] == 'pfam')]
self.pfams = set([pfam.upper() for pfam in pfams_df['identifier'].tolist()])
return
def obtain_pfams_from_targets(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the pfams from BIANA database using as query the targets.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
Stores the PFAMs found in an output file
"""
target_type_id_table = self.return_targets_biana_table(self.target_type_id) # Obtain the table containing the type of ID introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = (''' SELECT P.value FROM {} G, {} U1, {} U2, externalEntityPFAM P
WHERE G.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = P.externalEntityID AND G.value = %s
'''.format(target_type_id_table, up_table, up_table))
if len(self.targets) > 0:
cursor = biana_cnx.cursor() # Start cursor to MySQL
for target in self.targets:
cursor.execute(query, (target,))
pfams = set()
for items in cursor:
for pfam in items:
pfams.add(pfam.upper())
cursor.close()
else:
print('There are no targets, so it is impossible to get the PFAMs!\n')
sys.exit(10)
if len(pfams) > 0:
self.pfams = list(pfams)
with open(output_file, 'w') as pfam_fd:
for pfam in self.pfams:
pfam_fd.write('{}\n'.format(pfam))
else:
print('No PFAMS found for the targets introduced: {}.\n'.format(', '.join(self.targets)))
return
def obtain_SMILES_from_file(self, smiles_file):
"""
Obtains the SMILES from an input file and stores them into a list.
The file must contain | |
<gh_stars>0
import os, math
import time
import numpy as np
import pandas as pd
import geopy.distance
from concurrent.futures import ProcessPoolExecutor
'''
Input: "xxx.csv" with first column as "Date"
Output: the saved files for preprocessed datasets, i.e., "train/val/test_dataTime.npz" including:
- x
- dateTime
- y
- max_speed
- x_offsets
- y_offsets
Remarks:
- Multi-process processing for generating local statistic features of traffic data
- Costing! e.g., for PEMS-BAY, it takes more than two hours for 20% missing values
'''
def get_dist_matrix(sensor_locs):
    """
    Compute the absolute spatial distance matrix
    :param sensor_locs: with header and index, [index, sensor_id, longitude, latitude]
    :return: (sensor_ids, sensor_id_to_ind, dist_mx) where dist_mx[i, j] is the
        geodesic distance in km (rounded to 2 decimals) between sensors i and j.
    """
    sensor_ids = sensor_locs[1:, 1]  # remove header and index
    sensor_id_to_ind = {}
    num_sensors = len(sensor_ids)
    # Initialise to inf so "not yet computed" pairs can be detected below.
    dist_mx = np.zeros((num_sensors, num_sensors), dtype=np.float32)
    dist_mx[:] = np.inf
    for i, sensor_id in enumerate(sensor_ids):
        sensor_id_to_ind.update({sensor_id: i})
    for id1 in sensor_ids:
        coords_1 = sensor_locs[sensor_locs[:, 1] == id1][0][2:]
        for id2 in sensor_ids:
            if math.isinf(dist_mx[sensor_id_to_ind[id1], sensor_id_to_ind[id2]]):
                coords_2 = sensor_locs[sensor_locs[:, 1] == id2][0][2:]
                # geopy geodesic distance in kilometres; filled symmetrically
                # so each unordered pair is computed only once.
                dist = round(geopy.distance.distance(coords_1, coords_2).km, 2)
                dist_mx[sensor_id_to_ind[id1], sensor_id_to_ind[id2]] = dist
                dist_mx[sensor_id_to_ind[id2], sensor_id_to_ind[id1]] = dist
            else:
                continue
    return sensor_ids, sensor_id_to_ind, dist_mx
def cal_statistics(idx): # index in (N, L, D)
    """
    Fill the local statistic features for ONE missing entry.

    *idx* indexes into ``missing_index`` (the (i, j, k) positions where
    Mask == 0).  Runs inside a ProcessPoolExecutor: all arrays named in the
    ``global`` statements are module-level state inherited by the workers,
    so writes update each worker's copy (the parent only consumes the
    returned *idx* for progress counting).
    """
    global speed_sequences
    global Delta_t, X_last_obsv, Delta_s, X_closest_obsv, X_mean_t, X_mean_s, missing_index
    global dists_one_all_array, sorted_node_ids_array
    i = missing_index[0][idx]
    j = missing_index[1][idx]
    k = missing_index[2][idx]
    # Delta_t, X_last_obsv
    ## ******* May cause problem when computing Delta_t due to the random computing order.
    if j != 0 and j != speed_sequences.shape[1] - 1: # if the missing value is in the middle of the sequence
        Delta_t[i, j + 1, k] = Delta_t[i, j + 1, k] + Delta_t[i, j, k]
    if j != 0:
        X_last_obsv[i, j, k] = X_last_obsv[
            i, j - 1, k] # last observation, can be zero, problem when handling long-range missing values
    # Delta_s, X_closest_obsv: walk the neighbours in ascending distance and
    # take the first one with an observed (non-zero) value.
    dists_one_all = dists_one_all_array[k] # [(idx, dist)]
    for triple in dists_one_all:
        idx = triple[0]
        dist = triple[1]
        if speed_sequences[i, j, idx] != 0:
            Delta_s[i, j, k] = dist
            X_closest_obsv[i, j, k] = speed_sequences[i, j, idx]
            break
        else:
            continue
    # X_mean_s: mean of the observed values among the S nearest neighbours.
    sorted_node_ids = sorted_node_ids_array[k]
    spatial_neighbor = speed_sequences[i, j, sorted_node_ids] # S measures
    nonzero_index = np.nonzero(spatial_neighbor) # return x arrays, for each we have xx elements
    if len(nonzero_index[0]) != 0:
        nonzero_spatial_neighbor = spatial_neighbor[nonzero_index]
        X_mean_s[i, j, k] = np.mean(nonzero_spatial_neighbor)
    return idx
def prepare_dataset(output_dit, df, x_offsets, y_offsets, masking, dists, L, S, mask_ones_proportion=0.8):
    """
    Prepare training & testing data integrating local statistic features
    :param output_dit: output path for saving
    :param df: (N, D), i.e., (num_samples, num_nodes)
    :param x_offsets: range(-11, 1)
    :param y_offsets: range(1, 13)
    :param masking: when True, randomly zero out values and compute the
        missing-value statistic features; when False, return raw sequences
    :param dists: the distance matrix (N, N) for the sensor nodes; directed or undirected
    :param L: the number of previous temporal measures to check
    :param S: the number of nearby spatial measures to check
    :param mask_ones_proportion: proportion of entries KEPT (1.0 = nothing masked)
    :return:
        x: (N, 8, L, D) including (x, Mask, X_last_obsv, X_mean_t, Delta_t, X_closest_obsv, X_mean_s, Delta_s)
        dateTime: (N, L)
        y: (N, L, D)
        max_speed: normalisation constant (speeds were divided by it)
    """
    global speed_sequences
    global Delta_t, X_last_obsv, Delta_s, X_closest_obsv, X_mean_t, X_mean_s, missing_index
    global dists_one_all_array, sorted_node_ids_array
    num_samples, num_nodes = df.shape
    data = df.values # (num_samples, num_nodes)
    speed_tensor = data.clip(0, 100) # (N, D), clamp speeds into [0, 100]
    max_speed = speed_tensor.max().max()
    speed_tensor = speed_tensor / max_speed # (N, D), normalised to [0, 1]
    date_array = df.index.values # (N)
    print(speed_tensor.shape, date_array.shape)
    x, dateTime, y = [], [], []
    min_t = abs(min(x_offsets))
    max_t = abs(num_samples - abs(max(y_offsets))) # Exclusive
    for t in range(min_t, max_t):
        x_t = speed_tensor[t + x_offsets, ...]
        dateTime_t = date_array[t + x_offsets]
        y_t = speed_tensor[t + y_offsets, ...]
        x.append(x_t)
        dateTime.append(dateTime_t)
        y.append(y_t)
    speed_sequences = np.stack(x, axis=0) # (N, L, D)
    dateTime = np.stack(dateTime, axis=0) # (N, L)
    speed_labels = np.stack(y, axis=0) # (N, L, D)
    # using zero-one mask to randomly set elements to zeros
    if masking:
        print('Split Speed/label finished. Start to generate Mask, Delta_t, Last_observed_X ...')
        np.random.seed(1024) # fixed seed so the corrupted dataset is reproducible
        Mask = np.random.choice([0, 1], size=(speed_sequences.shape),
                                p=[1 - mask_ones_proportion, mask_ones_proportion]) # (N, L, D)
        speed_sequences = np.multiply(speed_sequences, Mask)
        # temporal information -> to consider extracting the statistic feature from longer history data (can probably improve the performance)
        interval = 5 # 5 minutes
        s = np.zeros_like(speed_sequences) # time stamps in (N, L, D)
        for i in range(s.shape[1]):
            s[:, i, :] = interval * i
        Delta_t = np.zeros_like(
            speed_sequences) # time intervals, if all previous measures are missing, Delta_t[i, j, k] = 0, X_last_obsv[i, j ,k] = 0
        Delta_s = np.zeros_like(
            speed_sequences) # spatial distance, if all variables are missing, Delta_s[i, j, k] = 0, X_closest_obsv[i, j ,k] = 0
        X_last_obsv = np.copy(speed_sequences)
        X_closest_obsv = np.copy(speed_sequences)
        X_mean_t = np.zeros_like(speed_sequences)
        X_mean_s = np.zeros_like(speed_sequences)
        for i in range(1, s.shape[1]):
            Delta_t[:, i, :] = s[:, i, :] - s[:, i - 1, :] # calculate the exact minuites
        missing_index = np.where(Mask == 0) # (array1, array2, array3), length of each array: number of missing values
        # X_mean_t, temporal mean for each segment
        start = time.time()
        nbr_all = speed_sequences.shape[0] * speed_sequences.shape[2]
        nbr_finished = 0
        current_ratio = 0
        for i in range(speed_sequences.shape[0]): # N samples
            for d in range(speed_sequences.shape[2]):
                nbr_finished += 1
                # Progress logging: print only when the integer percentage ticks over.
                finished_ratio = nbr_finished // (0.01 * nbr_all)
                if finished_ratio != current_ratio:
                    print("{}% of X_mean_t are calculated ! Accumulated time cost: {}s" \
                          .format(nbr_finished // (0.01 * nbr_all), time.time() - start))
                    current_ratio = finished_ratio
                temp_neighbor = speed_sequences[i, :, d] # (L)
                nonzero_index = np.nonzero(temp_neighbor) # return x arrays, for each we have xx elements
                if len(nonzero_index[0]) == 0:
                    continue
                else:
                    nonzero_temp_neighbor = temp_neighbor[nonzero_index]
                    avg = np.mean(nonzero_temp_neighbor, keepdims=True)
                    X_mean_t[i, :, d] = np.tile(avg, X_mean_t.shape[1])
        print("total time cost {}".format(time.time() - start))
        # save X_mean_t into ".npz" file
        X_mean_t_save_path = os.path.join(output_dit,
                                          "XMeanT_missRatio_{:.2f}%.npz".format((1 - mask_ones_proportion) * 100))
        np.savez_compressed(
            X_mean_t_save_path,
            X_mean_t=X_mean_t
        )
        print("X_mean_t is saved in ", X_mean_t_save_path)
        # spatial information: for every node precompute its neighbours sorted
        # by distance, and the IDs of its S nearest neighbours.
        dists_one_all_array = []
        sorted_node_ids_array = []
        for d in range(speed_sequences.shape[2]):
            dists_one_all = dists[d] # the distance array between node k and all other nodes
            dists_one_all = list(enumerate(dists_one_all)) # [(idx, dist)]
            dists_one_all = sorted(dists_one_all, key=lambda x: x[1]) # by default ascending order
            sorted_node_ids = [x[0] for x in dists_one_all[:S]] # only take S nearest nodes
            dists_one_all_array.append(dists_one_all)
            sorted_node_ids_array.append(sorted_node_ids)
        # Fan the per-missing-entry statistic computation out over processes;
        # cal_statistics reads the module-level globals set above.
        executor = ProcessPoolExecutor() # default, using nbr_CPU processes
        idx_miss = list(range(missing_index[0].shape[0]))
        nbr_all = len(idx_miss)
        nbr_temp = 0
        start = time.time()
        current_ratio = 0
        for res in executor.map(cal_statistics, idx_miss):
            nbr_temp += 1
            finished_ratio = nbr_temp // (0.01 * nbr_all)
            if finished_ratio != current_ratio:
                end = time.time()
                print("{} % of the statistic features are calculated ! Time cost: {}s".format(
                    nbr_temp // (0.01 * nbr_all), end - start))
                current_ratio = finished_ratio
    print('Generate Mask, Last/Closest_observed_X, X_mean_t/s, Delta_t/s finished.')
    if masking:
        # Stack the 8 feature channels along a new axis 1.
        speed_sequences = np.expand_dims(speed_sequences, axis=1)
        Mask = np.expand_dims(Mask, axis=1)
        X_last_obsv = np.expand_dims(X_last_obsv, axis=1)
        X_closest_obsv = np.expand_dims(X_closest_obsv, axis=1)
        X_mean_t = np.expand_dims(X_mean_t, axis=1)
        X_mean_s = np.expand_dims(X_mean_s, axis=1)
        Delta_t = np.expand_dims(Delta_t, axis=1)
        Delta_s = np.expand_dims(Delta_s, axis=1)
        dataset_agger = np.concatenate(
            (speed_sequences, Mask, X_last_obsv, X_mean_t, Delta_t, X_closest_obsv, X_mean_s, Delta_s),
            axis=1) # (N, 8, L, D)
        return dataset_agger, dateTime, speed_labels, max_speed # (N, 8, L, D), (N, L), (N, L, D)
    else:
        return speed_sequences, dateTime, speed_labels, max_speed # (N, L, D), (N, L), (N, L, D)
def generate_train_val_test(traffic_df_filename, dist_filename, output_dir, masking, L, S,
train_val_test_split=[0.7, 0.1, 0.2], mask_ones_proportion=0.8):
"""
To generate the splitted datasets
:param traffic_df_filename:
:param dist_file: distance matrix file
:param output_dir: the path to save generated datasets
:param masking: default True
:param L: the recent sample numbers
:param S: the nearby node numbers
:param train_val_test_split: the splitting ratio
:param mask_ones_proportion: the masking ratio
:return:
df: (N_all, D), the full dataframe including "dateTime" ass the first column
save datasets into ".npz" files
# x: (N, 8, L, D)
# dateTime: (N, L)
# y: (N, L, D)
"""
df = pd.read_hdf(traffic_df_filename)
sensor_locs = np.genfromtxt(dist_filename, delimiter=',')
sensor_ids, sensor_id_to_ind, dist_mx = get_dist_matrix(sensor_locs)
x_offsets = np.sort(
np.concatenate((np.arange(-11, 1, 1),))
)
# Predict the next one hour
y_offsets = np.sort(np.arange(1, 13, 1))
# x: (N, 8, L, D)
# dateTime: (N, L)
# y: (N, L, D)
x, dateTime, y, max_speed = prepare_dataset(
output_dir,
df,
x_offsets,
y_offsets,
masking,
dist_mx,
L,
S,
mask_ones_proportion
)
print("x shape: ", x.shape, "dateTime shape: ", dateTime.shape, ", y shape: ", y.shape)
# Write the data into npz file.
num_samples = x.shape[0]
num_train = round(num_samples * | |
),
$result = UNIONALL($optional__0, $optional__1)
"""
expected_gremlin = """
g.V('@class', 'Animal')
.filter{it, m -> it.name.contains($wanted)}
.as('Animal___1')
.ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
.as('Animal__in_Animal_ParentOf___1')
.ifThenElse{it == null}{null}{it.in('Animal_ParentOf')}
.as('Animal__in_Animal_ParentOf__in_Animal_ParentOf___1')
.back('Animal__in_Animal_ParentOf___1')
.optional('Animal___1')
.as('Animal___2')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
child_name: (
(m.Animal__in_Animal_ParentOf___1 != null) ?
m.Animal__in_Animal_ParentOf___1.name : null
),
grandchild_name: (
(m.Animal__in_Animal_ParentOf__in_Animal_ParentOf___1 != null) ?
m.Animal__in_Animal_ParentOf__in_Animal_ParentOf___1.name : null
),
name: m.Animal___1.name
])}
"""
expected_mssql = """
SELECT
[Animal_1].name AS child_name,
[Animal_2].name AS grandchild_name,
[Animal_3].name AS name
FROM
db_1.schema_1.[Animal] AS [Animal_3]
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_1]
ON [Animal_3].parent = [Animal_1].uuid
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_2]
ON [Animal_1].parent = [Animal_2].uuid
WHERE (
[Animal_3].name LIKE '%' + :wanted + '%'
) AND (
[Animal_2].uuid IS NOT NULL OR
[Animal_1].uuid IS NULL
)
"""
expected_cypher = SKIP_TEST
expected_postgresql = """
SELECT
"Animal_1".name AS child_name,
"Animal_2".name AS grandchild_name,
"Animal_3".name AS name
FROM
schema_1."Animal" AS "Animal_3"
LEFT OUTER JOIN schema_1."Animal" AS "Animal_1"
ON "Animal_3".parent = "Animal_1".uuid
LEFT OUTER JOIN schema_1."Animal" AS "Animal_2"
ON "Animal_1".parent = "Animal_2".uuid
WHERE (
"Animal_3".name LIKE '%%' || %(wanted)s || '%%'
) AND (
"Animal_2".uuid IS NOT NULL OR
"Animal_1".uuid IS NULL
)
"""
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
    def test_optional_and_deep_traverse(self) -> None:
        """Compile an @optional edge followed by a deep (multi-hop) traversal.

        Verifies that each backend emits the optional subtree correctly:
        MATCH uses a UNIONALL over the with/without-optional variants, Gremlin
        uses ifThenElse/optional steps, and SQL uses LEFT OUTER JOINs with
        the "joined row present OR optional edge absent" filter.
        """
        test_data = test_input_data.optional_and_deep_traverse()

        expected_match = """
            SELECT EXPAND($result)
            LET
            $optional__0 = (
                SELECT
                    Animal___1.name AS `animal_name`
                FROM (
                    MATCH {{
                        class: Animal,
                        where: ((
                            (in_Animal_ParentOf IS null)
                            OR
                            (in_Animal_ParentOf.size() = 0)
                        )),
                        as: Animal___1
                    }}
                    RETURN $matches
                )
            ),
            $optional__1 = (
                SELECT
                    Animal___1.name AS `animal_name`,
                    Animal__in_Animal_ParentOf___1.name AS `child_name`,
                    Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name
                        AS `spouse_and_self_name`,
                    Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1.name
                        AS `spouse_species`
                FROM (
                    MATCH {{
                        class: Animal,
                        as: Animal___1
                    }}.in('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf___1
                    }}.out('Animal_ParentOf') {{
                        class: Animal,
                        as: Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
                    }}.out('Animal_OfSpecies') {{
                        class: Species,
                        as: Animal__in_Animal_ParentOf__out_Animal_ParentOf
                            __out_Animal_OfSpecies___1
                    }}
                    RETURN $matches
                )
            ),
            $result = UNIONALL($optional__0, $optional__1)
        """
        expected_gremlin = """
            g.V('@class', 'Animal')
            .as('Animal___1')
            .ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.out('Animal_ParentOf')}
            .as('Animal__in_Animal_ParentOf__out_Animal_ParentOf___1')
            .ifThenElse{it == null}{null}{it.out('Animal_OfSpecies')}
            .as('Animal__in_Animal_ParentOf__out_Animal_ParentOf
                __out_Animal_OfSpecies___1')
            .back('Animal__in_Animal_ParentOf__out_Animal_ParentOf___1')
            .back('Animal__in_Animal_ParentOf___1')
            .optional('Animal___1')
            .as('Animal___2')
            .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
                animal_name: m.Animal___1.name,
                child_name: (
                    (m.Animal__in_Animal_ParentOf___1 != null) ?
                    m.Animal__in_Animal_ParentOf___1.name : null
                ),
                spouse_and_self_name: (
                    (m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 != null) ?
                    m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name : null
                ),
                spouse_species: (
                    (m.Animal__in_Animal_ParentOf__out_Animal_ParentOf
                        __out_Animal_OfSpecies___1 != null) ?
                    m.Animal__in_Animal_ParentOf__out_Animal_ParentOf
                        __out_Animal_OfSpecies___1.name
                    : null
                )
            ])}
        """
        expected_mssql = """
            SELECT
                [Animal_1].name AS animal_name,
                [Animal_2].name AS child_name,
                [Animal_3].name AS spouse_and_self_name,
                [Species_1].name AS spouse_species
            FROM
                db_1.schema_1.[Animal] AS [Animal_1]
                LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_2]
                    ON [Animal_1].parent = [Animal_2].uuid
                LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_3]
                    ON [Animal_2].uuid = [Animal_3].parent
                LEFT OUTER JOIN db_1.schema_1.[Species] AS [Species_1]
                    ON [Animal_3].species = [Species_1].uuid
            WHERE (
                [Animal_3].parent IS NOT NULL OR
                [Animal_2].uuid IS NULL
            ) AND (
                [Species_1].uuid IS NOT NULL OR
                [Animal_3].parent IS NULL
            )
        """
        expected_cypher = SKIP_TEST
        expected_postgresql = """
            SELECT
                "Animal_1".name AS animal_name,
                "Animal_2".name AS child_name,
                "Animal_3".name AS spouse_and_self_name,
                "Species_1".name AS spouse_species
            FROM
                schema_1."Animal" AS "Animal_1"
                LEFT OUTER JOIN schema_1."Animal" AS "Animal_2"
                    ON "Animal_1".parent = "Animal_2".uuid
                LEFT OUTER JOIN schema_1."Animal" AS "Animal_3"
                    ON "Animal_2".uuid = "Animal_3".parent
                LEFT OUTER JOIN schema_1."Species" AS "Species_1"
                    ON "Animal_3".species = "Species_1".uuid
            WHERE (
                "Animal_3".parent IS NOT NULL OR
                "Animal_2".uuid IS NULL
            ) AND (
                "Species_1".uuid IS NOT NULL OR
                "Animal_3".parent IS NULL
            )
        """

        check_test_data(
            self,
            test_data,
            expected_match,
            expected_gremlin,
            expected_mssql,
            expected_cypher,
            expected_postgresql,
        )
def test_traverse_and_optional_and_traverse(self) -> None:
test_data = test_input_data.traverse_and_optional_and_traverse()
expected_match = """
SELECT EXPAND($result)
LET
$optional__0 = (
SELECT
Animal___1.name AS `animal_name`,
Animal__in_Animal_ParentOf___1.name AS `child_name`
FROM (
MATCH {{
where: ((@this INSTANCEOF 'Animal')),
as: Animal___1
}}.in('Animal_ParentOf') {{
class: Animal,
where: ((
(out_Animal_ParentOf IS null)
OR
(out_Animal_ParentOf.size() = 0)
)),
as: Animal__in_Animal_ParentOf___1
}}
RETURN $matches
)
),
$optional__1 = (
SELECT
Animal___1.name AS `animal_name`,
Animal__in_Animal_ParentOf___1.name AS `child_name`,
Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name
AS `spouse_and_self_name`,
Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1.name
AS `spouse_and_self_species`
FROM (
MATCH {{
class: Animal,
as: Animal___1
}}.in('Animal_ParentOf') {{
class: Animal,
as: Animal__in_Animal_ParentOf___1
}}.out('Animal_ParentOf') {{
class: Animal,
as: Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
}}.out('Animal_OfSpecies') {{
class: Species,
as: Animal__in_Animal_ParentOf__out_Animal_ParentOf
__out_Animal_OfSpecies___1
}}
RETURN $matches
)
),
$result = UNIONALL($optional__0, $optional__1)
"""
expected_gremlin = """
g.V('@class',
'Animal')
.as('Animal___1')
.in('Animal_ParentOf')
.as('Animal__in_Animal_ParentOf___1')
.ifThenElse{it.out_Animal_ParentOf == null}{null}{it.out('Animal_ParentOf')}
.as('Animal__in_Animal_ParentOf__out_Animal_ParentOf___1')
.ifThenElse{it == null}{null}{it.out('Animal_OfSpecies')}
.as('Animal__in_Animal_ParentOf__out_Animal_ParentOf
__out_Animal_OfSpecies___1')
.back('Animal__in_Animal_ParentOf__out_Animal_ParentOf___1')
.optional('Animal__in_Animal_ParentOf___1')
.as('Animal__in_Animal_ParentOf___2')
.back('Animal___1')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
animal_name: m.Animal___1.name,
child_name: m.Animal__in_Animal_ParentOf___1.name,
spouse_and_self_name: (
(m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 != null) ?
m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name : null
),
spouse_and_self_species: (
(m.Animal__in_Animal_ParentOf__out_Animal_ParentOf
__out_Animal_OfSpecies___1 != null) ?
m.Animal__in_Animal_ParentOf__out_Animal_ParentOf
__out_Animal_OfSpecies___1.name
: null
)
])}
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
[Animal_2].name AS child_name,
[Animal_3].name AS spouse_and_self_name,
[Species_1].name AS spouse_and_self_species
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN db_1.schema_1.[Animal] AS [Animal_2]
ON [Animal_1].parent = [Animal_2].uuid
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_3]
ON [Animal_2].uuid = [Animal_3].parent
LEFT OUTER JOIN db_1.schema_1.[Species] AS [Species_1]
ON [Animal_3].species = [Species_1].uuid
WHERE
[Species_1].uuid IS NOT NULL OR [Animal_3].parent IS NULL
"""
expected_cypher = SKIP_TEST
expeceted_postgresql = """
SELECT
"Animal_1".name AS animal_name,
"Animal_2".name AS child_name,
"Animal_3".name AS spouse_and_self_name,
"Species_1".name AS spouse_and_self_species
FROM
schema_1."Animal" AS "Animal_1"
JOIN schema_1."Animal" AS "Animal_2"
ON "Animal_1".parent = "Animal_2".uuid
LEFT OUTER JOIN schema_1."Animal" AS "Animal_3"
ON "Animal_2".uuid = "Animal_3".parent
LEFT OUTER JOIN schema_1."Species" AS "Species_1"
ON "Animal_3".species = "Species_1".uuid
WHERE
"Species_1".uuid IS NOT NULL OR "Animal_3".parent IS NULL
"""
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expeceted_postgresql,
)
def test_multiple_optional_traversals_with_starting_filter(self) -> None:
test_data = test_input_data.multiple_optional_traversals_with_starting_filter()
expected_match = """
SELECT EXPAND($result)
LET
$optional__0 = (
SELECT
Animal___1.name AS `animal_name`
FROM (
MATCH {{
class: Animal,
where: ((
(
(name LIKE ('%' + ({wanted} + '%')))
AND
(
(in_Animal_ParentOf IS null)
OR
(in_Animal_ParentOf.size() = 0)
)
)
AND
(
(out_Animal_ParentOf IS null)
OR
(out_Animal_ParentOf.size() = 0)
)
)),
as: Animal___1
}}
RETURN $matches
)
),
$optional__1 = (
SELECT
Animal___1.name AS `animal_name`,
Animal__out_Animal_ParentOf___1.name AS `parent_name`,
Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1.name AS `parent_species`
FROM (
MATCH {{
class: Animal,
where: ((
(name LIKE ('%' + ({wanted} + '%')))
AND
(
(in_Animal_ParentOf IS null)
OR
(in_Animal_ParentOf.size() = 0)
)
)),
as: Animal___1
}} ,
{{
class: Animal,
as: Animal___1
}}.out('Animal_ParentOf') {{
as: Animal__out_Animal_ParentOf___1
}}.out('Animal_OfSpecies') {{
as: Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1
}}
RETURN $matches
)
),
$optional__2 = (
SELECT
Animal___1.name AS `animal_name`,
Animal__in_Animal_ParentOf___1.name AS `child_name`,
Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name
AS `spouse_and_self_name`
FROM (
MATCH {{
class: Animal,
where: ((
(name LIKE ('%' + ({wanted} + '%')))
AND
(
(out_Animal_ParentOf IS null)
OR
(out_Animal_ParentOf.size() = 0)
)
)),
as: Animal___1
}}.in('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf___1
}}.out('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
}}
RETURN $matches
)
),
$optional__3 = (
SELECT
Animal___1.name AS `animal_name`,
Animal__in_Animal_ParentOf___1.name AS `child_name`,
Animal__out_Animal_ParentOf___1.name AS `parent_name`,
Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1.name AS `parent_species`,
Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name
AS `spouse_and_self_name`
FROM (
MATCH {{
class: Animal,
where: ((name LIKE ('%' + ({wanted} + '%')))),
as: Animal___1
}}.in('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf___1
}}.out('Animal_ParentOf') {{
as: Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
}} ,
{{
class: Animal,
as: Animal___1
}}.out('Animal_ParentOf') {{
as: Animal__out_Animal_ParentOf___1
}}.out('Animal_OfSpecies') {{
as: Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1
}}
RETURN $matches
)
),
$result = UNIONALL($optional__0, $optional__1, $optional__2, $optional__3)
"""
expected_gremlin = """
g.V('@class', 'Animal')
.filter{it, m -> it.name.contains($wanted)}
.as('Animal___1')
.ifThenElse{it.in_Animal_ParentOf == null}{null}{it.in('Animal_ParentOf')}
.as('Animal__in_Animal_ParentOf___1')
.ifThenElse{it == null}{null}{it.out('Animal_ParentOf')}
.as('Animal__in_Animal_ParentOf__out_Animal_ParentOf___1')
.back('Animal__in_Animal_ParentOf___1')
.optional('Animal___1')
.as('Animal___2')
.ifThenElse{it.out_Animal_ParentOf == null}{null}{it.out('Animal_ParentOf')}
.as('Animal__out_Animal_ParentOf___1')
.ifThenElse{it == null}{null}{it.out('Animal_OfSpecies')}
.as('Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1')
.back('Animal__out_Animal_ParentOf___1')
.optional('Animal___2')
.as('Animal___3')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
animal_name: m.Animal___1.name,
child_name: (
(m.Animal__in_Animal_ParentOf___1 != null) ?
m.Animal__in_Animal_ParentOf___1.name : null
),
parent_name: (
(m.Animal__out_Animal_ParentOf___1 != null) ?
m.Animal__out_Animal_ParentOf___1.name : null
),
parent_species: (
(m.Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1 != null) ?
m.Animal__out_Animal_ParentOf__out_Animal_OfSpecies___1.name : null
),
spouse_and_self_name: (
(m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 != null) ?
m.Animal__in_Animal_ParentOf__out_Animal_ParentOf___1.name : null
)
])}
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
[Animal_2].name AS child_name,
[Animal_3].name AS parent_name,
[Species_1].name AS parent_species,
[Animal_4].name AS spouse_and_self_name
FROM
db_1.schema_1.[Animal] AS [Animal_1]
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_2]
ON [Animal_1].parent = [Animal_2].uuid
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_4]
ON [Animal_2].uuid = [Animal_4].parent
LEFT OUTER JOIN db_1.schema_1.[Animal] AS [Animal_3]
ON [Animal_1].uuid = [Animal_3].parent
LEFT OUTER JOIN db_1.schema_1.[Species] AS [Species_1]
ON [Animal_3].species = [Species_1].uuid
WHERE (
[Animal_1].name LIKE '%' + :wanted + '%'
) AND (
[Animal_4].parent IS NOT NULL OR
[Animal_2].uuid IS NULL
) AND (
[Species_1].uuid IS NOT NULL OR
[Animal_3].parent IS NULL
)
"""
expected_cypher = SKIP_TEST
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
"Animal_2".name AS child_name,
"Animal_3".name AS parent_name,
"Species_1".name AS parent_species,
"Animal_4".name AS spouse_and_self_name
FROM
schema_1."Animal" AS "Animal_1"
LEFT OUTER JOIN schema_1."Animal" AS "Animal_2"
ON "Animal_1".parent = "Animal_2".uuid
LEFT OUTER JOIN schema_1."Animal" AS "Animal_4"
ON "Animal_2".uuid = "Animal_4".parent
LEFT OUTER JOIN schema_1."Animal" AS "Animal_3"
ON "Animal_1".uuid = "Animal_3".parent
LEFT OUTER JOIN schema_1."Species" AS | |
service['ts_use'], reverse=True)
return sorted(temp, key=lambda service: service['in_use'])
# end
# master election
def service_list_fixed(self, pubs):
self._debug['policy_fi'] += 1
return sorted(pubs, key=lambda service: service['sequence'])
# end
def service_list(self, service_type, pubs):
policy = self.get_service_config(service_type, 'policy')
if policy == 'load-balance':
f = self.service_list_load_balance
elif policy == 'fixed':
f = self.service_list_fixed
else:
f = self.service_list_round_robin
return f(pubs)
# end
def api_subscribe(self):
self._debug['msg_subs'] += 1
if self._db_conn.is_restarting():
self._debug['restarting'] += 1
bottle.abort(503, 'Service Unavailable')
ctype = bottle.request.headers['content-type']
if ctype == 'application/json':
json_req = bottle.request.json
elif ctype == 'application/xml':
data = xmltodict.parse(bottle.request.body.read())
json_req = {}
for service_type, info in data.items():
json_req['service'] = service_type
json_req.update(dict(info))
else:
bottle.abort(400, e)
service_type = json_req['service']
client_id = json_req['client']
count = reqcnt = int(json_req['instances'])
client_type = json_req.get('client-type', '')
# throttle subscribe requests to prevent overload
if self._debug['cur_pend_sb'] > 100:
self._debug['throttle_subs'] += 1
response = {'ttl': 120, service_type: []}
if ctype == 'application/xml':
response = xmltodict.unparse({'response': response})
return response
self._debug['cur_pend_sb'] += 1
if self._debug['cur_pend_sb'] > self._debug['max_pend_sb']:
self._debug['max_pend_sb'] = self._debug['cur_pend_sb']
assigned_sid = set()
r = []
ttl = randint(self._args.ttl_min, self._args.ttl_max)
cl_entry = self._db_conn.lookup_client(service_type, client_id)
if not cl_entry:
cl_entry = {
'instances': count,
'remote': bottle.request.environ.get('REMOTE_ADDR'),
'client_type': client_type,
}
self.create_sub_data(client_id, service_type)
self._db_conn.insert_client_data(service_type, client_id, cl_entry)
sdata = self.get_sub_data(client_id, service_type)
if sdata:
sdata['ttl_expires'] += 1
# need to send short ttl?
pubs = self._db_conn.lookup_service(service_type) or []
pubs_active = [item for item in pubs if not self.service_expired(item)]
if len(pubs_active) < reqcnt:
ttl_short = self.get_service_config(service_type, 'ttl_short')
if ttl_short:
ttl = self.get_ttl_short( client_id, service_type, ttl_short)
self._debug['ttl_short'] += 1
# check existing subscriptions
subs = self._db_conn.lookup_subscription(service_type, client_id) or []
self.syslog(
'subscribe: service type=%s, client=%s:%s, ttl=%d, asked=%d pubs=%d/%d, subs=%d'
% (service_type, client_type, client_id, ttl, count,
len(pubs), len(pubs_active), len(subs)))
if subs:
for service_id, result in subs:
entry = self._db_conn.lookup_service(
service_type, service_id=service_id)
# previously published service is gone
if entry is None:
continue
# or just not reachable
if self.service_expired(entry):
continue
self._db_conn.insert_client(
service_type, service_id, client_id, result, ttl)
r.append(result)
assigned_sid.add(service_id)
count -= 1
if count == 0:
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response': response})
self._debug['cur_pend_sb'] -= 1
return response
# find least loaded instances
pubs = self.service_list(service_type, pubs_active)
# prepare response - send all if count 0
for index in range(len(pubs)):
entry = pubs[index]
# skip duplicates - could happen if some publishers have quit and
# we have already picked up others from cached information above
if entry['service_id'] in assigned_sid:
continue
assigned_sid.add(entry['service_id'])
result = entry['info']
r.append(result)
self.syslog(' assign service=%s, info=%s' %
(entry['service_id'], json.dumps(result)))
# create client entry
self._db_conn.insert_client(
service_type, entry['service_id'], client_id, result, ttl)
# update publisher entry
entry['ts_use'] = self._ts_use
self._ts_use += 1
self._db_conn.update_service(
service_type, entry['service_id'], entry)
count -= 1
if count == 0:
break
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response': response})
self._debug['cur_pend_sb'] -= 1
return response
# end api_subscribe
def api_query(self):
self._debug['msg_query'] += 1
if self._db_conn.is_restarting():
self._debug['restarting'] += 1
bottle.abort(503, 'Service Unavailable')
ctype = bottle.request.headers['content-type']
if ctype == 'application/json':
json_req = bottle.request.json
elif ctype == 'application/xml':
data = xmltodict.parse(bottle.request.body.read())
json_req = {}
for service_type, info in data.items():
json_req['service'] = service_type
json_req.update(dict(info))
else:
bottle.abort(400, e)
service_type = json_req['service']
count = int(json_req['instances'])
r = []
# lookup publishers of the service
pubs = self._db_conn.query_service(service_type)
if not pubs:
return {service_type: r}
# eliminate inactive services
pubs_active = [item for item in pubs if not self.service_expired(item)]
self.syslog(' query: Found %s publishers, %d active, need %d' %
(len(pubs), len(pubs_active), count))
# find least loaded instances
pubs = pubs_active
# prepare response - send all if count 0
for index in range(min(count, len(pubs)) if count else len(pubs)):
entry = pubs[index]
result = entry['info']
r.append(result)
self.syslog(' assign service=%s, info=%s' %
(entry['service_id'], json.dumps(result)))
# don't update pubsub data if we are sending entire list
if count == 0:
continue
response = {service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response': response})
return response
# end api_subscribe
    def show_all_services(self, service_type=None):
        """Render an HTML table of publishers (all types, or one type).

        Each row shows the publisher's type, address, id, provision/admin
        state, use count, and a heartbeat-age cell colored by liveness.
        Returns the HTML fragment as a string.
        """
        rsp = output.display_user_menu()
        rsp += ' <table border="1" cellpadding="1" cellspacing="0">\n'
        rsp += ' <tr>\n'
        rsp += ' <td>Service Type</td>\n'
        rsp += ' <td>Remote IP</td>\n'
        rsp += ' <td>Service Id</td>\n'
        rsp += ' <td>Provision State</td>\n'
        rsp += ' <td>Admin State</td>\n'
        rsp += ' <td>In Use</td>\n'
        rsp += ' <td>Time since last Heartbeat</td>\n'
        rsp += ' </tr>\n'

        # lookup publishers of the service
        if service_type:
            pubs = self._db_conn.lookup_service(service_type)
        else:
            pubs = self._db_conn.get_all_services()
        if not pubs:
            # no publishers: return just the header row
            return rsp

        for pub in pubs:
            info = pub['info']
            rsp += ' <tr>\n'
            if service_type:
                # already filtered to one type: plain text cell
                rsp += ' <td>' + pub['service_type'] + '</td>\n'
            else:
                # all-types view: link each type to its filtered page
                link = do_html_url("/services/%s" %
                                   (pub['service_type']), pub['service_type'])
                rsp += ' <td>' + link + '</td>\n'
            rsp += ' <td>' + pub['remote'] + '</td>\n'
            # composite id '<service_id>:<service_type>' links to detail page
            sig = pub['service_id'] + ':' + pub['service_type']
            link = do_html_url("/service/%s/brief" % sig, sig)
            rsp += ' <td>' + link + '</td>\n'
            rsp += ' <td>' + pub['prov_state'] + '</td>\n'
            rsp += ' <td>' + pub['admin_state'] + '</td>\n'
            rsp += ' <td>' + str(pub['in_use']) + '</td>\n'
            # include_color=True makes service_expired also return a cell
            # color and the time since the last heartbeat
            (expired, color, timedelta) = self.service_expired(
                pub, include_color=True)
            #status = "down" if expired else "up"
            rsp += ' <td bgcolor=%s>' % (
                color) + str(timedelta) + '</td>\n'
            rsp += ' </tr>\n'
        rsp += ' </table>\n'

        return rsp
    # end show_services
def services_json(self, service_type=None):
rsp = []
# lookup publishers of the service
if service_type:
pubs = self._db_conn.lookup_service(service_type)
else:
pubs = self._db_conn.get_all_services()
if not pubs:
return {'services': rsp}
for pub in pubs:
entry = pub.copy()
entry['status'] = "down" if self.service_expired(entry) else "up"
entry['hbcount'] = 0
# send unique service ID (hash or service endpoint + type)
entry['service_id'] = str(entry['service_id'] + ':' + entry['service_type'])
rsp.append(entry)
return {'services': rsp}
# end services_json
    def service_http_put(self, id):
        """Update a publisher entry; currently only 'admin_state' may change.

        The request body must be JSON containing at least 'service_type'.
        Aborts 400 on a malformed body and 405 if the service is unknown.
        Returns an empty dict on success.
        """
        self.syslog('Update service %s' % (id))
        try:
            json_req = bottle.request.json
            service_type = json_req['service_type']
            self.syslog('Entry %s' % (json_req))
        except (ValueError, KeyError, TypeError) as e:
            # body missing / not JSON / lacking service_type
            bottle.abort(400, e)

        entry = self._db_conn.lookup_service(service_type, service_id=id)
        if not entry:
            bottle.abort(405, 'Unknown service')

        # only admin_state is honored from the request body
        if 'admin_state' in json_req:
            entry['admin_state'] = json_req['admin_state']
        self._db_conn.update_service(service_type, id, entry)

        self.syslog('update service=%s, sid=%s, info=%s'
                    % (service_type, id, entry))

        return {}
    # end service_http_put
def service_http_delete(self, id):
info = id.split(':')
service_type = info[1]
service_id = info[0]
self.syslog('Delete service %s:%s' % (service_id, service_type))
entry = self._db_conn.lookup_service(service_type, service_id)
if not entry:
bottle.abort(405, 'Unknown service')
entry['admin_state'] = 'down'
self._db_conn.update_service(service_type, service_id, entry)
self.syslog('delete service=%s, sid=%s, info=%s'
% (service_type, service_id, entry))
return {}
# end service_http_delete
# return service info - meta as well as published data
def service_http_get(self, id):
info = id.split(':')
service_type = info[1]
service_id = info[0]
pub = self._db_conn.lookup_service(service_type, service_id)
if pub:
entry = pub.copy()
entry['hbcount'] = 0
entry['status'] = "down" if self.service_expired(entry) else "up"
return entry
# end service_http_get
# return service info - only published data
def service_brief_http_get(self, id):
info = id.split(':')
service_type = info[1]
service_id = info[0]
entry = self._db_conn.lookup_service(service_type, service_id)
if entry:
return entry['info']
else:
return 'Unknown service %s' % id
# end service_brief_http_get
# purge expired publishers
def cleanup_http_get(self):
pubs = self._db_conn.get_all_services()
for entry in pubs:
if self.service_expired(entry):
self._db_conn.delete_service(entry)
return self.show_all_services()
#end
def show_all_clients(self):
rsp = output.display_user_menu()
rsp += ' <table border="1" cellpadding="1" cellspacing="0">\n'
rsp += ' <tr>\n'
rsp += ' <td>Client IP</td>\n'
rsp += ' <td>Client Type</td>\n'
rsp += ' <td>Client Id</td>\n'
rsp += ' <td>Service Type</td>\n'
rsp += ' <td>Service Id</td>\n'
rsp += ' <td>TTL (sec)</td>\n'
rsp += ' <td>Time Remaining</td>\n'
rsp += ' <td>Refresh Count</td>\n'
rsp += ' </tr>\n'
# lookup subscribers of the service
clients = self._db_conn.get_all_clients()
if not clients:
return rsp
for client in clients:
(service_type, client_id, service_id, mtime, ttl) = client
cl_entry = self._db_conn.lookup_client(service_type, client_id)
if cl_entry is None:
continue
sdata = self.get_sub_data(client_id, service_type)
if sdata is None:
self.syslog('Missing sdata for client %s, service %s' %
(client_id, service_type))
continue
rsp += ' <tr>\n'
rsp += ' <td>' + cl_entry['remote'] + '</td>\n'
client_type = cl_entry.get('client_type', '')
rsp += ' <td>' + client_type + '</td>\n'
rsp += ' <td>' + client_id + '</td>\n'
rsp += ' <td>' + service_type + '</td>\n'
sig = service_id + ':' + service_type
link = do_html_url("service/%s/brief" % (sig), sig)
rsp += ' <td>' + link + '</td>\n'
rsp += | |
if (branch_path[i][1] == "transloss" and
branch_path[i][0] == branch_path[i+1][0])]
for i in remove:
del branch_path[i]
brecon[node] = branch_path
# remove unused nodes from brecon
for node in brecon.keys():
if node.name not in tree:
del brecon[node]
return doomed
def add_implied_spec_nodes_brecon(tree, brecon):
    """
    Adds speciation nodes to tree that are implied but are not present
    because of gene losses.

    For each 'specloss'/'transloss' event along a branch, a new node is
    spliced in above 'node' carrying the corresponding 'spec'/'trans'
    event; finally each original node's branch path is trimmed to its
    last event.

    Fixes: the loop previously inserted new entries into 'brecon' while
    iterating brecon.items(), which raises RuntimeError on Python 3;
    iterate a snapshot instead.  The two identical specloss/transloss
    branches were also merged.
    """
    # snapshot: brecon gains node2 entries inside the loop
    for node, events in list(brecon.items()):
        for sp, event in events:
            if event in ("specloss", "transloss"):
                # splice node2 between 'node' and its current parent;
                # repeated events chain node upward one step at a time
                parent = node.parent
                children = parent.children
                node2 = tree.new_node()

                node2.parent = parent
                children[children.index(node)] = node2

                node.parent = node2
                node2.children.append(node)

                # the implied node carries the non-loss form of the event
                implied = "spec" if event == "specloss" else "trans"
                brecon[node2] = [[sp, implied]]

        # the original node keeps only its final event
        brecon[node] = events[-1:]
def write_brecon(out, brecon):
    """
    Writes a branch reconciliation to an open file-like object.

    Each line holds a gene node name followed by tab-separated
    (species node name, event) pairs along that branch's path.

    Fix: dict.iteritems() is Python 2-only (and inconsistent with the
    .items() used elsewhere in this module); use items().
    """
    for node, branch_path in brecon.items():
        out.write(str(node.name))
        for snode, event in branch_path:
            out.write("\t" + str(snode.name) + "\t" + event)
        out.write("\n")
def read_brecon(infile, tree, stree):
    """
    Reads a branch reconciliation from a file.

    Each line is a gene node name followed by tab-separated
    (species node name, event) pairs.  Purely numeric names are looked
    up as ints in 'tree'/'stree'.
    """
    brecon = {}

    for line in infile:
        fields = line.rstrip().split("\t")

        # resolve the gene node (numeric names are int keys)
        name = fields[0]
        if name.isdigit():
            name = int(name)

        # remaining fields alternate: species name, event, species name, ...
        path = []
        for sname, event in zip(fields[1::2], fields[2::2]):
            if sname.isdigit():
                sname = int(sname)
            path.append([stree[sname], event])

        brecon[tree[name]] = path

    return brecon
def find_bevents(brecon):
    """
    Iterates over branch events (bevents) implied by a branch reconciliation

    Events have the format
      (gene_node, 'v'|'e', event, details)
    where gene_node is the vertex ('v') or edge ('e') where the event occurs
    and (event, details) are one of the following

      'spec', snode  = speciation event at species node snode
      'gene', snode  = extant gene (leaf) at species node snode
      'dup', snode   = duplication event along species branch snode
      'loss', snode  = loss event occuring along species branch snode
      'trans', (src, dst) = horizontal transfer event from source species src
                            to destination species dst

    Fix: dict.iteritems() is Python 2-only (and inconsistent with the
    .items() used elsewhere in this module); use items().
    """
    for node, branch_path in brecon.items():
        for i, (snode, event) in enumerate(branch_path):
            if event == "dup":
                yield (node, "v", "dup", snode)
            elif event == "spec":
                yield (node, "v", "spec", snode)
            elif event == "gene":
                yield (node, "v", "gene", snode)
            elif event == "specloss":
                yield (node, "e", "spec", snode)

                # mark the species branch in which we expected a gene lineage
                # but it is absent
                next_snode = branch_path[i+1][0]
                for schild in snode.children:
                    if schild != next_snode:
                        yield (node, "e", "loss", schild)
            elif event == "trans":
                # the target species is the species that one of the children
                # map to
                assert len(node.children) == 2, len(node.children)
                starget = brecon[node.children[0]][0][0]
                if starget == snode:
                    starget = brecon[node.children[1]][0][0]
                    assert starget != snode
                yield (node, "v", "trans", (snode, starget))
            elif event == "transloss":
                # the gene is lost in this species
                yield (node, "e", "loss", snode)

                # it transfers to the next species
                yield (node, "e", "trans", (snode, branch_path[i+1][0]))
            else:
                raise Exception("unknown event '%s'" % event)
def write_bevents(out, bevents):
    """
    Writes branch events to file, one tab-separated record per line.

    'trans' events carry a (src, dst) species pair; all other events
    carry a single species node.
    """
    for node, kind, event, details in bevents:
        fields = [str(node.name), kind, event]
        if event == "trans":
            fields.append(str(details[0].name))
            fields.append(str(details[1].name))
        else:
            fields.append(str(details.name))
        out.write("\t".join(fields) + "\n")
#=============================================================================
# add implied speciation nodes to a gene tree
def add_spec_node(node, snode, tree, recon, events):
    """
    insert new speciation node above gene node 'node' from gene tree 'tree'

    new node reconciles to species node 'snode'. Modifies recon and events
    accordingly.  Returns the newly created node.
    """
    newnode = treelib.TreeNode(tree.new_name())
    parent = node.parent

    # find index of node in parent's children
    nodei = parent.children.index(node)

    # insert new node into tree
    # add_child appears to append newnode at the end of parent.children
    # (the pop() below removes that appended duplicate after newnode is
    # written into node's old slot) — TODO confirm treelib semantics
    tree.add_child(parent, newnode)
    parent.children[nodei] = newnode
    parent.children.pop()
    tree.add_child(newnode, node)

    # add recon and events info: the implied node is a speciation at snode
    recon[newnode] = snode
    events[newnode] = "spec"

    return newnode
def add_implied_spec_nodes(tree, stree, recon, events):
    """
    adds speciation nodes to tree that are implied but are not present
    because of gene losses

    Walks every gene node and inserts one speciation node per species
    branch skipped between the node's mapping and its parent's mapping.
    Returns the list of nodes that were added.
    """
    added_nodes = []

    for node in list(tree):
        # process this node and the branch above it

        # handle root node specially
        if node.parent is None:
            # ensure root of gene tree properly reconciles to
            # root of species tree
            if recon[node] == stree.root:
                continue
            # create a new gene-tree root mapped to the species root
            tree.root = treelib.TreeNode(tree.new_name())
            tree.add_child(tree.root, node)
            recon[tree.root] = stree.root
            events[tree.root] = "spec"
            added_nodes.append(tree.root)

        # determine starting and ending species
        sstart = recon[node]
        send = recon[node.parent]

        # the species path is too short to have implied speciations
        if sstart == send:
            continue

        parent = node.parent

        # determine species path of this gene branch (node, node->parent)
        # each insertion rebinds 'node' to the just-added speciation node,
        # so the chain is built bottom-up along the species path
        snode = sstart.parent
        while snode != send:
            added_nodes.append(add_spec_node(node, snode, tree, recon, events))
            node = node.parent
            snode = snode.parent

        # determine whether node.parent is a dup
        # if so, send (a.k.a. species end) is part of species path
        if events[parent] == "dup":
            added_nodes.append(add_spec_node(node, send, tree, recon, events))

    return added_nodes
#=============================================================================
# reconciliation rearrangements
def change_recon_up(recon, node, events=None):
    """
    Move the mapping of a node up one branch.

    If event labels are given and the node is a speciation, the move is
    recorded by promoting the event to a duplication; otherwise the
    reconciliation itself is moved to the parent species branch.

    :param recon: dict mapping gene nodes -> species nodes (mutated)
    :param node: gene tree node whose mapping is moved
    :param events: optional dict of gene nodes -> event labels (mutated)
    """
    promotable = events is not None and events[node] == "spec"
    if promotable:
        # promote speciation to duplication: R'(v) = e(R(u))
        events[node] = "dup"
        return
    # R'(v) = p(R(u))
    recon[node] = recon[node].parent
def change_recon_down(recon, node, schild, events=None):
    """
    Move the mapping of a node down one branch.

    If event labels are given and the node already maps to 'schild', the
    move is recorded by demoting the event back to a speciation (the
    inverse of change_recon_up's promotion); otherwise the reconciliation
    is moved to 'schild'.

    :param recon: dict mapping gene nodes -> species nodes (mutated)
    :param node: gene tree node whose mapping is moved
    :param schild: target species node (a child of the current mapping)
    :param events: optional dict of gene nodes -> event labels (mutated)
    """
    demotable = events is not None and recon[node] == schild
    if demotable:
        events[node] = "spec"
        return
    recon[node] = schild
def can_change_recon_up(recon, node, events=None):
    """Return True if recon can remap 'node' one 'step' up.

    A non-leaf speciation can always be promoted to a duplication.
    Otherwise a duplication can move up one species branch provided the
    current mapping has a parent branch and the node is not already
    mapped to the same branch as its own parent.
    """
    spec_promotable = (events is not None and events[node] == "spec"
                       and not node.is_leaf())
    if spec_promotable:
        # promote speciation to duplication
        return True
    # move duplication up one branch
    mapped = recon[node]
    mapped_parent = mapped.parent
    # rearrangement is valid if:
    #   1. the node is internal,
    #   2. there is a parent species branch to move into, and
    #   3. either there is no gene parent to restrict the move, or the
    #      node is not already mapped to the same branch as its parent.
    return (not node.is_leaf()
            and mapped_parent is not None
            and (node.parent is None or mapped != recon[node.parent]))
def enum_recon(tree, stree, depth=None,
               step=0, preorder=None,
               recon=None, events=None,
               gene2species=None):
    """
    Enumerate reconciliations between a gene tree and a species tree.

    Yields (recon, events) pairs.  The same dictionaries are mutated in
    place and re-yielded on each step, so callers that need to keep a
    particular reconciliation must copy it before advancing the generator.

    :param tree: gene tree
    :param stree: species tree
    :param depth: maximum number of rearrangements away from the initial
        reconciliation (None = unlimited)
    :param step: index into 'preorder' at which to resume (used internally
        by the recursion)
    :param preorder: cached preorder traversal of 'tree'
    :param recon: current reconciliation mapping (computed if None)
    :param events: current event labeling (computed alongside recon)
    :param gene2species: gene-name -> species-name mapping, used only when
        the initial reconciliation must be computed
    """
    if recon is None:
        recon = reconcile(tree, stree, gene2species)
        events = label_events(tree, recon)
    if preorder is None:
        preorder = list(tree.preorder())
    # yield current recon
    yield recon, events
    if depth is None or depth > 0:
        # BUGFIX: was xrange(), a Python 2-only builtin; range() is
        # equivalent here and works on both Python 2 and 3.
        for i in range(step, len(preorder)):
            node = preorder[i]
            if can_change_recon_up(recon, node, events):
                schild = recon[node]
                change_recon_up(recon, node, events)
                # recurse
                depth2 = depth - 1 if depth is not None else None
                for r, e in enum_recon(tree, stree, depth2,
                                       i, preorder,
                                       recon, events):
                    yield r, e
                # undo the rearrangement before trying the next node
                change_recon_down(recon, node, schild, events)
#=============================================================================
# local rearrangements
def perform_nni(tree, node1, node2, change=0, rooted=True):
    r"""Proposes a new tree using Nearest Neighbor Interchange

    Branch for NNI is specified by giving its two incident nodes (node1 and
    node2). Change specifies which subtree of node1 will be swapped with
    the uncle. See figure below.

              node2
             /     \
        uncle       node1
                   /     \
             child[0]   child[1]

    special case with rooted branch and rooted=False:

                  node2
                 /     \
            node2'      node1
           /     \     /     \
        uncle    *  child[0] child[1]

    :param tree: tree to rearrange (modified in place)
    :param node1: lower endpoint of the NNI edge (swapped with node2 if
        given in the wrong order)
    :param node2: upper endpoint of the NNI edge
    :param change: index (0 or 1) of the child of node1 to swap with the uncle
    :param rooted: if False, a root edge is rerouted to the sibling branch
        (special case above) before performing the swap
    :return: None (returns early without modifying the tree when the chosen
        edge cannot support an NNI)
    """
    # normalize argument order so that node1 is the child end of the edge
    if node1.parent != node2:
        node1, node2 = node2, node1
    # try to see if edge is one branch (not root edge)
    if not rooted and treelib.is_rooted(tree) and \
       node2 == tree.root:
        # special case of specifying root edge
        if node2.children[0] == node1:
            node2 = node2.children[1]
        else:
            node2 = node2.children[0]
        # edge is not an internal edge, give up
        if len(node2.children) < 2:
            return
    if node1.parent == node2.parent == tree.root:
        # node1 and node2 are siblings under the root; the "uncle" slot is
        # taken from node2's own children
        uncle = 0
        if len(node2.children[0].children) < 2 and \
           len(node2.children[1].children) < 2:
            # can't do NNI on this branch
            return
    else:
        assert node1.parent == node2
        # find uncle (the child of node2 that is not node1)
        uncle = 0
        if node2.children[uncle] == node1:
            uncle = 1
    # swap parent pointers
    node1.children[change].parent = node2
    node2.children[uncle].parent = node1
    # swap child pointers
    node2.children[uncle], node1.children[change] = \
        node1.children[change], node2.children[uncle]
def propose_random_nni(tree):
    """
    Propose a random NNI rearrangement

    Picks a random internal, non-root node and returns the (node1, node2)
    edge between it and its parent, suitable for perform_nni().

    NOTE(review): on Python 3, random.sample() requires a sequence, so
    tree.nodes.values() would need list() around it — confirm the targeted
    Python version (the surrounding code uses Python 2 idioms).
    """
    nodes = tree.nodes.values()
    # find edges for NNI
    while True:
        node1 = random.sample(nodes, 1)[0]
        if not node1.is_leaf() and node1.parent is not None:
            break
    node2 = node1.parent
    #a = node1.children[random.randint(0, 1)]
    #b = node2.children[1] if node2.children[0] == node1 else node2.children[0]
    #assert a.parent.parent == b.parent
    # trailing comma still yields a 2-tuple (node1, node2)
    return node1, node2,
import cv2, time, numpy as np
# test pictures
# test pictures
# Paths to still images used for offline detector testing; the trailing
# comment on each line describes the photo's content.
pic1 = 'test_images/i_robot_1.jpg'  # robot body image
pic2 = 'test_images/i_robot_2.jpg'  # robot full body image
pic3 = 'test_images/i_robot_3.jpg'  # robot face image
pic4 = 'test_images/human_face.jpg'  # human face
pic5 = 'test_images/human_upper_body.jpg'  # human upper body
pic6 = 'test_images/full_body_color_1.jpg'  # human full body in suit
pic7 = 'test_images/full_body_color_2.jpg'  # human full body casual clothes
# pic8 = 'test_images/pedestrians_1.jpg'  # 4 humans (full body) WARNING: DO NOT USE ON CPU
pic9 = 'test_images/stock_pic_man.jpg'  # human arms up full body
pic10 = 'test_images/fullbody-copy.jpg'  # human in suit full body 1
pic11 = 'test_images/fullbody_x.jpg'  # human in suit full body 2
pic12 = 'test_images/full_body_y1.jpg'  # human full body
pic13 = 'test_images/spiderman_full_1.jpg'  # spiderman full
pic14 = 'test_images/human_full_y.jpg'  # full body
pic15 = 'test_images/test_15.jpg'  # human arms up
pic16 = 'test_images/test_16.jpg'  # human arm up (R)
pic17 = 'test_images/test_17.jpg'  # human arm up (L)
pic18 = 'test_images/test_18.jpg'  # human arms on hips
# cascade classifiers
# Pre-trained Haar cascade classifiers loaded at import time.
# NOTE(review): cc_full is commented out here but is still referenced by the
# commented-out detection block in video_test() — re-enable both together.
cc_front = cv2.CascadeClassifier('pretrained/haarcascade_frontalface_default.xml')
cc_profile = cv2.CascadeClassifier('pretrained/haarcascade_profileface.xml')
# cc_full = cv2.CascadeClassifier('pretrained/haarcascade_fullbody.xml')
cc_upper = cv2.CascadeClassifier('pretrained/HS.xml')
def estimate_skin_tone(face_roi):
    """ Find average color in region of image corresponding to face.

    Each channel is averaged independently over the whole ROI and truncated
    to an int.
    NOTE(review): the channel order follows the input image; OpenCV frames
    are BGR, not RGB — confirm before labeling the result as RGB.
    :param face_roi: region of interest where face is detected as matrix
    :return: per-channel mean intensities (skin tone estimate) as a list
    """
    channel_count = face_roi.shape[-1]
    tone = []
    for channel in range(channel_count):
        tone.append(int(face_roi[:, :, channel].mean()))
    return tone
def video_test():
    """ TEST: Image processing in real-time (face, upper body and full body detection)

    Captures frames from the default webcam, runs limb_tracker() on each
    frame (the cascade-based detection block below is currently disabled),
    and displays the annotated result until 'q' is pressed.
    NOTE(review): limb_tracker is defined elsewhere in this file/module.
    """
    # video capture
    cap = cv2.VideoCapture(0)  # device 0 = default webcam
    while True:
        # read picture
        # NOTE(review): the success flag from cap.read() is discarded;
        # 'frame' may be None if the capture fails — confirm acceptable.
        _, frame = cap.read()
        # gray = cv2.cvtColor(src=frame, code=cv2.COLOR_RGBA2GRAY)
        '''
        # detect face, upper body and full body
        face, _01, _02 = cc_front.detectMultiScale3(image=gray, scaleFactor=1.3,
                                                    minNeighbors=5, outputRejectLevels=True)
        upper_body, _10, _20 = cc_upper.detectMultiScale3(image=gray, scaleFactor=1.1,
                                                          minNeighbors=7, outputRejectLevels=True)
        full_body, _001, _002 = cc_full.detectMultiScale3(image=gray, scaleFactor=1.008,
                                                          minNeighbors=6, outputRejectLevels=True)
        # draw rectangles
        for (x, y, w, h) in face:
            cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 0), thickness=2)
        for (x, y, w, h) in upper_body:
            cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 0, 0), thickness=2)
        for (x, y, w, h) in full_body:
            cv2.rectangle(img=frame, pt1=(x, y), pt2=(x + w, y + h), color=(0, 0, 255), thickness=2)
        '''
        frame = limb_tracker(frame=frame)
        # display the resulting frame
        cv2.imshow(winname='Frame', mat=frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # when everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def centroid_of_rect(roi):
    """ Finds the coordinates for the centroid of a rectangle.

    NOTE(review): the result is (rows // 2, cols // 2), i.e. (y, x) in
    image terms — confirm callers expect that order.
    :param roi: rectangle region of interest as matrix
    :return: coordinates of centroid within region of interest
    """
    rows, cols = roi.shape[0], roi.shape[1]
    return int(rows / 2), int(cols / 2)
def approx_shoulders(upper_body_roi):
    """ Approximate the coordinates of shoulders based on upper body region of interest detection.
    Here I assume that these coordinates are more often than not located as follows:
        -> Right shoulder: 3/4 of height on Y-axis, 1/6 of width on X-axis
        -> Left shoulder: 3/4 of height on Y-axis, 5/6 of width on X-axis
    (BUGFIX: the docstring previously said 1/4 of the height, but the code has
    always placed the shoulders at 3/4 of the height; the text now matches the
    implementation.)
    :param upper_body_roi: rectangle region of interest as matrix
    :return: 2 sets of (x, y) coordinates for shoulder positions
    """
    height = upper_body_roi.shape[0]
    width = upper_body_roi.shape[1]
    # both shoulders sit on the same horizontal line, 3/4 down the ROI
    shoulder_y = int((height / 4) * 3)
    return (int(width / 6), shoulder_y), (int((width / 6) * 5), shoulder_y)
def approx_biceps(thresh, shoulder_pts, dst):
    """ Approximate biceps position. This is achieved by creating an array of line segments all
    starting at the shoulder points, going outwards at inclinations of n degrees (n being every
    integer value between 1) 10° up 180°, and 2) 350° down 180°, for the right and left arm
    respectively). Once all the lines are found, we can iterate through all the end points of
    these lines to determine whether they are white or black in 'thresholded' frame.
    NOTE: If threshold op is performed on hue channel using cv2.THRESH_BINARY & cv2.THRESH_OTSU
    threshold types, it will likely yield a result where the human shape is rendered white and
    background pixels black.
    :param thresh: 'thresholded' image as matrix
    :param shoulder_pts: tuple of approximated shoulder positions
    :param dst: normal frame as matrix
    :return: tuple of approximated coordinates corresponding to the tip of biceps

    Either element of the returned (biceps_L, biceps_R) tuple is None when no
    white probe point was found for that arm.
    NOTE(review): assumes 'thresh' is a single-channel binary image whose
    foreground pixels equal exactly 255 — confirm against the caller's
    threshold op.  'dst' is mutated: a circle is drawn on each found point.
    """
    biceps_R = None; biceps_L = None
    # r_lines = []; l_lines = [] # arrays of lines for right/left arm
    #############
    # RIGHT ARM # FIXME: This is actually the left arm
    #############
    # starting point is the right shoulder coordinates
    r_start1 = shoulder_pts[1]
    for i in range(10, 180):
        ''' We first determine a segment's delimiting point by doing the following:
        -> x2 = (x1 + (sin(i° * PI / 180°) * 142px))
        -> y2 = (y1 + (cos(i° * PI / 180°) * 142px))
        This (x2, y2) gives us the coordinates of the end point of a straight line
        segment starting at point (x1, y1) going outward with an inclination of i°
        with respect to 180°. The 142px value is arbitrary, it's the measure that
        happens to work best with most of my test photos.
        '''
        sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)
        r_end1 = (int(r_start1[0] + (sin * 142)), int(r_start1[1] + (cos * 142)))
        try:
            ''' NOTE: In cv2, the pixel lookup method seems to be inverted(?); coordinates in
            matrix are encoded as (y, x) instead of (x, y), hence the need to invert the
            order of r_end1 tuple in statement below. I'm unsure why it is this way but it
            took an embarrassing amount of time to figure it out. '''
            if thresh[r_end1[1], r_end1[0]] == 255:
                # mark the accepted probe point on the output frame
                cv2.circle(img=dst, center=r_end1, radius=3, color=(0, 0, 255), thickness=8)
                biceps_R = r_end1
                ''' Break from loop once we've found the first point starting from the bottom.
                ALTERNATIVELY: We may store all the white points in a list (comment below)
                and later loop through all of them to determine which one is closest/farthest
                from shoulder point by using a distance metric like Hamming or Euclidean. '''
                # r_lines.append(r_end1)
                break
        except IndexError:  # catch IndexError for smaller frames
            continue
    ############
    # LEFT ARM #
    ############
    # starting point is the left shoulder coordinates
    l_start1 = shoulder_pts[0]
    ''' Same as previously only decrementing from 350° to 180°. '''
    for i in range(350, 180, -1):
        sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)
        l_end1 = (int(l_start1[0] + (sin * 142)), int(l_start1[1] + (cos * 142)))
        try:
            if thresh[l_end1[1], l_end1[0]] == 255:
                cv2.circle(img=dst, center=l_end1, radius=3, color=(0, 0, 255), thickness=8)
                biceps_L = l_end1
                # l_lines.append(l_end1)
                break
        except IndexError:
            continue
    return biceps_L, biceps_R
def approx_forearms(shoulder_pts, biceps_pts, thresh, dst):
""" Approximate forearms position. This is achieved in a way similar to the biceps detection
system; here, we use a measure of the angle of inclination of the biceps to determine where
and how to execute our white pixel probe.
:param shoulder_pts: shoulder coordinates
:param biceps_pts: biceps coordinates
:param thresh: 'thresholded' frame as a matrix
:param dst: normal frame as matrix
:return: tuple of coordinates corresponding to the approx. tip of forearms
"""
forearm_R = None; forearm_L = None
#############
# RIGHT ARM # FIXME: This is actually the left arm
#############
r_start = biceps_pts[1]
try:
''' Given the equation of a line, y = mx + b, we can find the inclination like so:
-> Slope: m = (y2 - y1) / (x2 - x1)
-> Angle: theta = tan^-1(slope)
'''
r_shoulder = shoulder_pts[1]
r_slope = (r_start[1] - r_shoulder[1]) / (r_start[0] - r_shoulder[0])
r_incl = np.arctan(r_slope)
except TypeError:
r_incl = 0
if 1 > abs(r_incl) > 0.01:
for i in range(210, -90, -1):
sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)
try:
r_end = (int(r_start[0] + (sin * 128)), int(r_start[1] + (cos * 128)))
except TypeError:
continue
try:
if thresh[r_end[1], r_end[0]] == 255:
cv2.circle(img=dst, center=r_end, radius=3, color=(0, 0, 255), thickness=8)
forearm_R = r_end
break
except IndexError:
continue
else:
for i in range(0, 180):
sin = np.sin(i * np.pi / 180); cos = np.cos(i * np.pi / 180)
try:
r_end = (int(r_start[0] + (sin * 128)), int(r_start[1] + (cos * 128)))
except TypeError:
continue
try:
if thresh[r_end[1], r_end[0]] == 255:
cv2.circle(img=dst, center=r_end, radius=3, color=(0, 0, 255), thickness=8)
forearm_R = r_end
break
except IndexError:
| |
0.3',
'boa/0.92o': 'boa 0.92o',
'boa/0.93.15': 'boa 0.93.15',
'boa/0.94.14rc21': 'boa 0.94.14rc21',
'cl-http/70.216 (lispworks': 'cl-http 70.216',
'caudium/1.4.9 stable': 'caudium 1.4.9',
'cherokee': 'cherokee 0.6.0',
'cherokee/0.99': 'cherokee 0.99',
'virata-emweb/r6_0_1': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'virata-emweb/r6_2_0': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'compaqhttpserver/5.2': 'compaq http server 5.2',
'compaqhttpserver/5.7': 'compaq http server 5.7',
'compaqhttpserver/5.91': 'compaq http server 5.91',
'compaqhttpserver/5.94': 'compaq http server 5.94',
'compaqhttpserver/9.9 hp system management homepage/192.168.3.11': 'compaq http server 9.9',
'cougar/9.5.6001.6264': 'cougar 9.5.6001.6264',
'goahead-webs': 'flexwatch fw-3440-b',
'gatling/0.10': 'gatling 0.10',
'gatling/0.9': 'gatling 0.9',
'globalscape-secure server/3.3': 'globalscape secure server 3.3',
'gws': 'google web server 2.1',
'mfe': 'google web server 2.1',
'sffe': 'google web server 2.1',
'httpi/1.5.2 (demonic/aix)': 'httpi 1.5.2',
'httpi/1.6.1 (demonic/aix)': 'httpi 1.6.1',
'hiawatha v6.11': 'hiawatha 6.11',
'hiawatha/6.2 mod_gwbasic/1.7.3 openxor/0.3.1a': 'hiawatha 6.2',
'ibm_http_server/192.168.127.12 apache/2.0.47 (unix)': 'ibm http server 192.168.127.12',
'ibm_http_server/172.16.17.32 apache/2.0.47 (unix)': 'ibm http server 172.16.17.32',
'ibm_http_server/172.16.58.39 apache/2.0.47 (unix) dav/2': 'ibm http server 172.16.17.32',
'ibm_http_server': 'ibm http server 172.16.31.10',
'ipc@chip': 'ipc@chip 1.04',
'icewarp/8.3': 'icewarp 8.3.0',
'indy/9.00.10': 'indy idhttpserver 9.00.10',
'jana-server/172.16.58.3': 'jana-server 172.16.58.3',
'jetty/5.1.10 (linux/2.6.12 i386 java/1.5.0_05': 'jetty 5.1.10',
'jetty/5.1.1 (linux/2.6.9-5.elsmp i386 java/1.5.0_09': 'jetty 5.1.1',
'jetty(6.1.1)': 'jetty 6.1.1',
'jigsaw/2.2.5': 'jigsaw 2.2.5',
'jigsaw/2.2.6': 'jigsaw 2.2.6',
'jigsaw/2.3.0-beta1': 'jigsaw 2.3.0-beta1',
'kget': 'kget web interface 2.1.3',
'klone/2.1.0rc1': 'klone 2.1.0rc1',
'allegro-software-rompager/2.00': 'konica ip-421/7020 allegro rompager 2.00',
'boa/0.94.13': 'linksys wvc54gc boa 0.94.13',
'listmanagerweb/8.8c (based on tcl-webserver/3.4.2)': 'listmanagerweb 8.8c',
'litespeed': 'litespeed web server 3.3',
'domino-go-webserver/4.6.2.5': 'lotus domino go webserver 192.168.3.11',
'mathopd/1.5p6': 'mathopd 1.5p6',
'microsoft-iis/5.0': 'microsoft iis 5.0',
'microsoft-iis/5.1': 'microsoft iis 5.1',
'microsoft-iis/6.0': 'microsoft iis 6.0',
'microsoft-iis/6.0.0': 'microsoft iis 6.0',
'microsoft-iis/7.0': 'microsoft iis 7.0',
'mongrel 1.0': 'mongrel 1.0',
'aegis_nanoweb/2.2.10-dev (linux': 'nanoweb 2.2.10',
'rapid logic/1.1': 'net2phone rapid logic 1.1',
'thttpd/2.25b 29dec2003': 'netbotz 500 thttpd 2.25b',
'netware-enterprise-web-server/5.1': 'netware enterprise web server 5.1',
'zyxel-rompager/3.02': 'netgear rp114 3.26',
'allegro-software-rompager/2.10': 'netopia router allegro rompager 2.10',
'netscape-enterprise/2.01': 'netscape enterprise server 2.01',
'netscape-enterprise/3.5.1': 'netscape enterprise server 3.5.1',
'netscape-enterprise/3.5.1g': 'netscape enterprise server 3.5.1g',
'netscape-enterprise/4.1': 'netscape enterprise server 4.1',
'netscape-enterprise/6.0': 'netscape enterprise server 6.0',
'netscape-fasttrack/3.02': 'netscape fasttrack 3.02a',
'osu/3.12alpha': 'osu 3.12alpha',
'osu/3.9': 'osu 3.9',
'omnihttpd/2.06': 'omnihttpd 2.06',
'omnihttpd/2.09': 'omnihttpd 2.09',
'omnihttpd/2.10': 'omnihttpd 2.10',
'opensa/1.0.1 / apache/1.3.23 (win32) php/4.1.1 dav/1.0.2': 'opensa 1.0.1',
'opensa/1.0.3 / apache/1.3.26 (win32) mod_ssl/2.8.9 openssl/0.9.6g': 'opensa 1.0.3',
'opensa/1.0.4 / apache/1.3.27 (win32) php/4.2.2 mod_gzip/1.3.19.1a': 'opensa 1.0.4',
'opensa/1.0.5 / apache/1.3.27 (win32) (using ihtml/2.20.500)': 'opensa 1.0.5',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.0.0 (n': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.0 oracle-http-server': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.2 oracle-http-server': 'oracle application server 10g 10.1.2.0.2',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.2.0 (tn': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.2.2.0 oracle-http-server': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.3.0.0 oracle-http-server': 'oracle application server 10g 10.1.3.0.0',
'oracle-application-server-10g/10.1.3.1.0 oracle-http-server': 'oracle application server 10g 10.1.3.1.0',
'oracle-application-server-10g/9.0.4.0.0 oracle-http-server': 'oracle application server 10g 9.0.4.0.0',
'oracle-application-server-10g/9.0.4.1.0 oracle-http-server': 'oracle application server 10g 9.0.4.1.0',
'oracle-application-server-10g/9.0.4.2.0 oracle-http-server': 'oracle application server 10g 9.0.4.2.0',
'oracle-application-server-10g/9.0.4.3.0 oracle-http-server': 'oracle application server 10g 9.0.4.3.0',
'oracle9ias/9.0.2.3.0 oracle http server': 'oracle application server 9i 9.0.2.3.0',
'oracle9ias/9.0.2 oracle http server': 'oracle application server 9i 9.0.2',
'oracle9ias/192.168.3.11 oracle http server': 'oracle application server 9i 192.168.3.11',
'orion/2.0.7': 'orion 2.0.7',
'oversee webserver v1.3.18': 'oversee webserver 1.3.18',
'httpd/1.00': 'packetshaper httpd 1.00',
'wg_httpd/1.0(based boa/0.92q)': 'philips netcam 1.4.8 wg_httpd 1.0',
'thttpd/2.20b 10oct00': 'qnap nas-4100 2.26.0517',
'http server 1.0': 'qnap ts-411u 1.2.0.0531',
'resin/3.0.23': 'resin 3.0.23',
'resin/3.0.6': 'resin 3.0.6',
'web-server/3.0': 'ricoh aficio 6002 3.53.3 web-server 3.0',
'roxen/2.2.213': 'roxen 2.2.213',
'roxen/4.5.111-release2': 'roxen 4.5.111',
'roxen/4.5.145-rc2': 'roxen 4.5.145',
'snap appliances, inc./3.1.603': 'snap appliance 3.1.603',
'snap appliance, inc./3.4.803': 'snap appliance 3.4.803',
'snap appliance, inc./3.4.805': 'snap appliance 3.4.805',
'snap appliance, inc./4.0.830': 'snap appliance 4.0.830',
'snap appliance, inc./4.0.854': 'snap appliance 4.0.854',
'snap appliance, inc./4.0.860': 'snap appliance 4.0.860',
'snapstream': 'snapstream digital video recorder',
'netevi/1.09': 'sony snc-rz30 netevi 1.09',
'netevi/2.05': 'sony snc-rz30 netevi 2.05',
'netevi/2.05g': 'sony snc-rz30 netevi 2.05g',
'netevi/2.06': 'sony snc-rz30 netevi 2.06',
'netevi/2.13': 'sony snc-rz30 netevi 2.13',
'netevi/2.14': 'sony snc-rz30 netevi 2.14',
'netevi/2.24': 'sony snc-rz30 netevi 2.24',
'netevi/3.01': 'sony snc-rz30 netevi 3.01',
'netevi/3.02': 'sony snc-rz30 netevi 3.02',
'netevi/3.03': 'sony snc-rz30 netevi 3.03',
'netevi/3.10': 'sony snc-rz30 netevi 3.10',
'netevi/3.10a': 'sony snc-rz30 netevi 3.10a',
'netevi/3.14': 'sony snc-rz30 netevi 3.14',
'netzoom/1.00': 'sony snc-z20 netzoom 1.00',
'squid/2.5.stable5': 'squid 2.5.stable5',
'squid/2.5.stable6': 'squid 2.5.stable6',
'squid/2.5.stable9': 'squid 2.5.stable9',
'squid/2.6.stable13': 'squid 2.6.stable13',
'squid/2.6.stable4': 'squid 2.6.stable4',
'squid/2.6.stable7': 'squid 2.6.stable7',
'stweb/1.3.27 (unix) authmysql/3.1 mod_jk/1.1.0 php/3.0.18 php/4.2.3 with': 'stweb 1.3.27',
'sun-java-system-web-server/6.1': 'sun java system web server 6.1',
'sun-java-system-web-server/7.0': 'sun java system web server 7.0',
'sun-one-web-server/6.1': 'sun one web server 6.1',
'smssmtphttp': 'symantec mail security for smtp',
'tcl-webserver/3.5.1 may 27, 2004': 'tclhttpd 3.5.1',
'theserver/2.21l': 'theserver 2.21l',
'userland frontier/9.0.1-winnt': 'userland frontier 9.0.1',
'userland frontier/9.5-winnt': 'userland frontier 9.5',
'realvnc/4.0': 'vnc server enterprise edition e4.2.5',
'vswebserver/01.00 index/01.02.01': 'vs web server 01.00.00',
'virtuoso/05.00.3021 (linux) i686-generic-linux-glibc23-32 vdb': 'virtuoso 5.0.3',
'wdaemon/9.6.1': 'wdaemon 9.6.1',
'webrick/1.3.1 (ruby/1.9.0/2006-07-13)': 'webrick 1.3.1',
'wn/2.4.7': 'wn server 2.4.7',
'allegro-software-rompager/3.06b1': 'xerox docuprint n4025 allegro rompager 3.06b1',
'spyglass_microserver/2.01fc1': 'xerox phaser 6200',
'yaws/1.65 yet another web server': 'yaws 1.65',
'yaws/1.68 yet another web server': 'yaws 1.68',
'yaws/1.72 yet another web server': 'yaws 1.72',
'yaws/sys_6.0.5 yet another web server': 'yaws 6.0.5',
'zeus/4.3': 'zeus 4.3',
'zeus/4.41': 'zeus 4.41',
'unknown/0.0 upnp/1.0 conexant-emweb/r6_1_0': 'zoom adsl',
'zope/(zope 2.10.4-final, python 2.4.4, linux2) zserver/1.1 plone/3.0.1': 'zope 2.10.4',
'zope/(zope 2.5.0 (binary release, python 2.1, linux2-x86), python 2.1.2,': 'zope 2.5.0',
'zope/(zope 2.5.1 (source release, python 2.1, linux2), python 2.1.3,': 'zope 2.5.1',
'zope/(zope 2.6.0 (binary release, python 2.1, linux2-x86), python 2.1.3,': 'zope 2.6.0',
'zope/(zope 2.6.1 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.1',
'zope/(zope 2.6.4 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.4',
'zope/(zope 2.7.4-0, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.4',
'squid/2.5.stable12': 'zope 2.7.4',
'zope/(zope 2.7.5-final, python 2.3.4, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.5',
'zope/(zope 2.7.5-final, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.5',
'zope/(zope 2.7.6-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.6',
'zope/(zope 2.7.6-final, python 2.4.0, linux2) zserver/1.1': 'zope 2.7.6',
'zope/(zope 2.7.7-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.7',
'zope/(zope 2.7.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.7.7',
'zope/(zope 2.7.8-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.8',
'zope/(zope 2.7.9-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.4': 'zope 2.7.9',
'zope/(zope 2.8.0-a0, python 2.3.4, linux2) zserver/1.1 plone/2.0-rc3': 'zope 2.8.0',
'zope/(zope 2.8.2-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.2',
'zope/(zope 2.8.4-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.4',
'zope/(zope 2.8.6-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.6-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.7-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.7',
'zope/(zope 2.9.2-, python 2.4.3, linux2) zserver/1.1 plone/unknown': 'zope 2.9.2',
'zope/(zope 2.9.3-, python 2.4.0, linux2) zserver/1.1': 'zope 2.9.3',
'zope/(zope 2.9.3-, python 2.4.2, linux2) zserver/1.1 plone/2.5': 'zope 2.9.3',
'zope/(zope 2.9.5-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.5',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.6',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.2': 'zope 2.9.6',
'zope/(zope 2.9.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.7',
'zope/(zope 2.9.8-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.8',
'rompager/4.07 upnp/1.0': 'zyxel zywall 10w rompager 4.07',
'and-httpd/0.99.11': 'and-httpd 0.99.11',
'bozohttpd/20060517': 'bozohttpd 20060517',
'bozohttpd/20080303': 'bozohttpd 20080303',
'dwhttpd/4.0.2a7a (inso': 'dwhttpd 4.0.2a7a',
'dwhttpd/4.1a6 (inso': 'dwhttpd 4.1a6',
'dwhttpd/4.2a7 (inso': 'dwhttpd 4.2a7',
'emule': 'emule 0.48a',
'ns-firecat/1.0.x': 'firecat 1.0.0 beta',
'fnord/1.8a': 'fnord 1.8a',
'lighttpd/1.4.13': 'lighttpd 1.4.13',
'lighttpd/1.4.16': 'lighttpd 1.4.16',
'lighttpd/1.4.18': 'lighttpd 1.4.18',
'lighttpd/1.4.19': 'lighttpd 1.4.19',
'lighttpd/1.4.22': 'lighttpd 1.4.22',
'lighttpd/1.5.0': 'lighttpd 1.5.0',
'nginx/0.5.19': 'nginx 0.5.19',
'nginx/0.5.30': 'nginx 0.5.30',
'nginx/0.5.31': 'nginx 0.5.31',
'nginx/0.5.32': 'nginx 0.5.32',
'nginx/0.5.33': 'nginx 0.5.33',
'nginx/0.5.35': 'nginx 0.5.35',
'nginx/0.6.13': 'nginx 0.6.13',
'nginx/0.6.16': 'nginx 0.6.16',
'nginx/0.6.20': 'nginx 0.6.20',
'nginx/0.6.31': 'nginx 0.6.26',
'nostromo 1.9.1': 'nostromo 1.9.1',
'publicfile': 'publicfile',
'thttpd/2.19-mx apr 25 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx dec 2 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx jan 24 2006': 'thttpd 2.19-mx',
'thttpd/2.19-mx oct 20 2003': 'thttpd 2.19-mx',
'thttpd/2.23beta1 26may2002': 'thttpd 2.23beta1',
'thttpd/2.24 26oct2003': 'thttpd 2.24',
'thttpd/2.26 ??apr2004': 'thttpd 2.26',
'vqserver/1.9.56 the world\'s most friendly web server': 'vqserver 1.9.56',
'webcamxp': 'webcamxp pro 2007 3.96.000 beta',
}
# Substring hints used to classify a server banner by host platform / role.
windows_hints = ['microsoft', 'windows', 'win32']
mac_os_hints = ['macos']
# NOTE(review): this list mixes Linux distros with other Unix flavours
# ('solaris', 'unix') — presumably "unix-like" is the intended meaning.
linux_hints = ['suse', 'linux', 'debian', 'solaris', 'red hat', 'unix', 'ubuntu', 'centos']
hosting_hints = ['host', 'hosting']
ftp_servers = {
'crushftp': '*',
'glftpd': 'unix',
'goanywhere ': 'unix',
'proftpd': '*',
'pro-ftpd ': '*',
'pure-ftpd': 'unix',
'pureftpd': 'unix',
'slimftpd ': 'windows',
'slim-ftpd ': 'windows',
'vsftpd ': 'unix',
'wu-ftpd': 'unix',
'wuftpd ': 'unix',
'crushftp': '*',
'alftp': 'windows',
'cerberus ': 'windows',
'completeftp': 'windows',
'filezilla': '*',
'logicaldoc': '*',
'iis': 'windows',
'naslite': 'unix',
'syncplify': 'windows',
'sysax': 'windows',
'war ftp': 'windows',
'ws ftp': 'windows',
'ncftpd': 'unix',
}
smtp_servers = {
'gws': 'google web services',
'ncftpd': 'unix',
'agorum': 'unix',
'atmail': 'unix',
'axigen': 'unix',
'bongo': | |
# Source repository: HIPS/firefly-monte-carlo
import numpy as np
import numpy.random as npr
from abc import ABCMeta
from abc import abstractmethod
EPS = 1e-6 # Gap between bound and likelihood to tolerate floating point error
def log_logistic(x):
    """Overflow-avoiding version of the log logistic function.

    Uses the identity log(1/(1+exp(-x))) = min(0, x) - log(1+exp(-|x|)),
    which never exponentiates a large positive number: exp is only ever
    applied to -|x| <= 0.
    """
    neg_magnitude = -np.abs(x)  # -|x|, always <= 0
    return 0.5 * (x + neg_magnitude) - np.log(1 + np.exp(neg_magnitude))
class CacheWithIdxs(object):
    """Fixed-size cache of per-data-point values, keyed by parameter vector.

    values[n, i] holds the cached quantity for data point n under the i-th
    remembered theta; exists[n, i] marks the entry valid.  At most N_theta
    distinct thetas are remembered; the slot after the last hit ('oldest')
    is recycled when a new theta arrives.
    """
    def __init__(self, N, N_theta):
        # N       : number of data points
        # N_theta : number of distinct parameter vectors to remember
        self.size = N_theta
        self.values = np.zeros((N, N_theta))
        self.exists = np.zeros((N, N_theta), dtype=bool)
        self.lists = []  # A list of lists containing the cached indices
        for i in range(N_theta): self.lists.append([])
        self.thetas = [None]*N_theta
        self.oldest = 0
    def retrieve(self, th, idxs):
        # Return the cached values for (th, idxs); falls through and returns
        # None implicitly on a miss or when any requested index is missing
        # from the matching slot.
        # NOTE(review): despite the original "tests identity" comment, this
        # compares by *value* (np.all(th == th_cache)) against a stored copy.
        for i, th_cache in enumerate(self.thetas):
            if th_cache is not None and np.all(th == th_cache) and np.all(self.exists[idxs, i]):
                self.oldest = (i + 1) % len(self.thetas)
                return self.values[idxs, i]
    def store(self, th, idxs, new_values):
        # NOTE(review): this loop tests object identity ('th is th_cache'),
        # but the new-theta path below stores th.copy(), so the identity test
        # can never match a previously stored theta — the incremental
        # "fill vacant indices" branch appears unreachable, and every store
        # recycles a slot.  Confirm whether a value-equality test (as used in
        # retrieve) was intended.
        for i, th_cache in enumerate(self.thetas):
            if th_cache is not None and th is th_cache:
                assert(np.all(th == th_cache)), "Value of th changed"  # This can be turned off for performance
                vacant = np.where(np.logical_not(self.exists[idxs, i]))
                self.values[idxs[vacant], i] = new_values[vacant]
                self.exists[idxs[vacant], i] = 1
                self.lists[i] += list(idxs[vacant])
                return
        # if we didn't find it, we have a new theta: recycle the oldest slot
        i = self.oldest
        self.oldest = (self.oldest + 1) % len(self.thetas)
        self.thetas[i] = th.copy()
        self.exists[self.lists[i], i] = 0
        self.exists[idxs, i] = 1
        self.values[idxs, i] = new_values
        del self.lists[i][:]
        self.lists[i] += list(idxs)
class SimpleCache(object):
    """Small fixed-size cache mapping a parameter vector to a scalar value.

    Used to memoize the full marginal posterior, which is expensive (it
    touches every data point).  At most N_theta thetas are remembered; the
    slot after the last hit ('oldest') is recycled when a new theta arrives.
    """
    def __init__(self, N_theta):
        # N_theta: number of distinct parameter vectors to remember
        self.size = N_theta
        self.values = np.zeros(N_theta)
        self.thetas = [None]*N_theta
        self.oldest = 0
    def retrieve(self, th):
        """Return the cached value for th, or None (implicitly) on a miss.

        BUGFIX: compare by value, mirroring CacheWithIdxs.retrieve and the
        duplicate check in store() below.  The previous identity test
        ('th is th_cache') could never succeed because store() keeps a
        *copy* of th, so the cache never produced a hit.
        """
        for i, th_cache in enumerate(self.thetas):
            if th_cache is not None and np.all(th == th_cache):
                self.oldest = (i + 1) % len(self.thetas)
                return self.values[i]
    def store(self, th, new_values):
        """Remember new_values for th; a no-op if an equal th is cached."""
        for i, th_cache in enumerate(self.thetas):
            if th_cache is not None and np.all(th == th_cache): return
        # if we didn't find it, recycle the oldest slot for the new theta
        i = self.oldest
        self.oldest = (self.oldest + 1) % len(self.thetas)
        # store a copy so later mutation of the caller's th can't corrupt the key
        self.thetas[i] = th.copy()
        self.values[i] = new_values
class Model(object):
    """Abstract base class for FlyMC models.

    A concrete subclass supplies the exact log likelihood (_logL), a cheap
    lower bound (_logB) with its product over the data (_logBProduct), the
    log gap between the two (_LBgap), a prior (_logPrior), and derivatives
    of each.  Subclasses must set self.N (the number of data points)
    *before* calling Model.__init__, which sizes the caches from it.
    """
    # NOTE(review): Python 2-style metaclass declaration; on Python 3 this
    # line has no effect (abstract methods are not enforced).  The Py3 form
    # would be 'class Model(metaclass=ABCMeta)'.  Left as-is to avoid
    # changing the targeted Python version.
    __metaclass__ = ABCMeta
    def __init__(self, cache_size=2):
        # To make things cache-friendly, should always evaluate the old value first
        self.pseudo_lik_cache = CacheWithIdxs(self.N, cache_size)
        self.p_marg_cache = SimpleCache(cache_size)
        # counters tracking how much (derivative-)likelihood work was done
        self.num_lik_evals = 0
        self.num_D_lik_evals = 0
    def log_p_joint(self, th, z):
        # joint distribution over th and z (z.bright = indices of bright points)
        return self._logPrior(th) + self._logBProduct(th) \
               + np.sum(self.log_pseudo_lik(th, z.bright))
    def D_log_p_joint(self, th, z):
        # Derivative wrt theta of the joint distribution
        return self._D_logPrior(th) + self._D_logBProduct(th) \
               + np.sum(self._D_log_pseudo_lik(th, z.bright), axis=0)
    def log_pseudo_lik(self, th, idxs):
        # Pseudo-likelihood: ratio of bright to dark
        # probabilities of indices idxs at th
        # Check for cached value:
        cached_value = self.pseudo_lik_cache.retrieve(th, idxs)
        if cached_value is not None:
            # this is only to test the cache. Comment out for real use
            # assert np.all(cached_value == self._LBgap(th,idxs) + np.log(1-np.exp(-self._LBgap(th,idxs))) )
            return cached_value
        # Otherwise compute it:
        gap = self._LBgap(th,idxs)
        result = gap + np.log(1-np.exp(-gap)) # this way avoids overflow
        self.pseudo_lik_cache.store(th, idxs, result)
        self.num_lik_evals += len(idxs)
        return result
    def _D_log_pseudo_lik(self, th, idxs):
        # Derivative of pseudo-likelihood wrt theta
        gap = self._LBgap(th,idxs)
        D_LBgap = self._D_LBgap(th, idxs)
        self.num_D_lik_evals += len(idxs)
        # reshape so the 1/(1-exp(-gap)) factor broadcasts over th's dims
        return D_LBgap/(1-np.exp(-gap)).reshape((len(idxs),) + (1,)*th.ndim)
    def log_p_marg(self, th, z=None):
        # marginal posterior prob. Takes z as an optional argument but doesn't use it
        cached_value = self.p_marg_cache.retrieve(th)
        # BUGFIX: was 'cached_value != None', which is unidiomatic (PEP 8)
        # and unsafe should a cached value ever be an array; use an
        # identity test, matching log_pseudo_lik above.
        if cached_value is not None:
            # this is only to test the cache. Comment out for real use
            # assert cached_value == self._logPrior(th) + np.sum(self._logL(th, range(self.N)))
            return cached_value
        result = self._logPrior(th) + np.sum(self._logL(th, range(self.N)))
        self.p_marg_cache.store(th, result)
        self.num_lik_evals += self.N
        return result
    def D_log_p_marg(self, th, z=None):
        # Derivative wrt theta of the marginal posterior (z unused)
        self.num_D_lik_evals += self.N
        return self._D_logPrior(th) + np.sum(self._D_logL(th, range(self.N)), axis=0)
    def log_lik_all(self, th):
        # exact total log likelihood over all data points (no caching)
        return np.sum(self._logL(th, range(self.N)))
    def reset(self):
        # resets the counters and cache for a fresh start
        self.pseudo_lik_cache = CacheWithIdxs(self.N, self.pseudo_lik_cache.size)
        self.p_marg_cache = SimpleCache(self.p_marg_cache.size)
        self.num_lik_evals = 0
        self.num_D_lik_evals = 0
    # ---- hooks a concrete model must implement ----
    @abstractmethod
    def _logL(self, th, idxs):
        # exact log likelihoods of data points idxs at th
        pass
    @abstractmethod
    def _D_logL(self, th, idxs):
        # derivative wrt theta of _logL
        pass
    @abstractmethod
    def _logB(self, th, idxs):
        # lower bound on _logL
        pass
    @abstractmethod
    def _D_logB(self, th, idxs):
        # derivative wrt theta of _logB
        pass
    @abstractmethod
    def _LBgap(self, th, idxs):
        # gap _logL - _logB (must be > 0)
        pass
    @abstractmethod
    def _D_LBgap(self, th, idxs):
        # derivative wrt theta of _LBgap
        pass
    @abstractmethod
    def _logBProduct(self, th):
        # log of the product of all lower bounds
        pass
    @abstractmethod
    def _D_logBProduct(self, th):
        # derivative wrt theta of _logBProduct
        pass
    @abstractmethod
    def _logPrior(self, th):
        # log prior density at th
        pass
    @abstractmethod
    def _D_logPrior(self, th):
        # derivative wrt theta of _logPrior
        pass
    @abstractmethod
    def draw_from_prior(self):
        # sample a theta from the prior
        pass
class LogisticModel(Model):
    """Bayesian logistic regression with per-datum quadratic lower bounds
    (Jaakkola/Jordan style) on the log-logistic likelihood terms.

    The prior on the weights is an isotropic Gaussian with scale ``th0``.
    """

    def __init__(self, x, t, th0=1, y0=1.5, th_map=None):
        '''
        x : Data, a (N, D) array
        t : Targets, a (N) array of 0s and 1s
        th0 : Scale of the prior on weights
        th_map : Size (D) array, an estimate of MAP, for tuning the bounds
        y0 : Point at which to make bounds tight (if th_map
             not given)
        '''
        self.N, self.D = x.shape
        Model.__init__(self)
        # Fold the labels into the data: row n becomes x_n * (+1 or -1),
        # so every margin is y_n = th . dat_n.
        self.dat = x*(2*t[:, None]-1)
        if th_map is None:  # create the same bound for all data points
            y0_vect = np.ones(self.N)*y0
        else:  # create bounds to be tight at th_map
            y0_vect = np.dot(self.dat, th_map[:, None])[:, 0]
        a, b, c = self._logistic_bound(y0_vect)
        self.coeffs = (a, b, c)
        # Sufficient statistics of the data under the quadratic bounds.
        self.dat_sum = np.sum(self.dat*b[:, None], 0)
        self.dat_prod = np.dot(self.dat.T, self.dat*a[:, None])
        # Other hyperparameters
        self.th0 = th0
        self.th_shape = (self.D,)

    def _margin(self, th, idxs):
        # Margins y_n = th . dat_n for the selected data points.
        return np.dot(self.dat[idxs, :], th[:, None])[:, 0]

    @staticmethod
    def _sigmoid_neg(y):
        # Numerically stable sigma(-y) = 1 / (1 + exp(y)) for an array y.
        # Fix: the previous form exp(-y)/(1+exp(-y)) returned NaN (inf/inf)
        # for large negative y; evaluating each half on its safe branch
        # degrades gracefully to 0/1 at the extremes instead.
        out = np.empty_like(np.asarray(y, dtype=float))
        pos = y >= 0
        out[pos] = np.exp(-y[pos]) / (1.0 + np.exp(-y[pos]))
        out[~pos] = 1.0 / (1.0 + np.exp(y[~pos]))
        return out

    def _logL(self, th, idxs):
        # logistic regression log likelihoods; array of size len(idxs)
        return log_logistic(self._margin(th, idxs))

    def _D_logL(self, th, idxs):
        # Gradients of the log likelihoods of data points idxs;
        # d/dth log sigma(y_n) = dat_n * sigma(-y_n).
        y = self._margin(th, idxs)
        return self.dat[idxs, :] * self._sigmoid_neg(y)[:, None]

    def _logB(self, th, idxs):
        # Quadratic lower bound on each log likelihood; array of len(idxs).
        y = self._margin(th, idxs)
        a, b, c = self.coeffs
        return a[idxs]*y**2 + b[idxs]*y + c[idxs]

    def _LBgap(self, th, idxs):
        # Per-datum gap between log likelihood and its lower bound.
        y = self._margin(th, idxs)
        a, b, c = self.coeffs
        return log_logistic(y) - (a[idxs]*y**2 + b[idxs]*y + c[idxs])

    def _D_logB(self, th, idxs):
        # Gradient of the quadratic lower bound for each datum.
        y = self._margin(th, idxs)
        a, b, c = self.coeffs
        return self.dat[idxs, :] * (2*a[idxs]*y + b[idxs])[:, None]

    def _D_LBgap(self, th, idxs):
        # Gradient of the likelihood/bound gap for each datum
        # (uses the stable sigma(-y) -- see _sigmoid_neg).
        y = self._margin(th, idxs)
        a, b, c = self.coeffs
        scalar_gap = self._sigmoid_neg(y) - (2*a[idxs]*y + b[idxs])
        return self.dat[idxs, :] * scalar_gap[:, None]

    def _logBProduct(self, th):
        # Log of the product of all the lower bounds; the constant term is
        # dropped since we don't care about normalization.
        # Fix: previously returned a (1, 1) array (th[None,:] @ ... @ th[:,None]);
        # now returns a plain scalar, which broadcasts identically downstream.
        return np.dot(th, np.dot(self.dat_prod, th)) + np.dot(th, self.dat_sum)

    def _D_logBProduct(self, th):
        # Gradient of _logBProduct (dat_prod is symmetric by construction).
        return self.dat_sum + 2*np.dot(self.dat_prod, th)

    def _logPrior(self, th):
        # Isotropic Gaussian log prior with scale th0 (up to a constant).
        return -0.5*np.sum((th/self.th0)**2)

    def _D_logPrior(self, th):
        return -th/self.th0**2

    def draw_from_prior(self):
        # One weight vector drawn from the N(0, th0^2 I) prior.
        return npr.randn(self.D)*self.th0

    def _logistic_bound(self, y0):
        # Coefficients of a quadratic lower bound to the log-logistic function
        # i.e a*x**2 + b*x + c < log( exp(x)/(1+exp(x)) )
        # y0 parameterizes a family of lower bounds to the logistic function
        # (the bound is tight at +/- y0, loosened by EPS for strictness)
        pexp = np.exp(y0)
        nexp = np.exp(-y0)
        f = pexp + nexp
        a = -0.25/y0*(pexp-nexp)/(2 + f)
        b = 0.5*np.ones(y0.size)
        c = -a*y0**2 - 0.5*np.log(2 + f) - EPS
        return (a, b, c)
class MulticlassLogisticModel(Model):
def __init__(self, x, t, K, th0=1, y0=1.5, th_map=None):
'''
Softmax classification over K classes. The weight, th, are an array
of size (K, D)
Parameters:
x : Data, a (N, D) array
t : Targets, a (N) array of integers from 0 to K-1
th0 : Scale of the prior on weights
th_map : Size (K, D) array, an estimate of MAP, for tuning the bounds
y0 : Point at which to make bounds tight (if th_map not given)
'''
assert K == max(t)-min(t) + 1
self.N, self.D = x.shape
Model.__init__(self)
self.x = x
self.t = t
self.K = K
self.t_hot = np.zeros((self.N, self.K)) # "1-hot" coding
self.t_hot[np.arange(self.N),self.t] = 1
# Creating y_vect, an array of size (N, K)
if th_map is None:
# create the same bound for all data points
y0_vect = np.zeros((self.N, self.K))
y0_vect[np.arange(self.N),t] = | |
# tests/test_logic.py
import unittest
import random
import math
from unittest.case import expectedFailure
import model
import numpy as np
from unittest.mock import Mock
from model import logic
ATTR_COUNT = 7 # Number of attributes associated with a Particle
# For reference:
# [0] = x-coord
# [1] = diameter,
# [2] = y-coord (elevation),
# [3] = uid,
# [4] = active (boolean)
# [5] = age counter
# [6] = loop age counter
class TestGetEventParticlesWithOneSubregion(unittest.TestCase):
    """logic.get_event_particles() behaviour with a single subregion."""

    def setUp(self):
        self.test_length = 10
        self.num_particles = 3
        mock_subregion = Mock()
        mock_subregion.leftBoundary.return_value = 0
        mock_subregion.rightBoundary.return_value = self.test_length
        mock_subregion.getName.return_value = 'Mock_Subregion'
        self.mock_sub_list = [mock_subregion]
        self.entrainment_events = 3
        self.level_limit = np.random.randint(0, np.random.randint(2, 10))

    def test_all_active_returns_valid_list(self):
        model_particles = np.zeros((self.num_particles, ATTR_COUNT))
        model_particles[:, 3] = np.arange(self.num_particles)  # unique ids
        model_particles[:, 4] = np.ones(self.num_particles)  # all active
        model_particles[:, 0] = np.random.randint(
            self.test_length,
            size=self.num_particles)  # random placement
        event_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            model_particles,
            self.level_limit)
        self.assertCountEqual(event_list, model_particles[:, 3])
        # Height dependency should not affect the result here.
        # Fixed: the original `hp_list = list = logic.get_event_particles(...)`
        # rebound both names to the same object, which shadowed the builtin
        # `list` and made the hp_list-vs-list comparison below vacuous.
        hp_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            model_particles,
            self.level_limit,
            height_dependant=True)
        self.assertCountEqual(hp_list, model_particles[:, 3])
        self.assertCountEqual(hp_list, event_list)

    # TODO: Mock logging object and assert warning is logged
    def test_not_all_active_returns_list_of_2(self):
        mp_one_inactive = np.zeros((self.num_particles, ATTR_COUNT))
        mp_one_inactive[:, 3] = np.arange(self.num_particles)
        mp_one_inactive[0][4] = 1
        mp_one_inactive[1][4] = 1
        mp_one_inactive[:, 0] = np.random.randint(self.test_length, size=self.num_particles)
        event_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            mp_one_inactive,
            self.level_limit)
        self.assertEqual(len(event_list), self.num_particles - 1)
        active_list = mp_one_inactive[mp_one_inactive[:, 4] != 0]
        self.assertCountEqual(event_list, active_list[:, 3])

    # TODO: Mock logging object and assert warning is logged
    def test_none_active_returns_empty_list(self):
        np_none_active = np.zeros((self.num_particles, ATTR_COUNT))
        np_none_active[:, 3] = np.arange(self.num_particles)
        np_none_active[:, 0] = np.random.randint(self.test_length, size=self.num_particles)
        empty_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            np_none_active,
            self.level_limit)
        self.assertEqual(len(empty_list), 0)

    def test_all_ghost_particles_returns_ghost_particles(self):
        np_all_ghost = np.zeros((self.num_particles, ATTR_COUNT))
        np_all_ghost[:, 3] = np.arange(self.num_particles)
        np_all_ghost[:, 0] = -1  # x = -1 marks a "ghost" particle
        ghost_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            np_all_ghost,
            self.level_limit)
        self.assertCountEqual(ghost_list, np_all_ghost[:, 3])

    def test_some_ghost_particles_returns_ghost_and_regular(self):
        # NOTE(review): this body is a verbatim copy of
        # test_all_ghost_particles_returns_ghost_particles -- it never places
        # any regular (non-ghost) particles, so the "and_regular" half of the
        # name is untested. TODO: mix ghost and active regular particles here.
        np_all_ghost = np.zeros((self.num_particles, ATTR_COUNT))
        np_all_ghost[:, 3] = np.arange(self.num_particles)
        np_all_ghost[:, 0] = -1
        ghost_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list,
            np_all_ghost,
            self.level_limit)
        self.assertCountEqual(ghost_list, np_all_ghost[:, 3])

    # Do we need a tear down method?
class TestGetEventParticlesWithNSubregions(unittest.TestCase):
    """logic.get_event_particles() behaviour with two subregions."""

    def setUp(self):
        self.test_length = 20
        self.num_particles = 6
        mock_subregion_0 = Mock()
        mock_subregion_0.leftBoundary.return_value = 0
        mock_subregion_0.rightBoundary.return_value = self.test_length / 2
        mock_subregion_0.getName.return_value = 'Mock_Subregion_0'
        mock_subregion_1 = Mock()
        mock_subregion_1.leftBoundary.return_value = self.test_length / 2
        mock_subregion_1.rightBoundary.return_value = self.test_length
        mock_subregion_1.getName.return_value = 'Mock_Subregion_1'
        self.mock_sub_list_2 = [mock_subregion_0, mock_subregion_1]
        self.entrainment_events = 3
        self.level_limit = np.random.randint(0, np.random.randint(2, 10))

    def test_all_active_returns_3_per_subregion(self):
        model_particles = np.zeros((self.num_particles, ATTR_COUNT))
        model_particles[:, 3] = np.arange(self.num_particles)  # unique ids
        model_particles[:, 4] = np.ones(self.num_particles)  # all active
        # Randomly place first three particles in Subregion 1
        model_particles[0:3, 0] = np.random.randint(
            9,
            size=3)
        # Randomly place last three particles in Subregion 2
        model_particles[3:6, 0] = np.random.randint(
            11,
            self.test_length,
            size=3)
        event_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list_2,
            model_particles,
            self.level_limit)
        self.assertCountEqual(event_list, model_particles[:, 3])
        self.assertEqual(len(event_list), self.entrainment_events * 2)
        # Height dependency should not affect the result here.
        # Fixed: `hp_list = list = logic.get_event_particles(...)` previously
        # rebound both names to one object (shadowing builtin `list`), so the
        # hp_list-vs-list comparison below could never fail.
        hp_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list_2,
            model_particles,
            self.level_limit,
            height_dependant=True)
        self.assertCountEqual(hp_list, model_particles[:, 3])
        self.assertCountEqual(hp_list, event_list)

    def test_active_in_1_subregion_returns_only_active(self):
        mp_half_active = np.zeros((self.num_particles, ATTR_COUNT))
        mp_half_active[:, 3] = np.arange(self.num_particles)
        mp_half_active[0:3, 4] = np.ones(int((self.num_particles / 2)))  # First half active
        mp_half_active[0:3, 0] = np.random.randint(10, size=3)
        mp_half_active[3:6, 0] = np.random.randint(10, self.test_length, size=3)
        event_list = logic.get_event_particles(
            self.entrainment_events,
            self.mock_sub_list_2,
            mp_half_active,
            self.level_limit)
        active_particles = mp_half_active[mp_half_active[:, 4] != 0]
        self.assertCountEqual(event_list, active_particles[:, 3])
        self.assertEqual(len(event_list), 3)

    def test_particle_on_boundary_is_not_returned_twice(self):
        one_particle_on_boundary = np.zeros((1, ATTR_COUNT))
        one_particle_on_boundary[0][4] = 1
        one_particle_on_boundary[0][0] = 10  # exactly on the shared boundary
        # Use custom entrainment_event count for simplicity
        entrainment_events = 1
        event_list = logic.get_event_particles(
            entrainment_events,
            self.mock_sub_list_2,
            one_particle_on_boundary,
            self.level_limit)
        self.assertEqual(len(event_list), 1)
# Test Define Subregions
class TestDefineSubregions(unittest.TestCase):
    """logic.define_subregions() boundary layout and flux initialisation."""

    def setUp(self):
        self.bed_length = 10
        self.iterations = 10

    def test_bad_subregion_count_returns_value_error(self):
        # A count that does not evenly divide the bed, and a zero count,
        # must both be rejected.
        for bad_count in (3, 0):
            with self.assertRaises(ValueError):
                logic.define_subregions(self.bed_length,
                                        bad_count,
                                        self.iterations)

    def test_good_parameters_return_good_subregion_list(self):
        # Even split: two subregions covering [0, 5] and [5, 10].
        half = self.bed_length / 2
        even_list = logic.define_subregions(self.bed_length,
                                            2,
                                            self.iterations)
        self.assertEqual(len(even_list), 2)
        self.assertEqual(even_list[0].leftBoundary(), 0)
        self.assertEqual(even_list[0].rightBoundary(), half)
        self.assertEqual(even_list[1].leftBoundary(), half)
        self.assertEqual(even_list[1].rightBoundary(), self.bed_length)
        # Odd split: five equal subregions with evenly spaced boundaries.
        odd_count = 5
        sub_length = self.bed_length / odd_count
        odd_list = logic.define_subregions(self.bed_length,
                                           odd_count,
                                           self.iterations)
        self.assertEqual(len(odd_list), odd_count)
        for idx, subregion in enumerate(odd_list):
            self.assertEqual(subregion.leftBoundary(), idx * sub_length)
            if idx == odd_count - 1:
                # Last boundary must land exactly on the bed end.
                self.assertEqual(subregion.rightBoundary(), self.bed_length)
            else:
                self.assertEqual(subregion.rightBoundary(), (idx + 1) * sub_length)

    def test_all_subregion_flux_are_init_0(self):
        expected_flux = np.zeros(self.iterations, dtype=np.int64)
        for subregion in logic.define_subregions(self.bed_length,
                                                 2,
                                                 self.iterations):
            flux_list = subregion.getFluxList()
            self.assertEqual(len(flux_list), self.iterations)
            self.assertCountEqual(flux_list, expected_flux)

    # TODO: test that incrementFlux() works
# Test Build Streambed
class TestBuildStreambed(unittest.TestCase):
    """logic.build_streambed() packs the bed wall-to-wall with particles."""

    # Compatibility of stream length and diameter is validated at model start.
    def test_compat_diam_returns_good_particles(self):
        stream_length = 100
        diameter = 0.5
        n_expected = stream_length / diameter
        bed_particles = logic.build_streambed(stream_length, diameter)
        self.assertEqual(len(bed_particles), n_expected)
        # Centres step by one diameter; ids are -1, -2, ...; the returned
        # arrays are stored in reverse order.
        expected_centres = np.arange(diameter / 2, n_expected * diameter, step=diameter)
        expected_ids = np.arange(1, int(n_expected) + 1) * -1
        expected_diam = np.ones(int(n_expected)) * diameter
        self.assertIsNone(np.testing.assert_array_equal(
            expected_centres[::-1], bed_particles[:, 0]))
        self.assertIsNone(np.testing.assert_array_equal(
            expected_ids[::-1], bed_particles[:, 3]))
        self.assertIsNone(np.testing.assert_array_equal(
            expected_diam[::-1], bed_particles[:, 1]))
        # Elevation, active flag and both age counters all start at zero.
        expected_zeros = np.zeros(int(n_expected))
        for attribute_idx in [2, 4, 5, 6]:
            self.assertIsNone(np.testing.assert_array_equal(
                expected_zeros, bed_particles[:, attribute_idx]))
        # The final particle's extent must reach exactly to the stream end.
        final_particle = bed_particles[-len(bed_particles)]
        self.assertEqual(final_particle[0] + diameter / 2, stream_length)
class TestSetModelParticles(unittest.TestCase):
    """logic.set_model_particles() placement and initial attributes."""

    def setUp(self):
        stream_length = 10
        self.diam = 0.5
        self.pack_fraction = 0.8
        # Directly from https://math.stackexchange.com/questions/2293201/
        # Variables used for geometric placement
        d = np.divide(np.multiply(np.divide(self.diam, 2),
                                  self.diam),
                      self.diam)
        self.h = np.sqrt(np.square(self.diam) - np.square(d))
        # Mock a full bed_particles array
        num_bed_particles = int(stream_length / self.diam)
        bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
        bed_particles[:, 0] = np.arange(self.diam / 2, stream_length + (self.diam / 2), step=self.diam)
        bed_particles[:, 3] = np.arange(1, num_bed_particles + 1) * -1
        self.bed_particles = bed_particles
        # Make all vertices created by the touching bed particles available
        # -----> 0.5, 1.0, 1.5, ... , 9.5 (with stream length 10)
        self.available_vertices = np.arange(self.diam, stream_length, step=self.diam)

    def test_model_particles_placed_at_valid_locations(self):
        model_particles, model_supports = logic.set_model_particles(self.bed_particles,
                                                                    self.available_vertices,
                                                                    self.diam,
                                                                    self.pack_fraction,
                                                                    self.h)
        # Particles should only be placed at available vertices
        self.assertTrue(set(model_particles[:, 0]).issubset(self.available_vertices))
        # All placements should be unique
        self.assertTrue(len(model_particles[:, 0]) == len(set(model_particles[:, 0])))
        # All ids should be unique
        # Fixed: this assertion was missing -- only the comment was present.
        self.assertTrue(len(model_particles[:, 3]) == len(set(model_particles[:, 3])))
        # There should be no stacking
        self.assertEqual(len(set(model_particles[:, 2])), 1)

    def test_all_model_particles_have_valid_initial_attributes(self):
        model_particles, model_supports = logic.set_model_particles(self.bed_particles,
                                                                    self.available_vertices,
                                                                    self.diam,
                                                                    self.pack_fraction,
                                                                    self.h)
        # all diam = self.diam
        expected_diam = np.ones(len(model_particles)) * self.diam
        self.assertCountEqual(model_particles[:, 1], expected_diam)
        # unique id's
        self.assertTrue(len(model_particles[:, 3]) == len(set(model_particles[:, 3])))
        # all model are active
        expected_activity = np.ones(len(model_particles))
        self.assertCountEqual(model_particles[:, 4], expected_activity)
        # 0 age counter and loop age
        expected_age_and_loop = np.zeros(len(model_particles))
        self.assertCountEqual(model_particles[:, 5], expected_age_and_loop)
        self.assertCountEqual(model_particles[:, 6], expected_age_and_loop)
        # Supports should all be negative (resting on the bed)
        # NOTE(review): assertTrue with two arguments never fails (the second
        # argument is treated as the failure message), and the expression
        # filters on particle ids rather than on model_supports. Left
        # unchanged pending confirmation of intent -- TODO: probably should
        # assert that every value in model_supports is negative.
        self.assertTrue(len(model_particles), len(model_particles[model_particles[:, 3] < 0]))
class TestComputeAvailableVerticesLifted(unittest.TestCase):
    """logic.compute_available_vertices() when particles are lifted."""

    def setUp(self):
        # Build a bed of touching particles spanning the stream.
        self.stream_length = 5
        self.diam = 0.5
        num_bed_particles = int(self.stream_length / self.diam)  # 10 bed particles
        bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
        bed_particles[:, 0] = np.arange(self.diam / 2, self.stream_length + (self.diam / 2), step=self.diam)
        bed_particles[:, 3] = np.arange(num_bed_particles)  # unique ids
        self.bed_particles = bed_particles
        # Vertices between touching bed particles: 0.5, 1.0, ..., 4.5.
        self.expected_bed_vertices = np.arange(self.diam, self.stream_length, step=self.diam)

    def test_only_bed_and_empty_lifted_returns_expected_bed_vert(self):
        level_limit = 3  # Arbitrary level limit
        no_model_particles = np.empty((0, ATTR_COUNT))
        # A bed of n touching particles exposes n - 1 vertices.
        available_vertices = logic.compute_available_vertices(
            no_model_particles, self.bed_particles, self.diam,
            level_limit=level_limit, lifted_particles=[])
        self.assertEqual(len(self.bed_particles) - 1, len(available_vertices))
        self.assertCountEqual(available_vertices, self.expected_bed_vertices)

    def test_bed_and_all_model_lifted_returns_expected_bed_vertices(self):
        level_limit = 3
        num_model_particles = 3
        model_particles = np.zeros([num_model_particles, ATTR_COUNT], dtype=float)
        model_particles[:, 0] = self.expected_bed_vertices[0:3]  # first 3 vertices
        model_particles[:, 3] = np.arange(num_model_particles)
        # With every model particle lifted, only the bed vertices remain.
        available_vertices = logic.compute_available_vertices(
            model_particles, self.bed_particles, self.diam,
            level_limit=level_limit, lifted_particles=model_particles[:, 3].astype(int))
        self.assertEqual(len(available_vertices), len(self.bed_particles) - 1)
        self.assertCountEqual(available_vertices, self.expected_bed_vertices)

    def test_not_touching_and_one_lifted_model_returns_valid_vertices(self):
        level_limit = 3
        num_model_particles = 3
        model_particles = np.zeros([num_model_particles, ATTR_COUNT], dtype=float)
        model_particles[:, 0] = self.expected_bed_vertices[0:3]
        model_particles[:, 3] = np.arange(num_model_particles)
        # Lift only the first particle: its vertex becomes available again,
        # while the two still-resting particles keep theirs occupied.
        # NOTE(review): a scalar id (not a list) is passed as lifted_particles
        # here, unlike the array used above -- confirm logic handles both.
        available_vertices = logic.compute_available_vertices(
            model_particles, self.bed_particles, self.diam,
            level_limit=level_limit, lifted_particles=model_particles[0][3].astype(int))
        expected_vertices = np.delete(self.expected_bed_vertices, [1, 2])
        self.assertEqual(len(available_vertices), len(expected_vertices))
        self.assertCountEqual(available_vertices, expected_vertices)
class TestComputeAvailableVerticesNotLifted(unittest.TestCase):
def setUp(self):
# make bed particles
self.stream_length = 5
self.diam = 0.5
# Mock a full bed_particles array
num_bed_particles = int(self.stream_length/self.diam) # 10 bed particles
bed_particles = np.zeros([num_bed_particles, ATTR_COUNT], dtype=float)
bed_particles[:,0] = np.arange(self.diam/2, self.stream_length+(self.diam/2), | |
# logstar/scedar
import numpy as np
import scipy.sparse as spsp
import seaborn as sns
import scedar.eda as eda
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import pytest
class TestSparseSampleFeatureMatrix(object):
"""docstring for TestSparseSampleFeatureMatrix"""
sfm5x10_arr = spsp.csr_matrix(np.random.ranf(50).reshape(5, 10))
sfm3x3_arr = spsp.csr_matrix(np.random.ranf(9).reshape(3, 3))
sfm5x10_lst = spsp.csr_matrix(
list(map(list, np.random.ranf(50).reshape(5, 10))))
plt_arr = spsp.csr_matrix(np.arange(60).reshape(6, 10))
plt_sdm = eda.SampleFeatureMatrix(plt_arr,
sids=list("abcdef"),
fids=list(map(lambda i: 'f{}'.format(i),
range(10))))
# array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
# [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
# [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
# [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
# [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
ref_plt_f_sum = np.arange(0, 501, 100) + np.arange(10).sum()
ref_plt_s_sum = np.arange(0, 55, 6) + np.arange(0, 51, 10).sum()
ref_plt_f_mean = ref_plt_f_sum / 10
ref_plt_s_mean = ref_plt_s_sum / 6
ref_plt_f_cv = np.arange(10).std(ddof=1) / ref_plt_f_mean
ref_plt_s_cv = np.arange(0, 51, 10).std(ddof=1) / ref_plt_s_mean
ref_plt_f_gc = np.apply_along_axis(eda.stats.gc1d, 1, plt_arr.toarray())
ref_plt_s_gc = np.apply_along_axis(eda.stats.gc1d, 0, plt_arr.toarray())
ref_plt_f_a15 = np.array([0, 5, 10, 10, 10, 10])
ref_plt_s_a35 = np.array([2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    # ------------------------------------------------------------------
    # Constructor validation: malformed x / sids / fids must raise.
    # ------------------------------------------------------------------
    def test_init_x_none(self):
        """x=None is rejected."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(None)

    def test_init_x_bad_type(self):
        """Nested lists with mixed element types are rejected."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix([[0, 1], ['a', 2]])

    def test_init_x_1d(self):
        """x must be 2D; a flat list is rejected."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix([1, 2, 3])

    def test_init_dup_sfids(self):
        """Duplicate sample or feature ids are rejected (int and str ids)."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm5x10_lst, [0, 0, 1, 2, 3])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, ['0', '0', '1', '2', '3'])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm5x10_lst, None, [0, 0, 1, 2, 3])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm5x10_lst, None,
                                    ['0', '0', '1', '2', '3'])

    def test_init_empty_x_sfids(self):
        """Degenerate shapes (2x0 and 0x0) construct and give empty stats."""
        sfm1 = eda.SampleFeatureMatrix(np.array([[], []]), None, [])
        assert sfm1._x.shape == (2, 0)
        assert sfm1._sids.shape == (2,)
        assert sfm1._fids.shape == (0,)
        np.testing.assert_equal(sfm1.s_sum(), [])
        np.testing.assert_equal(sfm1.f_sum(), [0, 0])
        np.testing.assert_equal(sfm1.s_cv(), [])
        # CV over zero features is NaN for each of the two samples.
        np.testing.assert_equal(np.isnan(sfm1.f_cv()), [True, True])
        sfm2 = eda.SampleFeatureMatrix(np.empty((0, 0)))
        assert sfm2._x.shape == (0, 0)
        assert sfm2._sids.shape == (0,)
        assert sfm2._fids.shape == (0,)
        np.testing.assert_equal(sfm2.s_sum(), [])
        np.testing.assert_equal(sfm2.f_sum(), [])
        np.testing.assert_equal(sfm2.s_cv(), [])
        np.testing.assert_equal(sfm2.f_cv(), [])

    def test_init_wrong_sid_len(self):
        """sid list length must match the number of samples (rows)."""
        # wrong sid size
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(10)), list(range(5)))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm5x10_lst, list(range(10)))

    def test_init_wrong_fid_len(self):
        """fid list length must match the number of features (columns)."""
        # wrong fid size
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(5)), list(range(2)))

    def test_init_wrong_sfid_len(self):
        """Both id lists wrong-sized is also rejected."""
        # wrong sid and fid sizes
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm5x10_lst, list(range(10)), list(range(10)))

    def test_init_non1d_sfids(self):
        """ids must be 1D; 2D arrays are rejected in either position."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([[0], [1], [2]]),
                                    np.array([[0], [1], [1]]))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([[0], [1], [2]]),
                                    np.array([0, 1, 2]))
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(self.sfm3x3_arr, np.array([0, 1, 2]),
                                    np.array([[0], [1], [2]]))

    def test_init_bad_sid_type(self):
        """bool, nested, tuple, or ndarray sids are all rejected."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [False, True, 2], [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [[0], [0, 1], 2], [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, np.array([0, 1, 2]), [0, 1, 1])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [(0), (0, 1), 2], [0, 1, 1])

    def test_init_bad_fid_type(self):
        """bool, nested, tuple, or ndarray fids are all rejected."""
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [False, True, 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [[0], [0, 1], 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], [(0), (0, 1), 2])
        with pytest.raises(Exception) as excinfo:
            eda.SampleFeatureMatrix(
                self.sfm3x3_arr, [0, 1, 2], np.array([0, 1, 2]))

    def test_valid_init(self):
        """Well-formed combinations construct without raising."""
        eda.SampleFeatureMatrix(
            self.sfm5x10_arr, list(range(5)), list(range(10)))
        eda.SampleFeatureMatrix(self.sfm5x10_arr, None, list(range(10)))
        eda.SampleFeatureMatrix(self.sfm5x10_arr, list(range(5)), None)
        eda.SampleFeatureMatrix(np.arange(10).reshape(-1, 1))
        eda.SampleFeatureMatrix(np.arange(10).reshape(1, -1))
    # ------------------------------------------------------------------
    # Subsetting: ind_x (integer positions) and id_x (sample/feature ids)
    # ------------------------------------------------------------------
    def test_ind_x(self):
        """ind_x subsets by position; None/default selects everything;
        out-of-range positions raise IndexError."""
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        # select sf
        ss_sfm = sfm.ind_x([0, 5], list(range(9)))
        assert ss_sfm._x.shape == (2, 9)
        assert ss_sfm.sids == ['a', 'f']
        assert ss_sfm.fids == list(range(10, 19))
        # select with Default
        ss_sfm = sfm.ind_x()
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select with None
        ss_sfm = sfm.ind_x(None, None)
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select non-existent inds
        with pytest.raises(IndexError) as excinfo:
            sfm.ind_x([6])
        with pytest.raises(IndexError) as excinfo:
            sfm.ind_x(None, ['a'])

    def test_ind_x_empty(self):
        """Empty index lists yield empty but well-shaped matrices."""
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        empty_s = sfm.ind_x([])
        assert empty_s._x.shape == (0, 10)
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sfm.ind_x(None, [])
        assert empty_f._x.shape == (6, 0)
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sfm.ind_x([], [])
        assert empty_sf._x.shape == (0, 0)
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)

    def test_id_x(self):
        """id_x subsets by id; unknown ids raise ValueError (not IndexError)."""
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        # select sf
        ss_sfm = sfm.id_x(['a', 'f'], list(range(10, 15)))
        assert ss_sfm._x.shape == (2, 5)
        assert ss_sfm.sids == ['a', 'f']
        assert ss_sfm.fids == list(range(10, 15))
        # select with Default
        ss_sfm = sfm.id_x()
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select with None
        ss_sfm = sfm.id_x(None, None)
        assert ss_sfm._x.shape == (6, 10)
        assert ss_sfm.sids == list("abcdef")
        assert ss_sfm.fids == list(range(10, 20))
        # select non-existent inds
        # id lookup raises ValueError
        with pytest.raises(ValueError) as excinfo:
            sfm.id_x([6])
        with pytest.raises(ValueError) as excinfo:
            sfm.id_x(None, ['a'])

    def test_id_x_empty(self):
        """Empty id lists yield empty but well-shaped matrices."""
        sids = list("abcdef")
        fids = list(range(10, 20))
        sfm = eda.SampleFeatureMatrix(
            np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
        empty_s = sfm.id_x([])
        assert empty_s._x.shape == (0, 10)
        assert empty_s._sids.shape == (0,)
        assert empty_s._fids.shape == (10,)
        empty_f = sfm.id_x(None, [])
        assert empty_f._x.shape == (6, 0)
        assert empty_f._sids.shape == (6,)
        assert empty_f._fids.shape == (0,)
        empty_sf = sfm.id_x([], [])
        assert empty_sf._x.shape == (0, 0)
        assert empty_sf._sids.shape == (0,)
        assert empty_sf._fids.shape == (0,)
    # ------------------------------------------------------------------
    # Plot tests. Each returns a matplotlib Figure that the
    # mpl_image_compare marker compares against a stored baseline image.
    # The 'normed' filterwarnings entries silence a histogram deprecation
    # warning raised inside the dist plots.
    # ------------------------------------------------------------------
    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_ax(self):
        """Plotting into a caller-supplied Axes still returns a Figure."""
        fig, axs = plt.subplots(ncols=2)
        fig = self.plt_sdm.s_ind_regression_scatter(
            0, 1, figsize=(5, 5), ax=axs[0], ci=None)
        plt.close()
        return fig

    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_s_id_regression_scatter(self):
        return self.plt_sdm.s_id_regression_scatter(
            "a", "b", feature_filter=[1, 2, 3], figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_labs(self):
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, xlab='X', ylab='Y', figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_bool_ff(self):
        # feature_filter as a boolean mask
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, feature_filter=[True]*2 + [False]*8, figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_int_ff(self):
        # feature_filter as a list of feature indices
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1, feature_filter=[0, 1], figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_s_ind_regression_scatter_custom_func_ff(self):
        # feature_filter as a predicate over (x, y) value pairs
        return self.plt_sdm.s_ind_regression_scatter(
            0, 1,
            feature_filter=lambda x, y: (x in (0, 1, 2)) and (10 < y < 12),
            figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_custom_func_sf(self):
        # plt_sdm = eda.SampleFeatureMatrix(
        # plt_arr,
        # sids=list("abcdef"),
        # fids=list(map(lambda i: 'f{}'.format(i),
        # range(10))))
        # array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
        # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
        # [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
        # [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
        # [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1,
            sample_filter=lambda x, y: (x in (0, 10, 20)) and (10 < y < 30),
            figsize=(5, 5), ci=None)

    def test_f_id_ind_x_vec(self):
        """Feature vectors fetched by index and by id agree, and match the
        known columns of the 6x10 arange matrix above."""
        x = self.plt_sdm.f_ind_x_vec(0)
        x2 = self.plt_sdm.f_id_x_vec('f0')
        np.testing.assert_equal(x, x2)
        np.testing.assert_equal(x, [0, 10, 20, 30, 40, 50])
        x3 = self.plt_sdm.f_ind_x_vec(6)
        x4 = self.plt_sdm.f_id_x_vec('f6')
        np.testing.assert_equal(x3, x4)
        np.testing.assert_equal(x3, [6, 16, 26, 36, 46, 56])

    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_no_ff(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_ind_ff(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, sample_filter=[0, 2, 5], figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    def test_f_ind_regression_scatter_labs(self):
        return self.plt_sdm.f_ind_regression_scatter(
            0, 1, sample_filter=[0, 2, 5], figsize=(5, 5), title='testregscat',
            xlab='x', ylab='y', ci=None)

    @pytest.mark.mpl_image_compare
    def test_f_id_regression_scatter(self):
        return self.plt_sdm.f_id_regression_scatter(
            'f5', 'f6', sample_filter=[0, 2, 5], figsize=(5, 5), ci=None)

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_ax(self):
        """Distribution plot into a caller-supplied Axes."""
        fig, axs = plt.subplots(ncols=2)
        fig = self.plt_sdm.s_ind_dist(0, figsize=(5, 5), ax=axs[0])
        plt.close()
        return fig

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist(self):
        return self.plt_sdm.s_ind_dist(0, figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_id_dist(self):
        return self.plt_sdm.s_id_dist("a", feature_filter=[1, 2, 3],
                                      figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_labs(self):
        return self.plt_sdm.s_ind_dist(0, xlab='X', ylab='Y', figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_bool_ff(self):
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=[True]*2 + [False]*8, title='testdist',
            figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_int_ff(self):
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=[0, 1], figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_s_ind_dist_custom_func_ff(self):
        return self.plt_sdm.s_ind_dist(
            0, feature_filter=lambda x: x in (0, 1, 2),
            figsize=(5, 5))

    @pytest.mark.mpl_image_compare
    @pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
    def test_f_ind_dist_custom_func_sf(self):
        return self.plt_sdm.f_ind_dist(
            0, sample_filter=lambda x: x in (0, 10, 20),
            figsize=(5, 5))
@pytest.mark.mpl_image_compare
@pytest.mark.filterwarnings("ignore:The | |
# lpsinger/pytest-doctestplus
import glob
import os
from packaging.version import Version
from textwrap import dedent
import sys
import pytest
import doctest
from pytest_doctestplus.output_checker import OutputChecker, FLOAT_CMP
pytest_plugins = ['pytester']
def test_ignored_whitespace(testdir):
    """NORMALIZE_WHITESPACE lets a doctest pass despite trailing-whitespace
    differences between actual and expected output."""
    testdir.makeini(
        """
        [pytest]
        doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
        doctestplus = enabled
        """
    )
    p = testdir.makepyfile(
        """
        class MyClass(object):
            '''
            >>> a = "foo "
            >>> print(a)
            foo
            '''
            pass
        """
    )
    reprec = testdir.inline_run(p, "--doctest-plus")
    reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(testdir):
    """Without NORMALIZE_WHITESPACE the same trailing-whitespace doctest
    fails."""
    testdir.makeini(
        """
        [pytest]
        doctest_optionflags = ELLIPSIS
        doctestplus = enabled
        """
    )
    p = testdir.makepyfile(
        """
        class MyClass(object):
            '''
            >>> a = "foo "
            >>> print(a)
            foo
            '''
            pass
        """
    )
    reprec = testdir.inline_run(p, "--doctest-plus")
    reprec.assertoutcome(failed=1, passed=0)
def test_float_cmp(testdir):
    """A truncated float repr fails by default but passes with a per-line
    ``# doctest: +FLOAT_CMP`` directive."""
    testdir.makeini(
        """
        [pytest]
        doctest_optionflags = ELLIPSIS
        doctestplus = enabled
        """
    )
    # Note: the `fail` statement in f() is never executed -- doctests do
    # not run the function bodies; f's doctest fails on the float compare.
    p = testdir.makepyfile(
        """
        def f():
            '''
            >>> x = 1/3.
            >>> x
            0.333333
            '''
            fail

        def g():
            '''
            >>> x = 1/3.
            >>> x # doctest: +FLOAT_CMP
            0.333333
            '''
            pass
        """
    )
    reprec = testdir.inline_run(p, "--doctest-plus")
    reprec.assertoutcome(failed=1, passed=1)
def test_float_cmp_list(testdir):
    """FLOAT_CMP compares floats element-wise inside a printed list."""
    testdir.makeini(
        """
        [pytest]
        doctest_optionflags = ELLIPSIS
        doctestplus = enabled
        """
    )
    p = testdir.makepyfile(
        """
        def g():
            '''
            >>> x = [1/3., 2/3.]
            >>> x # doctest: +FLOAT_CMP
            [0.333333, 0.666666]
            '''
            pass
        """
    )
    reprec = testdir.inline_run(p, "--doctest-plus")
    reprec.assertoutcome(failed=0, passed=1)
def test_float_cmp_global(testdir):
    """FLOAT_CMP set globally via doctest_optionflags applies to every
    doctest without needing a per-line directive.

    Each makepyfile() call below overwrites the same test module.
    """
    testdir.makeini("""
        [pytest]
        doctest_optionflags = FLOAT_CMP
        doctestplus = enabled
    """)
    p = testdir.makepyfile("""
        def f():
            '''
            >>> x = 1/3.
            >>> x
            0.333333
            '''
            pass
    """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    p = testdir.makepyfile("""
        def f():
            '''
            >>> x = 2/7.
            >>> x
            0.285714
            '''
            pass
    """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    p = testdir.makepyfile("""
        def f():
            '''
            >>> x = 1/13.
            >>> x
            0.076923
            '''
            pass
    """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    p = testdir.makepyfile("""
        def f():
            '''
            >>> x = 1/13.
            >>> x
            0.07692
            '''
            pass
    """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(failed=1)  # not close enough
def test_float_cmp_and_ellipsis(testdir):
    """FLOAT_CMP and ELLIPSIS combine: a ``...`` line may absorb any number
    of output lines while remaining floats are compared numerically."""
    testdir.makeini(
        """
        [pytest]
        doctest_optionflags = FLOAT_CMP ELLIPSIS
        doctestplus = enabled
        """)
    # whitespace is normalized by default
    # Trailing ellipsis absorbs the rest of the output.
    p = testdir.makepyfile(
        """
        from __future__ import print_function

        def f():
            '''
            >>> for char in ['A', 'B', 'C', 'D', 'E']:
            ...     print(char, float(ord(char)))
            A 65.0
            B 66.0
            ...
            '''
            pass
        """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    # Interior ellipsis with an anchored final line.
    p = testdir.makepyfile(
        """
        from __future__ import print_function

        def f():
            '''
            >>> for char in ['A', 'B', 'C', 'D', 'E']:
            ...     print(char, float(ord(char)))
            A 65.0
            B 66.0
            ...
            E 69.0
            '''
            pass
        """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    # Multiple interior ellipses.
    p = testdir.makepyfile(
        """
        from __future__ import print_function

        def f():
            '''
            >>> for char in ['A', 'B', 'C', 'D', 'E']:
            ...     print(char, float(ord(char)))
            A 65.0
            ...
            C 67.0
            ...
            E 69.0
            '''
            pass
        """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(passed=1)
    # The anchored float still has to match numerically (70.0 != 69.0).
    p = testdir.makepyfile(
        """
        from __future__ import print_function

        def f():
            '''
            >>> for char in ['A', 'B', 'C', 'D', 'E']:
            ...     print(char, float(ord(char)))
            A 65.0
            ...
            E 70.0
            '''
            pass
        """)
    testdir.inline_run(p, "--doctest-plus").assertoutcome(failed=1)
def test_allow_bytes_unicode(testdir):
    """The ALLOW_BYTES / ALLOW_UNICODE directives parse without error."""
    testdir.makeini(
        """
        [pytest]
        doctestplus = enabled
        """
    )
    # These are dummy tests just to check that doctest-plus can parse the
    # ALLOW_BYTES and ALLOW_UNICODE options. It doesn't actually implement
    # these options.
    p = testdir.makepyfile(
        """
        def f():
            '''
            >>> 1 # doctest: +ALLOW_BYTES
            1
            >>> 1 # doctest: +ALLOW_UNICODE
            1
            '''
            pass
        """
    )
    reprec = testdir.inline_run(p, "--doctest-plus")
    reprec.assertoutcome(passed=1)
class TestFloats:
    """Unit tests for the float-aware helpers on OutputChecker."""

    def test_normalize_floats(self):
        """Numerically equal floats match; whitespace differences require
        NORMALIZE_WHITESPACE; different values never match."""
        c = OutputChecker()
        got = "A 65.0\nB 66.0"
        want = "A 65.0\nB 66.0"
        assert c.normalize_floats(want, got, flags=FLOAT_CMP)
        # Trailing whitespace only matches when NORMALIZE_WHITESPACE is on.
        want = "A 65.0\nB 66.0 "
        assert c.normalize_floats(want, got, flags=FLOAT_CMP | doctest.NORMALIZE_WHITESPACE)
        want = "A 65.0\nB 66.01"
        assert not c.normalize_floats(want, got, flags=FLOAT_CMP)

    def test_normalize_with_blank_line(self):
        """<BLANKLINE> markers match empty output lines unless
        DONT_ACCEPT_BLANKLINE is set."""
        c = OutputChecker()
        got = "\nA 65.0\nB 66.0"
        want = "<BLANKLINE>\nA 65.0\nB 66.0"
        assert c.normalize_floats(want, got, flags=FLOAT_CMP)
        assert not c.normalize_floats(want, got, flags=FLOAT_CMP | doctest.DONT_ACCEPT_BLANKLINE)

    def test_normalize_with_ellipsis(self):
        """ELLIPSIS and FLOAT_CMP combine: '...' absorbs skipped lines while
        floats on the remaining lines are compared numerically."""
        c = OutputChecker()
        got = []
        for char in ['A', 'B', 'C', 'D', 'E']:
            got.append('%s %s' % (char, float(ord(char))))
        got = '\n'.join(got)
        # '...G 70.0' does not occur in the output.
        want = "A 65.0\nB 66.0\n...G 70.0"
        assert not c.normalize_floats(want, got, flags=doctest.ELLIPSIS | FLOAT_CMP)
        want = "A 65.0\nB 66.0\n..."
        assert c.normalize_floats(want, got, flags=doctest.ELLIPSIS | FLOAT_CMP)
        want = "A 65.0\nB 66.0\n...\nE 69.0"
        assert c.normalize_floats(want, got, flags=doctest.ELLIPSIS | FLOAT_CMP)
        # Blank-line marker followed by an ellipsis.
        got = "\n" + got
        want = "<BLANKLINE>\nA 65.0\nB 66.0\n..."
        assert c.normalize_floats(want, got, flags=doctest.ELLIPSIS | FLOAT_CMP)

    def test_partial_match(self):
        """partial_match checks that the chunks (second argument) occur in
        order within the lines (first argument); an empty leading/trailing
        chunk allows unmatched lines at the start/end."""
        c = OutputChecker()
        assert not c.partial_match(
            ['1', '2', '3', '4'],
            [['2'], []],
        )
        assert c.partial_match(
            ['1', '2', '3', '4'],
            [[], ['2'], []],
        )
        assert c.partial_match(
            ['1', '2', '3', '4'],
            [['1', '2'], []],
        )
        assert c.partial_match(
            ['1', '2', '3', '4'],
            [['1', '2'], ['4']],
        )
        assert c.partial_match(
            ['1', '2', '3', '4', '5'],
            [['1', '2'], ['4', '5']],
        )
        assert c.partial_match(
            ['1', '2', '3', '4', '5', '6'],
            [['1', '2'], ['4'], ['6']],
        )
        assert c.partial_match(
            [str(i) for i in range(20)],
            [[], ['1', '2'], ['4'], ['6'], []],
        )
        # '7' occurs before '6', so the chunks are out of order.
        assert not c.partial_match(
            [str(i) for i in range(20)],
            [[], ['1', '2'], ['7'], ['6'], []],
        )
def test_requires(testdir):
    """``.. doctest-requires::`` skips the doctest unless all the named
    modules (optionally with version specifiers) are importable."""
    testdir.makeini(
        """
        [pytest]
        doctestplus = enabled
        """)
    # should be ignored
    p = testdir.makefile(
        '.rst',
        """
        .. doctest-requires:: foobar

            >>> import foobar
        """
    )
    testdir.inline_run(p, '--doctest-plus', '--doctest-rst').assertoutcome(skipped=1)
    # should run as expected
    p = testdir.makefile(
        '.rst',
        """
        .. doctest-requires:: sys

            >>> import sys
        """
    )
    testdir.inline_run(p, '--doctest-plus', '--doctest-rst').assertoutcome(passed=1)
    # testing this in case if doctest-requires just ignores everything and pass unconditionally
    p = testdir.makefile(
        '.rst',
        """
        .. doctest-requires:: sys glob, re,math

            >>> import sys
            >>> assert 0
        """
    )
    testdir.inline_run(p, '--doctest-plus', '--doctest-rst').assertoutcome(failed=1)
    # package with version is available
    p = testdir.makefile(
        '.rst',
        """
        .. doctest-requires:: sys pytest>=1.0

            >>> import sys, pytest
        """
    )
    testdir.inline_run(p, '--doctest-plus', '--doctest-rst').assertoutcome(passed=1)
    # package with version is not available
    p = testdir.makefile(
        '.rst',
        """
        .. doctest-requires:: sys pytest<1.0 glob

            >>> import sys, pytest, glob
            >>> assert 0
        """
    )
    # passed because 'pytest<1.0' was not satisfied and 'assert 0' was not evaluated
    testdir.inline_run(p, '--doctest-plus', '--doctest-rst').assertoutcome(skipped=1)
def test_ignore_warnings_module(testdir):
    """The IGNORE_WARNINGS directive suppresses warnings that would
    otherwise fail a module doctest under ``-W error``."""
    # First check that we get a warning if we don't add the IGNORE_WARNINGS
    # directive
    p = testdir.makepyfile(
        """
        def myfunc():
            '''
            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning)
            '''
            pass
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "-W error")
    reprec.assertoutcome(failed=1, passed=0)
    # Now try with the IGNORE_WARNINGS directive
    p = testdir.makepyfile(
        """
        def myfunc():
            '''
            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +IGNORE_WARNINGS
            '''
            pass
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "-W error")
    reprec.assertoutcome(failed=0, passed=1)
def test_ignore_warnings_rst(testdir):
    """IGNORE_WARNINGS also works for doctests embedded in .rst files."""
    # First check that we get a warning if we don't add the IGNORE_WARNINGS
    # directive
    p = testdir.makefile(
        ".rst",
        """
        ::

            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning)
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "--doctest-rst",
                                "--text-file-format=rst", "-W error")
    reprec.assertoutcome(failed=1, passed=0)
    # Now try with the IGNORE_WARNINGS directive
    p = testdir.makefile(
        ".rst",
        """
        ::

            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +IGNORE_WARNINGS
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "--doctest-rst",
                                "--text-file-format=rst", "-W error")
    reprec.assertoutcome(failed=0, passed=1)
def test_show_warnings_module(testdir):
    """SHOW_WARNINGS makes the warning text part of the expected doctest
    output; omitting the expected warning line is a failure."""
    p = testdir.makepyfile(
        """
        def myfunc():
            '''
            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +SHOW_WARNINGS
            UserWarning: A warning occurred
            '''
            pass
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "-W error")
    reprec.assertoutcome(failed=0, passed=1)
    # Make sure it fails if warning message is missing
    p = testdir.makepyfile(
        """
        def myfunc():
            '''
            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +SHOW_WARNINGS
            '''
            pass
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "-W error")
    reprec.assertoutcome(failed=1, passed=0)
def test_show_warnings_rst(testdir):
    """SHOW_WARNINGS also works for doctests embedded in .rst files; a
    missing or mismatched warning line is a failure."""
    p = testdir.makefile(
        ".rst",
        """
        ::

            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +SHOW_WARNINGS
            UserWarning: A warning occurred
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "--doctest-rst",
                                "--text-file-format=rst", "-W error")
    reprec.assertoutcome(failed=0, passed=1)
    # Make sure it fails if warning message is missing
    p = testdir.makefile(
        ".rst",
        """
        ::

            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +SHOW_WARNINGS
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "--doctest-rst",
                                "--text-file-format=rst", "-W error")
    reprec.assertoutcome(failed=1, passed=0)
    # Make sure it fails if the warning message is different
    p = testdir.makefile(
        ".rst",
        """
        ::

            >>> import warnings
            >>> warnings.warn('A warning occurred', UserWarning) # doctest: +SHOW_WARNINGS
            Warning: Another warning occurred
        """)
    reprec = testdir.inline_run(p, "--doctest-plus", "--doctest-rst",
                                "--text-file-format=rst", "-W error")
    reprec.assertoutcome(failed=1, passed=0)
def test_doctest_glob(testdir):
testdir.makefile(
'.md',
foo_1=">>> 1 + 1\n2",
)
testdir.makefile(
'.rst',
foo_2=">>> 1 + 1\n2",
)
testdir.makefile(
'.rst',
foo_3=">>> 1 + 1\n2",
)
testdir.makefile(
'.txt',
foo_4=">>> 1 + 1\n2",
)
testdir.makefile(
'.rst',
bar_2=">>> 1 + 1\n2",
)
testdir.inline_run().assertoutcome(passed=0)
testdir.inline_run('--doctest-plus').assertoutcome(passed=0)
testdir.inline_run('--doctest-plus', '--doctest-rst').assertoutcome(passed=3)
testdir.inline_run(
'--doctest-plus', '--doctest-rst', '--text-file-format', 'txt'
).assertoutcome(passed=1)
testdir.inline_run(
'--doctest-plus', '--doctest-glob', '*.rst'
).assertoutcome(passed=3)
testdir.inline_run(
'--doctest-plus', '--doctest-glob', '*.rst', '--doctest-glob', '*.txt'
).assertoutcome(passed=4)
testdir.inline_run(
'--doctest-plus', '--doctest-glob', '*.rst', '--doctest-glob', | |
don't
# check or write lockfiles. Also returns a single string, no list.
try:
with open(filename, mode=mode) as File:
contents = File.readlines()
return [x.rstrip() for x in contents]
except (IOError, OSError):
return []
def write_list_file(filename, line, mode="w"):
    """Write the strings in *line* to *filename*, one per line, with a
    trailing newline.  Honors binary modes (joins with b'\\n').

    Appending an empty list is deliberately a no-op: callers "clear" a
    lockfile that way, and the main file must not be touched.
    """
    is_null_append = "a" in mode and len(line) == 0
    if not is_null_append:
        newline = b'\n' if 'b' in mode else '\n'
        payload = newline.join(line) + newline
        with open(filename, mode) as handle:
            handle.write(payload)
def primenet_fetch(num_to_get):
    """Fetch up to *num_to_get* new assignments from the PrimeNet server.

    Returns a list of worktodo-format entry strings; an empty list when no
    username is configured, the worktype is unsupported, or on server or
    connection errors.  Uses the manual-assignment web form when a password
    is configured, otherwise the V5 API.

    Relies on module globals: options, program, s (HTTP session), config,
    primenet_baseurl, primenet_v5_burl, workpattern, and the helpers
    greplike, get_guid, ga, send_request, debug_print.
    """
    if not options.username:
        return []
    # As of early 2018, here is the full list of assignment-type codes supported by the Primenet server; Mlucas
    # v18 (and thus this script) supports only the subset of these indicated by an asterisk in the left column.
    # Supported assignment types may be specified via either their PrimeNet number code or the listed Mnemonic:
    # Worktype:
    # Code Mnemonic Description
    # ---- ----------------- -----------------------
    # 0 Whatever makes the most sense
    # 1 Trial factoring to low limits
    # 2 Trial factoring
    # 4 P-1 factoring
    # 5 ECM for first factor on Mersenne numbers
    # 6 ECM on Fermat numbers
    # 8 ECM on mersenne cofactors
    # *100 SmallestAvail Smallest available first-time tests
    # *101 DoubleCheck Double-checking
    # *102 WorldRecord World record primality tests
    # *104 100Mdigit 100M digit number to LL test (not recommended)
    # *150 SmallestAvailPRP First time PRP tests (Gerbicz)
    # *151 DoubleCheckPRP Doublecheck PRP tests (Gerbicz)
    # *152 WorldRecordPRP World record sized numbers to PRP test (Gerbicz)
    # *153 100MdigitPRP 100M digit number to PRP test (Gerbicz)
    # 160 PRP on Mersenne cofactors
    # 161 PRP double-checks on Mersenne cofactors
    # Convert mnemonic-form worktypes to corresponding numeric value, check worktype value vs supported ones:
    option_dict = {"SmallestAvail": "100", "DoubleCheck": "101", "WorldRecord": "102", "100Mdigit": "104",
                   "SmallestAvailPRP": "150", "DoubleCheckPRP": "151", "WorldRecordPRP": "152", "100MdigitPRP": "153"}
    if options.worktype in option_dict:  # this and the above line of code enables us to use words or numbers on the cmdline
        options.worktype = option_dict[options.worktype]
    supported = set(['100', '101', '102', '104', '150', '151', '152', '153']
                    ) if program == "MLucas" else set(['100', '101', '102', '104'])
    if options.worktype not in supported:
        debug_print("Unsupported/unrecognized worktype = " +
                    options.worktype + " for " + program)
        return []
    try:
        # Get assignment (Loarer's way)
        if options.password:
            assignment = OrderedDict((
                ("cores", "1"),
                ("num_to_get", num_to_get),
                ("pref", options.worktype),
                ("exp_lo", ""),
                ("exp_hi", ""),
                ("B1", "Get Assignments")
            ))
            openurl = primenet_baseurl + "manual_assignment/?"
            debug_print("Fetching work via URL = " +
                        openurl + urlencode(assignment))
            r = s.post(openurl, data=assignment)
            return greplike(workpattern, [line.decode('utf-8', 'replace') for line in r.iter_lines()])
        # Get assignment using V5 API
        else:
            guid = get_guid(config)
            assignment = ga(guid)  # get assignment
            debug_print("Fetching work via V5 Primenet = " +
                        primenet_v5_burl + urlencode(assignment))
            tests = []
            # One request per assignment; stop early on any server error.
            for _ in range(num_to_get):
                r = send_request(guid, assignment)
                if r is None or int(r["pnErrorResult"]) != 0:
                    debug_print(
                        "ERROR while requesting an assignment on mersenne.org", file=sys.stderr)
                    break
                if r['w'] not in supported:
                    debug_print("ERROR: Returned assignment from server is not a supported worktype for " + program + ".", file=sys.stderr)
                    return []
                # if options.worktype == LL
                if r['w'] in set(['100', '102', '104']):
                    tests.append("Test="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
                # if options.worktype == DC
                elif r['w'] in set(['101']):
                    tests.append("DoubleCheck="+",".join([r[i] for i in ['k', 'n', 'sf', 'p1']]))
                # if PRP type testing, first time
                elif r['w'] in set(['150', '152', '153']):
                    tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved']]))
                # if PRP-DC (probable-primality double-check) testing
                elif r['w'] in set(['151']):
                    tests.append("PRP="+",".join([r[i] for i in ['k', 'b', 'n', 'c', 'sf', 'saved', 'base', 'rt']]))
            return tests
    except ConnectionError:
        debug_print("URL open error at primenet_fetch")
        return []
def get_assignment(progress):
    """Top up the worktodo file so it holds at least ``num_cache`` entries.

    :param progress: ``None`` or a ``(percent, time_left)`` tuple as
        returned by update_progress; when ``time_left`` is within the
        ``--days-work`` window, one extra assignment is cached.
    :return: the number of assignments actually fetched (0 when the cache
        was already full).
    """
    w = readonly_list_file(workfile)
    tasks = greplike(workpattern, w)
    (percent, time_left) = None, None
    if progress is not None and type(progress) == tuple and len(progress) == 2:
        (percent, time_left) = progress  # unpack update_progress output
    num_cache = int(options.num_cache) + 1
    if time_left is not None and time_left <= options.days_work*24*3600:
        # time_left and percent increase are exclusive (don't want to do += 2)
        num_cache += 1
        debug_print("Time_left is {0} and smaller than limit ({1}), so num_cache is increased by one to {2}".format(
            time_left, options.days_work*24*3600, num_cache))
    num_to_get = num_to_fetch(tasks, num_cache)
    if num_to_get < 1:
        debug_print(workfile + " already has " + str(len(tasks)) +
                    " >= " + str(num_cache) + " entries, not getting new work")
        return 0
    debug_print("Fetching " + str(num_to_get) + " assignments")
    new_tasks = primenet_fetch(num_to_get)
    num_fetched = len(new_tasks)
    if num_fetched > 0:
        debug_print("Fetched {0} assignments:".format(num_fetched))
        for new_task in new_tasks:
            debug_print("{0}".format(new_task))
    # Appending an empty list is a no-op in write_list_file.
    write_list_file(workfile, new_tasks, "a")
    if num_fetched < num_to_get:
        debug_print("Error: Failed to obtain requested number of new assignments, " +
                    str(num_to_get) + " requested, " + str(num_fetched) + " successfully retrieved")
    return num_fetched
# Result lines mention "Program:"/"program" (pre-v19 HRF vs v19+ JSON
# formats) or "CUDALucas".
resultpattern = re.compile("[Pp]rogram|CUDALucas")


def mersenne_find(line, complete=True):
    """Return a truthy regex match when *line* looks like a result line.

    The *complete* flag is accepted for interface compatibility but is not
    used by the current implementation.
    """
    found = resultpattern.search(line)
    return found
try:
    from statistics import median_low
except ImportError:
    # Fallback for Python < 3.4: the low median (no averaging for an
    # even-length input).
    def median_low(mylist):
        """Return the lower median of *mylist*."""
        ordered = sorted(mylist)
        return ordered[(len(ordered) - 1) // 2]
def parse_stat_file(p):
    """Parse the Mlucas stat file ``p<p>.stat`` for progress of exponent *p*.

    :param p: exponent whose stat file should be read.
    :return: ``(iteration, per_iter)`` where ``iteration`` is the most
        recent iteration count (0 if the file is missing or has no
        ``Iter#`` lines) and ``per_iter`` is the low median of the last
        few per-iteration timings, or ``None`` when unknown.
        NOTE(review): "sec" values are scaled by 1000, so the unit appears
        to be milliseconds despite the ``usec`` variable names -- confirm.
    """
    statfile = 'p' + str(p) + '.stat'
    if not os.path.exists(statfile):
        print("ERROR: stat file does not exist")
        return 0, None
    w = readonly_list_file(statfile)  # appended line by line, no lock needed
    found = 0
    # Raw string: plain "\d" is an invalid escape sequence and raises a
    # DeprecationWarning on Python 3.6+.
    regex = re.compile(r"Iter# = (.+?) .*?(\d+\.\d+) (m?sec)/iter")
    list_usec_per_iter = []
    # get the 5 most recent Iter lines
    for line in reversed(w):
        res = regex.search(line)
        if res:
            found += 1
            # keep the last iteration to compute the percent of progress
            if found == 1:
                iteration = int(res.group(1))
            usec_per_iter = float(res.group(2))
            unit = res.group(3)
            if unit == "sec":
                usec_per_iter *= 1000
            list_usec_per_iter.append(usec_per_iter)
            if found == 5:
                break
    if found == 0:
        return 0, None  # iteration is 0, but don't know the estimated speed yet
    # take the median of the last grepped lines
    usec_per_iter = median_low(list_usec_per_iter)
    return iteration, usec_per_iter
def parse_v5_resp(r):
    """Parse a PrimeNet V5 ``key=value``-per-line response body into a dict.

    Parsing stops at the ``==END==`` sentinel line; a line without ``=``
    maps to the empty string.
    """
    parsed = {}
    for raw in r.splitlines():
        if raw == "==END==":
            break
        key, _, value = raw.partition("=")
        parsed[key] = value
    return parsed
def send_request(guid, args):
    """Send a PrimeNet V5 API GET request and return the parsed response.

    Inserts the instance *guid* into *args* (mutating it), performs the
    request, and returns the key/value response dict, or ``None`` on
    HTTP/connection errors.  Server-side error codes are logged but the
    response dict is still returned.
    """
    args["g"] = guid
    # to mimic mprime, it is necessary to add safe='"{}:,' argument to urlencode, in
    # particular to encode JSON in result submission. But safe is not supported by python2...
    url_args = urlencode(args)
    # NOTE(review): ss/sh appear to be fixed dummy security/checksum fields
    # copied from mprime traffic -- confirm the server accepts these values.
    url_args += "&ss=19191919&sh=ABCDABCDABCDABCDABCDABCDABCDABCD"
    try:
        r = requests.get(primenet_v5_burl+url_args)
        result = parse_v5_resp(r.text)
        rc = int(result["pnErrorResult"])
        if rc:
            if rc in errors:
                resmsg = errors[rc]
            else:
                resmsg = "Unknown error code"
            debug_print("PrimeNet error " + str(rc) +
                        ": " + resmsg, file=sys.stderr)
            debug_print(result["pnErrorDetail"], file=sys.stderr)
        else:
            if result["pnErrorDetail"] != "SUCCESS":
                debug_print("PrimeNet success code with additional info:")
                debug_print(result["pnErrorDetail"])
    except HTTPError as e:
        debug_print("ERROR receiving answer to request: " +
                    str(primenet_v5_burl+url_args), file=sys.stderr)
        debug_print(e, file=sys.stderr)
        return None
    except ConnectionError as e:
        debug_print("ERROR connecting to server for request: " +
                    str(primenet_v5_burl+url_args), file=sys.stderr)
        debug_print(e, file=sys.stderr)
        return None
    return result
def create_new_guid():
    """Return a freshly generated, 32-hex-character instance GUID."""
    guid = hex(getrandbits(128))
    # Drop the '0x' prefix and, on Python 2, the trailing 'L' long suffix.
    if guid.startswith('0x'):
        guid = guid[2:]
    if guid.endswith('L'):
        guid = guid[:-1]
    # Zero-pad on the left to a fixed width of 32 hex digits.
    return guid.rjust(32, "0")
def register_instance(guid):
# register the instance to server, guid is the instance identifier
if options.username is None or options.hostname is None:
parser.error(
"To register the instance, --username and --hostname are required")
hardware_id = sha256(options.cpu_model.encode(
"utf-8")).hexdigest()[:32] # similar as mprime
args = primenet_v5_bargs.copy()
args["t"] = "uc" # update compute command
args["a"] = platform.system() + ('64' if platform.machine().endswith('64')
else '') + ",Mlucas,v" + str(VERSION)
if config.has_option("primenet", "sw_version"):
args["a"] = config.get("primenet", "sw_version")
args["wg"] = "" # only filled on Windows by mprime
args["hd"] = hardware_id # 32 hex char (128 bits)
args["c"] = options.cpu_model[:64] # CPU model (len between 8 and 64)
args["f"] = options.features[:64] # CPU option (like asimd, max len 64)
args["L1"] = options.L1 # L1 cache size in KBytes
args["L2"] = options.L2 # L2 cache size in KBytes
# if smaller or equal to 256,
# server refuses to gives LL assignment
args["np"] = options.np # number of cores
args["hp"] = options.hp # number of hyperthreading cores
args["m"] = options.memory # number of megabytes of physical memory
args["s"] = options.frequency # CPU frequency
args["h"] = 24 # pretend to run 24h/day
args["r"] = 0 # pretend | |
#!/usr/bin/env python
#
# autotextctrl.py - The AutoTextCtrl class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`AutoTextCtrl` class, an alternative to the
``wx.TextCtrl``, which has auto-completion capability.
I wrote this class because ``wx.TextCtrl`` auto-completion does not work under
OSX, and the ``wx.ComboBox`` does not give me enough fine-grained control with
respect to managing focus.
"""
import logging
import wx
import wx.lib.newevent as wxevent
import fsleyes_widgets.utils as wutils
log = logging.getLogger(__name__)
class AutoTextCtrl(wx.Panel):
    """The ``AutoTextCtrl`` class is essentially a ``wx.TextCtrl`` which is able
    to dynamically show a list of options to the user, with a
    :class:`AutoCompletePopup`.
    """

    def __init__(self, parent, style=0, modal=True):
        """Create an ``AutoTextCtrl``. Supported style flags are:

         - :data:`ATC_CASE_SENSITIVE`: restrict the auto-completion
           options to case sensitive matches.

         - :data:`ATC_NO_PROPAGATE_ENTER`: Cause enter events on the
           :class:`AutoCompletePopup` to *not* be propagated upwards as
           ``EVT_ATC_TEXT_ENTER`` events.

        :arg parent: The ``wx`` parent object.
        :arg style:  Style flags.
        :arg modal:  If ``True`` (the default), the :class:`AutoCompletePopup`
                     is shown modally. This option is primarily for testing
                     purposes.
        """
        wx.Panel.__init__(self, parent)
        self.__style = style
        self.__modal = modal
        self.__popup = None
        self.__textCtrl = wx.TextCtrl(self, style=wx.TE_PROCESS_ENTER)
        self.__sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.__sizer.Add(self.__textCtrl, flag=wx.EXPAND, proportion=1)
        self.SetSizer(self.__sizer)
        # The takeFocus flag is set by SetTakeFocus,
        # and used in __showPopup. The options array
        # contains the auto complete options.
        self.__takeFocus = False
        self.__options = []
        self.__textCtrl.Bind(wx.EVT_TEXT, self.__onText)
        self.__textCtrl.Bind(wx.EVT_LEFT_DCLICK, self.__onDoubleClick)
        self.__textCtrl.Bind(wx.EVT_TEXT_ENTER, self.__onEnter)
        self.__textCtrl.Bind(wx.EVT_KEY_DOWN, self.__onKeyDown)
        self.__textCtrl.Bind(wx.EVT_SET_FOCUS, self.__onSetFocus)
        self .Bind(wx.EVT_SET_FOCUS, self.__onSetFocus)

    def __onSetFocus(self, ev):
        """Called when this ``AutoTextCtrl`` or any of its children gains
        focus. Makes sure that the text control insertion point is at the
        end of its current contents.
        """
        ev.Skip()
        log.debug('Text control gained focus: {}'.format(
            wx.Window.FindFocus()))
        # Under wx/GTK, when a text control gains focus,
        # it seems to select its entire contents, meaning
        # that when the user types something, the current
        # contents are replaced with the new contents. To
        # prevent this, here we make sure that no text is
        # selected, and the insertion point is at the end
        # of the current contents.
        text = self.__textCtrl.GetValue()
        # NOTE(review): SetSelection(len - 1, len - 1) collapses the
        # selection just before the last character; SetInsertionPointEnd
        # below then moves the caret to the very end -- confirm intended.
        self.__textCtrl.SetSelection(len(text) - 1, len(text) - 1)
        self.__textCtrl.SetInsertionPointEnd()

    @property
    def textCtrl(self):
        """Returns a reference to the internal ``wx.TextCtrl``. """
        return self.__textCtrl

    @property
    def popup(self):
        """Returns a reference to the ``AutoCompletePopup`` or ``None``
        if it is not currently shown.
        """
        return self.__popup

    def AutoComplete(self, options):
        """Set the list of options to be shown to the user. """
        self.__options = list(options)

    def GetValue(self):
        """Returns the current value shown on this ``AutoTextCtrl``. """
        return self.__textCtrl.GetValue()

    def SetValue(self, value):
        """Sets the current value shown on this ``AutoTextCtrl``.

        .. note:: Calling this method will result in an ``wx.EVT_TEXT``
                  event being generated - use :meth:`ChangeValue` if you
                  do not want this to occur.
        """
        self.__textCtrl.SetValue(value)

    def ChangeValue(self, value):
        """Sets the current value shown on this ``AutoTextCtrl`` without
        generating an ``wx.EVT_TEXT`` event. """
        self.__textCtrl.ChangeValue(value)

    def GetInsertionPoint(self):
        """Returns the cursor location in this ``AutoTextCtrl``. """
        return self.__textCtrl.GetInsertionPoint()

    def SetInsertionPoint(self, idx):
        """Sets the cursor location in this ``AutoTextCtrl``. """
        self.__textCtrl.SetInsertionPoint(idx)

    def GenEnterEvent(self):
        """Programmatically generates an :data:`EVT_ATC_TEXT_ENTER` event. """
        self.__onEnter(None)

    def SetTakeFocus(self, takeFocus):
        """If ``takeFocus`` is ``True``, this ``AutoTextCtrl`` will give
        itself focus when its ``AutoCompletePopup`` is closed.
        """
        self.__takeFocus = takeFocus

    def __onKeyDown(self, ev):
        """Called on ``EVT_KEY_DOWN`` events in the text control. Pressing
        enter on an empty control shows the popup with all options; every
        other key is handled normally. """
        enter = wx.WXK_RETURN
        key = ev.GetKeyCode()
        log.debug('Key event on text control: {}'.format(key))
        # Make sure the event is propagated
        # up the window hierarchy, if we skip it
        ev.ResumePropagation(wx.EVENT_PROPAGATE_MAX)
        if key != enter:
            ev.Skip()
            return
        if self.GetValue() == '':
            # NOTE(review): the log message mentions "right arrow", but
            # only the enter key reaches this branch -- presumably the
            # right-arrow handling was removed at some point.
            log.debug('Enter/right arrow - displaying all options')
            self.__showPopup('')
        # Let the text control handle the event normally
        else:
            ev.Skip()

    def __onDoubleClick(self, ev):
        """Called when the user double clicks in this ``AutoTextCtrl``.
        Creates an :class:`AutoCompletePopup`.
        """
        log.debug('Double click on text control - simulating text entry')
        self.__onText(None)

    def __onText(self, ev):
        """Called when the user changes the text shown on this ``AutoTextCtrl``.
        Creates an :class:`AutoCompletePopup`.
        """
        text = self.__textCtrl.GetValue()
        log.debug('Text - displaying options matching "{}"'.format(text))
        self.__showPopup(text)

    def __onEnter(self, ev):
        """Called when the user presses enter in this ``AutoTextCtrl``. Generates
        an :data:`EVT_ATC_TEXT_ENTER` event.
        """
        value = self.__textCtrl.GetValue()
        ev = AutoTextCtrlEnterEvent(text=value)
        log.debug('Enter - generating ATC enter '
                  'event (text: "{}")'.format(value))
        wx.PostEvent(self, ev)

    def __showPopup(self, text):
        """Creates an :class:`AutoCompletePopup` which displays a list of
        auto-completion options, matching the given prefix text, to the user.
        The popup is not displayed if there are no options with the given
        prefix.
        """
        text = text.strip()
        popup = AutoCompletePopup(
            self,
            self,
            text,
            self.__options,
            self.__style)
        if popup.GetCount() == 0:
            popup.Destroy()
            return
        # Don't take focus unless the AutoCompletePopup
        # tells us to (it will call the SetTakeFocus method)
        self.__takeFocus = False
        # Make sure we get the focus back
        # when the popup is destroyed
        def refocus(ev):
            self.__popup = None
            # A call to Raise is required under
            # GTK, as otherwise the main window
            # won't be given focus.
            if wx.Platform == '__WXGTK__':
                self.GetTopLevelParent().Raise()
            if self.__takeFocus:
                self.__textCtrl.SetFocus()
        popup.Bind(EVT_ATC_POPUP_DESTROY, refocus)
        # The popup has its own textctrl - we
        # position the popup so that its textctrl
        # is displayed on top of our textctrl,
        # with the option list underneath.
        posx, posy = self.__textCtrl.GetScreenPosition().Get()
        self.__popup = popup
        popup.SetSize((-1, -1))
        popup.SetPosition((posx, posy))
        if self.__modal: popup.ShowModal()
        else:            popup.Show()
ATC_CASE_SENSITIVE = 1
"""Syle flag for use with the :class:`AutoTextCtrl` class. If set, the
auto-completion pattern matching will be case sensitive.
"""
ATC_NO_PROPAGATE_ENTER = 2
"""Syle flag for use with the :class:`AutoTextCtrl` class. If set,
enter events which occur on the :class:`AutoCompletePopup` list will
*not* be propagated as :attr:`EVT_ATC_TEXT_ENTER` events.
"""
_AutoTextCtrlEnterEvent, _EVT_ATC_TEXT_ENTER = wxevent.NewEvent()
EVT_ATC_TEXT_ENTER = _EVT_ATC_TEXT_ENTER
"""Identifier for the :data:`AutoTextCtrlEnterEvent`, which is generated
when the user presses enter in an :class:`AutoTextCtrl`.
"""
AutoTextCtrlEnterEvent = _AutoTextCtrlEnterEvent
"""Event generated when the user presses enter in an :class:`AutoTextCtrl`.
Contains a single attribute, ``text``, which contains the text in the
``AutoTextCtrl``.
"""
class AutoCompletePopup(wx.Dialog):
"""The ``AutoCompletePopup`` class is used by the :class:`AutoTextCtrl`
to display a list of completion options to the user.
"""
def __init__(self, parent, atc, text, options, style=0):
"""Create an ``AutoCompletePopup``. Accepts the same style flags as
the :class:`AutoTextCtrl`.
:arg parent: The ``wx`` parent object.
:arg atc: The :class:`AutoTextCtrl` that is using this popup.
:arg text: Initial text value.
:arg options: A list of all possible auto-completion options.
:arg style: Style flags.
"""
wx.Dialog.__init__(self,
parent,
style=(wx.NO_BORDER | wx.STAY_ON_TOP))
self.__alive = True
self.__caseSensitive = style & ATC_CASE_SENSITIVE
self.__propagateEnter = not (style & ATC_NO_PROPAGATE_ENTER)
self.__atc = atc
self.__options = options
self.__textCtrl = wx.TextCtrl(self,
value=text,
style=wx.TE_PROCESS_ENTER)
self.__listBox = wx.ListBox( self,
style=(wx.LB_SINGLE))
self.__listBox.Set(self.__getMatches(text))
self.__sizer = wx.BoxSizer(wx.VERTICAL)
self.__sizer.Add(self.__textCtrl, flag=wx.EXPAND)
self.__sizer.Add(self.__listBox, flag=wx.EXPAND, proportion=1)
self.SetSizer(self.__sizer)
self.__textCtrl.SetMinSize(parent.GetSize())
self.__textCtrl.SetFont(parent.GetFont())
self.__listBox .SetFont(parent.GetFont())
self.Layout()
self.Fit()
self.__textCtrl.Bind(wx.EVT_TEXT, self.__onText)
self.__textCtrl.Bind(wx.EVT_TEXT_ENTER, self.__onEnter)
self.__textCtrl.Bind(wx.EVT_KEY_DOWN, self.__onKeyDown)
self.__textCtrl.Bind(wx.EVT_CHAR_HOOK, self.__onKeyDown)
self.__listBox .Bind(wx.EVT_KEY_DOWN, self.__onListKeyDown)
self.__listBox .Bind(wx.EVT_CHAR_HOOK, self.__onListKeyDown)
self.__listBox .Bind(wx.EVT_LISTBOX_DCLICK, self.__onListMouseDblClick)
# Under GTK, the SetFocus/KillFocus event
# objects often don't have a reference to
# the window that received/is about to
# receive focus. In particular, if the
# list box is clicked, a killFocus event
# is triggered, but the list box is not
# passed in. So on mouse down events, we
# force the list box to have focus.
if wx.Platform == '__WXGTK__':
self.__listBox .Bind(wx.EVT_LEFT_DOWN, self.__onListMouseDown)
self.__listBox .Bind(wx.EVT_RIGHT_DOWN, self.__onListMouseDown)
self .Bind(wx.EVT_KILL_FOCUS, self.__onKillFocus)
self.__textCtrl.Bind(wx.EVT_KILL_FOCUS, self.__onKillFocus)
self.__listBox .Bind(wx.EVT_KILL_FOCUS, self.__onKillFocus)
self .Bind(wx.EVT_SET_FOCUS, self.__onSetFocus)
self.__textCtrl.Bind(wx.EVT_SET_FOCUS, self.__onSetFocus)
self.__listBox .Bind(wx.EVT_SET_FOCUS, self.__onSetFocus)
def GetCount(self):
"""Returns the number of auto-completion options currently available.
"""
return self.__listBox.GetCount()
@property
def textCtrl(self):
"""Returns a reference to the ``wx.TextCtrl``."""
return self.__textCtrl
@property
def listBox(self):
"""Returns a reference to the ``wx.ListBox``."""
return self.__listBox
def __onSetFocus(self, ev):
"""Called when this ``AutoCompletePopup`` or any of its children gains
focus. Makes sure that the text control insertion point is at the end
of its current contents.
"""
ev.Skip()
log.debug('Popup gained focus: {}'.format(ev.GetWindow()))
# See note in AutoTextCtrl.__onSetFocus
text = self.__textCtrl.GetValue()
self.__textCtrl.SetSelection(len(text) - 1, len(text) - 1)
self.__textCtrl.SetInsertionPointEnd()
def __onKillFocus(self, ev):
"""Called when this ``AutoCompletePopup`` loses focus. Calls
:meth:`__destroy`.
"""
ev.Skip()
focused = ev.GetWindow()
log.debug('Kill focus event on popup: {}'.format(focused))
objs = (self, self.__textCtrl, self.__listBox)
if focused not in objs:
log.debug('Focus lost - destroying popup')
self.__destroy(False, False)
def __destroy(self, genEnter=True, returnFocus=True):
"""Called by various event handlers. Copies the current value in
this ``AutoCompletePopup`` to the owning :class:`AutoTextCtrl`,
and then (asynchronously) destroys this ``AutoCompletePopup``.
"""
# destroy | |
# Source: martinjm97/typed-argument-parser -- tap/tap.py
from argparse import ArgumentParser
from collections import OrderedDict
from copy import deepcopy
import json
from pprint import pformat
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Union
from tap.utils import get_class_variables, get_dest, get_git_root, get_git_url, has_git,has_uncommitted_changes,\
is_option_arg, type_to_str
# Argument types that Tap can convert from the command line without a
# user-supplied converter function.
SUPPORTED_DEFAULT_BASE_TYPES = {str, int, float, bool}
SUPPORTED_DEFAULT_OPTIONAL_TYPES = {Optional[t] for t in (str, int, float)}
SUPPORTED_DEFAULT_LIST_TYPES = {List[t] for t in (str, int, float)}
SUPPORTED_DEFAULT_SET_TYPES = {Set[t] for t in (str, int, float)}
SUPPORTED_DEFAULT_COLLECTION_TYPES = SUPPORTED_DEFAULT_LIST_TYPES | SUPPORTED_DEFAULT_SET_TYPES
SUPPORTED_DEFAULT_TYPES = (SUPPORTED_DEFAULT_BASE_TYPES
                           | SUPPORTED_DEFAULT_OPTIONAL_TYPES
                           | SUPPORTED_DEFAULT_COLLECTION_TYPES)
class Tap(ArgumentParser):
"""Tap is a typed argument parser that wraps Python's built-in ArgumentParser."""
def __init__(self, *args, **kwargs):
    """Initializes the Tap instance.

    :param args: Arguments passed to the super class ArgumentParser.
    :param kwargs: Keyword arguments passed to the super class ArgumentParser.
    """
    # Whether the arguments have been parsed (i.e. if parse_args has been called)
    self._parsed = False

    # Set extra arguments to empty list
    self.extra_args = []

    # Create argument buffer.
    # NOTE: this must exist before super().__init__ runs, because the
    # add_argument override below writes into it and can be invoked during
    # ArgumentParser.__init__ (e.g. for the automatic --help argument).
    self.argument_buffer = OrderedDict()

    # Get help strings from the comments
    self.class_variables = self._get_class_variables()

    # Get annotations from self and all super classes up through tap
    self._annotations = self._get_annotations()

    # Initialize the super class, i.e. ArgumentParser
    super(Tap, self).__init__(*args, **kwargs)

    # Add arguments to self
    self.add_arguments()   # Adds user-overridden arguments to the arguments buffer
    self._add_arguments()  # Adds all arguments in order to self
def _add_argument(self, *name_or_flags, **kwargs) -> None:
    """Adds an argument to self (i.e. the super class ArgumentParser).

    Sets the following attributes of kwargs when not explicitly provided:

    - type: Set to the type annotation of the argument.
    - default: Set to the default value of the argument (if provided).
    - required: True if a default value of the argument is not provided, False otherwise.
    - action: Set to "store_true" if the argument is a required bool or a bool with default value False.
              Set to "store_false" if the argument is a bool with default value True.
    - nargs: Set to "*" if the type annotation is List[str], List[int], or List[float].
    - help: Set to the argument documentation from the class docstring.

    :param name_or_flags: Either a name or a list of option strings, e.g. foo or -f, --foo.
    :param kwargs: Keyword arguments.
    """
    # Get variable name
    variable = get_dest(*name_or_flags, **kwargs)

    # Get default from the class attribute if not explicitly specified
    if hasattr(self, variable):
        kwargs['default'] = kwargs.get('default', getattr(self, variable))

    # Set required if option arg; positional args never get 'required' here,
    # so everything below must read it with .get(..., False).
    if is_option_arg(*name_or_flags) and variable != 'help':
        kwargs['required'] = kwargs.get('required', not hasattr(self, variable))

    # Build an automatic help string: "(<type>, required|default=X) <comment>"
    if 'help' not in kwargs:
        kwargs['help'] = '('

        # Type
        if variable in self._annotations:
            kwargs['help'] += type_to_str(self._annotations[variable]) + ', '

        # Required/default
        if kwargs.get('required', False):
            kwargs['help'] += 'required'
        else:
            kwargs['help'] += f'default={kwargs.get("default", None)}'

        kwargs['help'] += ')'

        # Description extracted from the class docstring/comments
        if variable in self.class_variables:
            kwargs['help'] += ' ' + self.class_variables[variable]['comment']

    # Set other kwargs where not provided
    if variable in self._annotations:
        # Get type annotation
        var_type = self._annotations[variable]

        # If type is not explicitly provided, set it if it's one of our supported default types
        if 'type' not in kwargs:
            if var_type not in SUPPORTED_DEFAULT_TYPES:
                # BUGFIX: kwargs["required"] raised KeyError for positional
                # args; use .get with the documented default instead.
                raise ValueError(
                    f'Variable "{variable}" has type "{var_type}" which is not supported by default.\n'
                    f'Please explicitly add the argument to the parser by writing:\n\n'
                    f'def add_arguments(self) -> None:\n'
                    f'    self.add_argument("--{variable}", type=func, '
                    f'{"required=True" if kwargs.get("required", False) else f"default={getattr(self, variable)}"})\n\n'
                    f'where "func" maps from str to {var_type}.')

            # If Optional type, extract the underlying type
            if var_type in SUPPORTED_DEFAULT_OPTIONAL_TYPES:
                var_type = var_type.__args__[0]

            # If List/Set type, extract the element type and accept any number of values
            elif var_type in SUPPORTED_DEFAULT_COLLECTION_TYPES:
                var_type = var_type.__args__[0]
                kwargs['nargs'] = kwargs.get('nargs', '*')

            # If bool then set action, otherwise set type.
            # BUGFIX: 'required'/'default' may both be absent here (positional
            # bool with no class-level default) -- .get avoids a KeyError.
            if var_type == bool:
                kwargs['action'] = kwargs.get(
                    'action',
                    f'store_{"true" if kwargs.get("required", False) or not kwargs.get("default") else "false"}')
            else:
                kwargs['type'] = var_type

    super(Tap, self).add_argument(*name_or_flags, **kwargs)
def add_argument(self, *name_or_flags, **kwargs) -> None:
    """Buffer an argument definition; :meth:`_add_argument` later replays
    the buffer so that help output follows class-variable order."""
    dest = get_dest(*name_or_flags, **kwargs)
    self.argument_buffer[dest] = (name_or_flags, kwargs)
def _add_arguments(self) -> None:
    """Add arguments to self in the order they are defined as class
    variables (so the help string is in order)."""
    # Class variables first, in declaration order; buffered definitions
    # override the bare '--name' form.
    for variable in self.class_variables:
        buffered = self.argument_buffer.get(variable)
        if buffered is None:
            self._add_argument(f'--{variable}')
        else:
            flags, kwargs = buffered
            self._add_argument(*flags, **kwargs)

    # Then any manually buffered arguments that are not class variables,
    # in the order they were added.
    for variable, (flags, kwargs) in self.argument_buffer.items():
        if variable not in self.class_variables:
            self._add_argument(*flags, **kwargs)
def add_arguments(self) -> None:
    """Hook for subclasses: explicitly buffer arguments (via
    :meth:`add_argument`) when the default settings are not appropriate.
    The base implementation does nothing."""
def process_args(self) -> None:
    """Hook for subclasses: perform additional argument processing and/or
    validation after parsing.  The base implementation does nothing."""
@staticmethod
def get_reproducibility_info() -> Dict[str, str]:
    """Gets a dictionary of reproducibility information.

    Always includes:
    - command_line: The command line command used to execute the code.
    - time: The current time.

    When git is installed, also includes:
    - git_root: The root of the git repo where the command is run.
    - git_url: The url of the current hash of the git repo where the command is run.
      Ex. https://github.com/kswanson-asapp/rationale-alignment/tree/<hash>
    - git_has_uncommitted_changes: Whether the current git repo has uncommitted changes.

    :return: A dictionary of reproducibility information.
    """
    info = {
        'command_line': 'python {}'.format(' '.join(sys.argv)),
        'time': time.strftime('%c'),
    }

    if has_git():
        info.update(
            git_root=get_git_root(),
            git_url=get_git_url(commit_hash=True),
            git_has_uncommitted_changes=has_uncommitted_changes(),
        )

    return info
def _log_all(self) -> Dict[str, Any]:
    """Gets all arguments along with reproducibility information.

    :return: A dictionary containing all parsed arguments plus a
             'reproducibility' entry (see :meth:`get_reproducibility_info`).
    """
    log_dict = self.as_dict()
    log_dict['reproducibility'] = self.get_reproducibility_info()
    return log_dict
def parse_args(self,
               args: Optional[Sequence[str]] = None,
               known_only: bool = False) -> 'Tap':
    """Parses arguments, sets attributes of self equal to the parsed arguments, and processes arguments.

    :param args: List of strings to parse. The default is taken from `sys.argv`.
    :param known_only: If true, ignores extra arguments and only parses known arguments.
                       Unparsed arguments are saved to self.extra_args.
    :return: self, which is a Tap instance containing all of the parsed args.
    """
    # Parse args using super class ArgumentParser's parse_args or parse_known_args function
    if known_only:
        default_namespace, self.extra_args = super(Tap, self).parse_known_args(args)
    else:
        default_namespace = super(Tap, self).parse_args(args)

    # Copy parsed arguments to self
    for variable, value in vars(default_namespace).items():

        # Conversion from list to set, for variables annotated as Set[...]
        # (argparse itself only produces lists for nargs='*')
        if variable in self._annotations and self._annotations[variable] in SUPPORTED_DEFAULT_SET_TYPES:
            value = set(value)

        # Set variable in self (deepcopy so mutable values are not shared
        # with the argparse namespace or class-level defaults)
        setattr(self, variable, deepcopy(value))

    # Process args
    self.process_args()

    # Indicate that args have been parsed
    self._parsed = True

    return self
@classmethod
def _get_from_self_and_super(cls,
                             extract_func: Callable[[type], dict],
                             dict_type: type = dict) -> Union[Dict[str, Any], OrderedDict]:
    """Returns a dictionary mapping variable names to values.

    Variables and values are extracted from classes using key starting
    with this class and traversing up the super classes up through Tap.

    If super class and sub class have the same key, the sub class value is used.

    Super classes are traversed through breadth first search.

    :param extract_func: A function that extracts from a class a dictionary mapping variables to values.
    :param dict_type: The type of dictionary to use (e.g. dict, OrderedDict, etc.)
    :return: A dictionary mapping variable names to values from the class dict.
    """
    visited = set()
    super_classes = [cls]
    dictionary = dict_type()

    while len(super_classes) > 0:
        super_class = super_classes.pop(0)

        if super_class not in visited and issubclass(super_class, Tap):
            super_dictionary = extract_func(super_class)

            # Only add unseen variables, so a subclass value is never
            # overridden by a super class value.
            # (FIX: removed a second loop over
            # `super_dictionary.keys() - dictionary.keys()` -- after this
            # loop that key set is always empty, so it was dead code.)
            for variable, value in super_dictionary.items():
                if variable not in dictionary:
                    dictionary[variable] = value

            super_classes += list(super_class.__bases__)
            visited.add(super_class)

    return dictionary
def _get_class_dict(self) -> Dict[str, Any]:
    """Returns a dictionary mapping class variable names to values from the class dict."""
    raw = self._get_from_self_and_super(
        extract_func=lambda super_class: dict(getattr(super_class, '__dict__', dict()))
    )
    # Keep only plain data attributes: drop dunders/privates, methods,
    # and staticmethod descriptors.
    return {
        var: val
        for var, val in raw.items()
        if not var.startswith('_') and not callable(val) and not isinstance(val, staticmethod)
    }
def _get_annotations(self) -> Dict[str, Any]:
    """Returns a dictionary mapping variable names to their type annotations."""
    def extract(super_class: type) -> dict:
        # Copy so mutation of the result never touches __annotations__.
        return dict(getattr(super_class, '__annotations__', dict()))

    return self._get_from_self_and_super(extract_func=extract)
def _get_class_variables(self) -> OrderedDict:
    """Returns an OrderedDict mapping class variable names to their additional information."""
    # get_class_variables already has the (class) -> dict signature that
    # _get_from_self_and_super expects, so no lambda wrapper is needed.
    return self._get_from_self_and_super(
        extract_func=get_class_variables,
        dict_type=OrderedDict,
    )
def _get_argument_names(self) -> Set[str]:
"""Returns a list of variable names corresponding | |
#!/user/bin/env python3
# Note that all the tests in this module require dataset (either network access or cached)
import os
import torch
import torchtext
import json
import hashlib
from torchtext.legacy import data
from parameterized import parameterized
from ..common.torchtext_test_case import TorchtextTestCase
from ..common.parameterized_utils import load_params
from ..common.assets import conditional_remove
from ..common.cache_utils import check_cache_status
def _raw_text_custom_name_func(testcase_func, param_num, param):
    """Build a parameterized-test name of the form <func>_<dataset>_<split>."""
    info = param.args[0]
    suffix = parameterized.to_safe_name('_'.join([info['dataset_name'], info['split']]))
    return '%s_%s' % (testcase_func.__name__, suffix)
class TestDataset(TorchtextTestCase):
@classmethod
def setUpClass(cls):
    # Verify the cached dataset assets before any test runs; every test in
    # this class needs either network access or a primed cache.
    check_cache_status()
def _helper_test_func(self, length, target_length, results, target_results):
    """Assert a dataset's length and that a sample of its contents matches
    ``target_results`` (list/tuple expectations are compared as int64 tensors)."""
    self.assertEqual(length, target_length)

    if isinstance(target_results, list):
        target_results = torch.tensor(target_results, dtype=torch.int64)
    elif isinstance(target_results, tuple):
        # A tuple of expected sequences -> tuple of int64 tensors.
        target_results = tuple(torch.tensor(t, dtype=torch.int64) for t in target_results)

    self.assertEqual(results, target_results)
def test_wikitext2_legacy(self):
    """Smoke test: the legacy WikiText2 dataset builds splits, a vocab and
    BPTT iterators without raising."""
    from torchtext.legacy.datasets import WikiText2
    cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
    # Start from a clean cache so the download path is exercised.
    conditional_remove(cachedir)

    ds = WikiText2
    TEXT = data.Field(lower=True, batch_first=True)
    train, valid, test = ds.splits(TEXT)
    TEXT.build_vocab(train)
    train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
        (train, valid, test), batch_size=3, bptt_len=30)

    # Also exercise the convenience `iters` constructor.
    train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
                                                 bptt_len=30)

    conditional_remove(cachedir)
def test_wikitext2(self):
    """End-to-end check of experimental WikiText2: per-split token counts,
    known token-id samples, vocab lookups, and the raw/split-subset APIs."""
    from torchtext.experimental.datasets import WikiText2
    cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
    conditional_remove(cachedir)
    cachefile = os.path.join(self.project_root, ".data", "wikitext-2-v1.zip")
    conditional_remove(cachefile)

    train_dataset, valid_dataset, test_dataset = WikiText2()
    # Concatenate all non-empty lines into one token-id tensor per split.
    train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
    valid_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
    test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
    self._helper_test_func(len(train_data), 2049990, train_data[20:25],
                           [5024, 89, 21, 3, 1838])
    self._helper_test_func(len(test_data), 241859, test_data[30:35],
                           [914, 4, 36, 11, 569])
    self._helper_test_func(len(valid_data), 214417, valid_data[40:45],
                           [925, 8, 2, 150, 8575])

    vocab = train_dataset.get_vocab()
    tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]
    self.assertEqual(tokens_ids, [2, 286, 503, 700])

    # Add test for the subset of the standard datasets
    train_iter, valid_iter, test_iter = torchtext.datasets.WikiText2(split=('train', 'valid', 'test'))
    self._helper_test_func(len(train_iter), 36718, next(train_iter), ' \n')
    self._helper_test_func(len(valid_iter), 3760, next(valid_iter), ' \n')
    self._helper_test_func(len(test_iter), 4358, next(test_iter), ' \n')
    del train_iter, valid_iter, test_iter

    train_dataset, test_dataset = WikiText2(split=('train', 'test'))
    train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
    test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
    self._helper_test_func(len(train_data), 2049990, train_data[20:25],
                           [5024, 89, 21, 3, 1838])
    self._helper_test_func(len(test_data), 241859, test_data[30:35],
                           [914, 4, 36, 11, 569])

    conditional_remove(cachedir)
    conditional_remove(cachefile)
def test_penntreebank_legacy(self):
    """Smoke test: legacy PennTreebank builds splits, a vocab and BPTT
    iterators without raising."""
    from torchtext.legacy.datasets import PennTreebank
    # smoke test to ensure penn treebank works properly
    TEXT = data.Field(lower=True, batch_first=True)
    ds = PennTreebank
    train, valid, test = ds.splits(TEXT)
    TEXT.build_vocab(train)
    train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
        (train, valid, test), batch_size=3, bptt_len=30)

    # Also exercise the convenience `iters` constructor.
    train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
                                                 bptt_len=30)
def test_penntreebank(self):
    """Check experimental PennTreebank: per-split token counts, known
    token-id samples, vocab lookups, and the raw/split-subset APIs."""
    from torchtext.experimental.datasets import PennTreebank
    # smoke test to ensure penn treebank works properly
    train_dataset, valid_dataset, test_dataset = PennTreebank()
    # Concatenate all non-empty lines into one token-id tensor per split.
    train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
    valid_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
    test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
    self._helper_test_func(len(train_data), 924412, train_data[20:25],
                           [9919, 9920, 9921, 9922, 9188])
    self._helper_test_func(len(test_data), 82114, test_data[30:35],
                           [397, 93, 4, 16, 7])
    self._helper_test_func(len(valid_data), 73339, valid_data[40:45],
                           [0, 0, 78, 426, 196])

    vocab = train_dataset.get_vocab()
    tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]
    self.assertEqual(tokens_ids, [2, 2550, 3344, 1125])

    # Add test for the subset of the standard datasets
    train_dataset, test_dataset = PennTreebank(split=('train', 'test'))
    train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
    test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
    self._helper_test_func(len(train_data), 924412, train_data[20:25],
                           [9919, 9920, 9921, 9922, 9188])
    self._helper_test_func(len(test_data), 82114, test_data[30:35],
                           [397, 93, 4, 16, 7])

    train_iter, test_iter = torchtext.datasets.PennTreebank(split=('train', 'test'))
    self._helper_test_func(len(train_iter), 42068, next(train_iter)[:15], ' aer banknote b')
    self._helper_test_func(len(test_iter), 3761, next(test_iter)[:25], " no it was n't black mond")
    del train_iter, test_iter
def test_text_classification(self):
    """AG_NEWS classification dataset: check lengths and sample token ids
    for both the ngrams=3 build and the split-subset API."""
    from torchtext.experimental.datasets import AG_NEWS
    # smoke test to ensure ag_news dataset works properly
    datadir = os.path.join(self.project_root, ".data")
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    train_dataset, test_dataset = AG_NEWS(root=datadir, ngrams=3)
    self._helper_test_func(len(train_dataset), 120000, train_dataset[-1][1][:10],
                           [3525, 319, 4053, 34, 5407, 3607, 70, 6798, 10599, 4053])
    self._helper_test_func(len(test_dataset), 7600, test_dataset[-1][1][:10],
                           [2351, 758, 96, 38581, 2351, 220, 5, 396, 3, 14786])

    # Add test for the subset of the standard datasets
    train_dataset = AG_NEWS(split='train')
    # NOTE(review): different expected ids than above - presumably because
    # this call uses the default ngrams setting (different vocab); confirm.
    self._helper_test_func(len(train_dataset), 120000, train_dataset[-1][1][:10],
                           [2155, 223, 2405, 30, 3010, 2204, 54, 3603, 4930, 2405])
def test_raw_ag_news(self):
    """Raw AG_NEWS iterators: expected line counts and known first examples."""
    train_iter, test_iter = torchtext.datasets.AG_NEWS()
    self._helper_test_func(len(train_iter), 120000, next(train_iter)[1][:25], 'Wall St. Bears Claw Back ')
    self._helper_test_func(len(test_iter), 7600, next(test_iter)[1][:25], 'Fears for T N pension aft')
    del train_iter, test_iter
@parameterized.expand(
    load_params('raw_datasets.jsonl'),
    name_func=_raw_text_custom_name_func)
def test_raw_text_name_property(self, info):
    """str() of every raw dataset iterator should equal its dataset name."""
    dataset_name = info['dataset_name']
    split = info['split']
    # Multi30k and WMT14 are still served from the experimental namespace.
    if dataset_name == "Multi30k" or dataset_name == 'WMT14':
        data_iter = torchtext.experimental.datasets.raw.DATASETS[dataset_name](split=split)
    else:
        data_iter = torchtext.datasets.DATASETS[dataset_name](split=split)

    self.assertEqual(str(data_iter), dataset_name)
@parameterized.expand(
    load_params('raw_datasets.jsonl'),
    name_func=_raw_text_custom_name_func)
def test_raw_text_classification(self, info):
    """For each raw dataset/split: check NUM_LINES, the md5 of the first
    example, and the registered URL/MD5 metadata."""
    dataset_name = info['dataset_name']
    split = info['split']
    # Multi30k and WMT14 are still served from the experimental namespace.
    if dataset_name == "Multi30k" or dataset_name == 'WMT14':
        data_iter = torchtext.experimental.datasets.raw.DATASETS[dataset_name](split=split)
    else:
        data_iter = torchtext.datasets.DATASETS[dataset_name](split=split)
    self.assertEqual(len(data_iter), info['NUM_LINES'])
    # Stable hash of the first example, compared against the recorded value.
    self.assertEqual(hashlib.md5(json.dumps(next(data_iter), sort_keys=True).encode('utf-8')).hexdigest(), info['first_line'])
    # URL/MD5 registries are keyed per split for some datasets, per dataset
    # for others - branch accordingly.
    if dataset_name == "AG_NEWS":
        self.assertEqual(torchtext.datasets.URLS[dataset_name][split], info['URL'])
        self.assertEqual(torchtext.datasets.MD5[dataset_name][split], info['MD5'])
    elif dataset_name == "Multi30k":
        self.assertEqual(torchtext.experimental.datasets.raw.URLS[dataset_name][split], info['URL'])
        self.assertEqual(torchtext.experimental.datasets.raw.MD5[dataset_name][split], info['MD5'])
    elif dataset_name == "WMT14":
        self.assertEqual(torchtext.experimental.datasets.raw.URLS[dataset_name], info['URL'])
        self.assertEqual(torchtext.experimental.datasets.raw.MD5[dataset_name], info['MD5'])
    else:
        self.assertEqual(torchtext.datasets.URLS[dataset_name], info['URL'])
        self.assertEqual(torchtext.datasets.MD5[dataset_name], info['MD5'])
    del data_iter
@parameterized.expand(list(sorted(torchtext.datasets.DATASETS.keys())))
def test_raw_datasets_split_argument(self, dataset_name):
    """split='train' and split=('train',) should yield identical data."""
    # NOTE(review): statmt.org-hosted datasets are skipped here - presumably
    # because those downloads are slow/unreliable in CI; confirm.
    if 'statmt' in torchtext.datasets.URLS[dataset_name]:
        return
    dataset = torchtext.datasets.DATASETS[dataset_name]
    train1 = dataset(split='train')
    train2, = dataset(split=('train',))
    for d1, d2 in zip(train1, train2):
        self.assertEqual(d1, d2)
        # This test only aims to exercise the argument parsing and uses
        # the first line as a litmus test for correctness.
        break
    # Exercise default constructor
    _ = dataset()
@parameterized.expand(["AG_NEWS", "WikiText2", "IMDB"])
def test_datasets_split_argument(self, dataset_name):
    """Experimental datasets: split='train' and split=('train',) should
    yield identical data."""
    dataset = torchtext.experimental.datasets.DATASETS[dataset_name]
    train1 = dataset(split='train')
    train2, = dataset(split=('train',))
    for d1, d2 in zip(train1, train2):
        self.assertEqual(d1, d2)
        # This test only aims to exercise the argument parsing and uses
        # the first line as a litmus test for correctness.
        break
    # Exercise default constructor
    _ = dataset()
def test_next_method_dataset(self):
    """Mixing ``for`` iteration with explicit ``next`` on the same iterator
    should stay in sync: two items are consumed per loop pass over the
    120000-line AG_NEWS train split, giving 60000 counts each."""
    train_iter, test_iter = torchtext.datasets.AG_NEWS()
    for_count = 0
    next_count = 0
    for line in train_iter:
        for_count += 1
        try:
            next(train_iter)
            next_count += 1
        except StopIteration:
            # FIX: was a bare `except:`, which would also swallow real
            # errors (even KeyboardInterrupt); only iterator exhaustion
            # should end the loop.
            break
    self.assertEqual((for_count, next_count), (60000, 60000))
def test_imdb(self):
    """Experimental IMDB: lengths and sample ids, the vocab-input API, the
    split-subset API, and the raw iterators' known first lines."""
    from torchtext.experimental.datasets import IMDB
    from torchtext.vocab import Vocab
    # smoke test to ensure imdb works properly
    train_dataset, test_dataset = IMDB()
    self._helper_test_func(len(train_dataset), 25000, train_dataset[0][1][:10],
                           [13, 1568, 13, 246, 35468, 43, 64, 398, 1135, 92])
    self._helper_test_func(len(test_dataset), 25000, test_dataset[0][1][:10],
                           [13, 125, 1051, 5, 246, 1652, 8, 277, 66, 20])

    # Test API with a vocab input object
    old_vocab = train_dataset.get_vocab()
    new_vocab = Vocab(counter=old_vocab.freqs, max_size=2500)
    new_train_data, new_test_data = IMDB(vocab=new_vocab)

    # Add test for the subset of the standard datasets
    train_dataset = IMDB(split='train')
    self._helper_test_func(len(train_dataset), 25000, train_dataset[0][1][:10],
                           [13, 1568, 13, 246, 35468, 43, 64, 398, 1135, 92])

    train_iter, test_iter = torchtext.datasets.IMDB()
    self._helper_test_func(len(train_iter), 25000, next(train_iter)[1][:25], 'I rented I AM CURIOUS-YEL')
    self._helper_test_func(len(test_iter), 25000, next(test_iter)[1][:25], 'I love sci-fi and am will')
    del train_iter, test_iter
def test_iwslt2017(self):
    """IWSLT2017 translation dataset: split sizes and a few known
    (de, en) sentence pairs decoded through the vocabs."""
    from torchtext.experimental.datasets import IWSLT2017
    train_dataset, valid_dataset, test_dataset = IWSLT2017()
    self.assertEqual(len(train_dataset), 206112)
    self.assertEqual(len(valid_dataset), 888)
    self.assertEqual(len(test_dataset), 1568)
    de_vocab, en_vocab = train_dataset.get_vocab()

    def assert_nth_pair_is_equal(n, expected_sentence_pair):
        # Decode the n-th (de, en) id pair back to tokens and compare.
        de_sentence = [de_vocab.itos[index] for index in train_dataset[n][0]]
        en_sentence = [en_vocab.itos[index] for index in train_dataset[n][1]]
        expected_de_sentence, expected_en_sentence = expected_sentence_pair
        self.assertEqual(de_sentence, expected_de_sentence)
        self.assertEqual(en_sentence, expected_en_sentence)

    assert_nth_pair_is_equal(0, (['Vielen', 'Dank', ',', 'Chris', '.', '\n'], ['Thank', 'you', 'so', 'much', ',', 'Chris', '.', '\n']))
    assert_nth_pair_is_equal(10, (['und', 'wir', 'fuhren', 'selbst', '.', '\n'], ['Driving', 'ourselves', '.', '\n']))
    assert_nth_pair_is_equal(20, (['Sie', 'sagte', ':', '"', 'Ja', ',', 'das', 'ist', 'Ex-Vizepräsident', 'Al', 'Gore', 'und', 'seine',
                                   'Frau', 'Tipper', '.', '"', '\n'], ['And', 'she', 'said', '"', 'Yes', ',', 'that', "'s", 'former',
                                   'Vice', 'President', 'Al', 'Gore', 'and', 'his', 'wife', ',', 'Tipper', '.', '"', '\n']))
def test_iwslt2016(self):
    """IWSLT2016 translation dataset: split sizes and a few known
    (de, en) sentence pairs decoded through the vocabs."""
    from torchtext.experimental.datasets import IWSLT2016
    train_dataset, valid_dataset, test_dataset = IWSLT2016()
    self.assertEqual(len(train_dataset), 196884)
    self.assertEqual(len(valid_dataset), 993)
    self.assertEqual(len(test_dataset), 1305)
    de_vocab, en_vocab = train_dataset.get_vocab()

    def assert_nth_pair_is_equal(n, expected_sentence_pair):
        # Decode the n-th (de, en) id pair back to tokens and compare.
        de_sentence = [de_vocab.itos[index] for index in train_dataset[n][0]]
        en_sentence = [en_vocab.itos[index] for index in train_dataset[n][1]]
        expected_de_sentence, expected_en_sentence = expected_sentence_pair
        self.assertEqual(de_sentence, expected_de_sentence)
        self.assertEqual(en_sentence, expected_en_sentence)

    assert_nth_pair_is_equal(0, (['David', 'Gallo', ':', 'Das', 'ist', 'Bill', 'Lange',
                                  '.', 'Ich', 'bin', 'Dave', 'Gallo', '.', '\n'],
                                 ['David', 'Gallo', ':', 'This', 'is', 'Bill', 'Lange',
                                  '.', 'I', "'m", 'Dave', 'Gallo', '.', '\n']))
    assert_nth_pair_is_equal(10, (['Die', 'meisten', 'Tiere', 'leben', 'in',
                                   'den', 'Ozeanen', '.', '\n'],
                                  ['Most', 'of', 'the', 'animals', 'are', 'in',
                                   'the', 'oceans', '.', '\n']))
    assert_nth_pair_is_equal(20, (['Es', 'ist', 'einer', 'meiner', 'Lieblinge', ',', 'weil', 'es',
                                   'alle', 'möglichen', 'Funktionsteile', 'hat', '.', '\n'],
                                  ['It', "'s", 'one', 'of', 'my', 'favorites', ',', 'because', 'it', "'s",
                                   'got', 'all', 'sorts', 'of', 'working', 'parts', '.', '\n']))
def test_multi30k(self):
from torchtext.experimental.datasets import Multi30k
# smoke test to ensure multi30k works properly
train_dataset, valid_dataset, test_dataset = Multi30k()
# This change is due to the BC breaking in spacy 3.0
self._helper_test_func(len(train_dataset), 29000, train_dataset[20],
# ([4, 444, 2531, 47, 17480, 7423, 8, 158, 10, 12, 5849, 3, 2],
([4, 444, 2529, 47, 17490, 7422, 8, 158, 10, 12, 5846, 3, 2],
[5, 61, 530, 137, 1494, 10, 9, 280, 6, 2, 3749, 4, 3]))
self._helper_test_func(len(valid_dataset), 1014, valid_dataset[30],
([4, 179, 26, 85, 1005, 57, 19, 154, 3, 2],
| |
!= "S" and word[2] != "s" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "s" + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == "t" :
toGuess = toGuess[:2] + "t" + toGuess[3:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "t" + ", "
if guessChar == "U" or guessChar == "u" :
if word[1] == "U" or word[1] == "u" :
toGuess = toGuess[:1] + "u" + toGuess[2:]
if word[2] == "U" or word[2] == "u" :
toGuess = toGuess[:2] + "u" + toGuess[3:]
if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" or guessChar == "w" :
if word[1] == "W" or word[1] == "w" :
toGuess = toGuess[:1] + "w" + toGuess[2:]
if word[2] == "W" or word[2] == "w" :
toGuess = toGuess[:2] + "w" + toGuess[3:]
if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "w" + ", "
if guessChar == "X" or guessChar == "x" :
if word[1] == "X" or word[1] == "x" :
toGuess = toGuess[:1] + "x" + toGuess[2:]
if word[2] == "X" or word[2] == "x" :
toGuess = toGuess[:2] + "x" + toGuess[3:]
if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "x" + ", "
if guessChar == "Y" or guessChar == "y" :
if word[1] == "Y" or word[1] == "y" :
toGuess = toGuess[:1] + "y" + toGuess[2:]
if word[2] == "Y" or word[2] == "y" :
toGuess = toGuess[:2] + "y" + toGuess[3:]
if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "y" + ", "
if guessChar == "Z" or guessChar == "z" :
if word[1] == "Z" or word[1] == "z" :
toGuess = toGuess[:1] + "z" + toGuess[2:]
if word[2] == "Z" or word[2] == "z" :
toGuess = toGuess[:2] + "z" + toGuess[3:]
if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] != "z" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "z" + ", "
if numberOfErrors == 0 :
print("\t___________")
print("\t| |")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
if numberOfErrors == 1 :
print("\t___________")
print("\t| |")
print("\t| O")
print("\t|")
print("\t|")
print("\t|")
print("\t|")
print("\n\tWord: " + toGuess)
print("\tMisses: " + wrongChars)
if "_" in toGuess and not loser :
guessChar = ""
while not guessChar.isalpha() :
guessChar = input("\n---------------------------------\nEnter your letter: ")
_ = os.system('cls' if os.name=='nt' else 'clear')
if guessChar == "A" or guessChar == "a" :
if word[1] == "A" or word[1] == "a" :
toGuess = toGuess[:1] + "a" + toGuess[2:]
if word[2] == "A" or word[2] == "a" :
toGuess = toGuess[:2] + "a" + toGuess[3:]
if word[1] != "A" and word[1] != "a" and word[2] != "A" and word[2] != "a" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "a" + ", "
if guessChar == "B" or guessChar == "b" :
if word[1] == "B" or word[1] == "b" :
toGuess = toGuess[:1] + "b" + toGuess[2:]
if word[2] == "B" or word[2] == "b" :
toGuess = toGuess[:2] + "b" + toGuess[3:]
if word[1] != "B" and word[1] != "b" and word[2] != "B" and word[2] != "b" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "b" + ", "
if guessChar == "C" or guessChar == "c" :
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
if guessChar == "K" or guessChar == "k" :
if word[1] == "K" or word[1] == "k" :
toGuess = toGuess[:1] + "k" + toGuess[2:]
if word[2] == "K" or word[2] == "k" :
toGuess = toGuess[:2] + "k" + toGuess[3:]
if word[1] != "K" and word[1] != "k" and word[2] != "K" and word[2] != "k" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "k" + ", "
if guessChar == "L" or guessChar == "l" :
if word[1] == "L" or word[1] == "l" :
toGuess = toGuess[:1] + "l" + toGuess[2:]
if word[2] == "L" or word[2] == "l" :
toGuess = toGuess[:2] + "l" + toGuess[3:]
if word[1] != "L" and word[1] != "l" and word[2] != "L" and word[2] != "l" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "l" + ", "
if guessChar == "M" or guessChar == "m" :
if word[1] == "M" or word[1] == "m" :
toGuess = toGuess[:1] + "m" + toGuess[2:]
if word[2] == "M" | |
# -*- coding: utf-8 -*-
"""
Mapping among MWE, lemma form and sense candidates
"""
# This code is a part of coolisf library: https://github.com/letuananh/intsem.fx
# :copyright: (c) 2014 <NAME> <<EMAIL>>
# :license: MIT, see LICENSE for more details.
MWE_ERG_PRED_LEMMA = {
'_flesh_v_out_rel' : 'flesh out'
,'_flip_v_around_rel' : 'flip around'
,'_write_v_out_rel' : 'write out'
,'_hollow_v_out_rel' : 'hollow out'
,'_match_v_up_rel' : 'match up'
,'_split_v_up_rel' : 'split up'
,'_stretch_v_over_rel' : 'stretch over'
,'_put_v_forward_rel' : 'put forward'
,'_shake_v_out_rel' : 'shake out'
,'_stick_v_around_rel' : 'stick around'
,'_wheel_v_in_rel' : 'wheel in'
,'_brick_v_up_rel' : 'brick up'
,'_whip_v_up_rel' : 'whip up'
,'_play_v_back_rel' : 'play back'
,'_search_v_out_rel' : 'search out'
,'_single_v_out_rel' : 'single out'
,'_fork_v_off_rel' : 'fork off'
,'_straighten_v_out_rel' : 'straighten out'
,'_beam_v_out_rel' : 'beam out'
,'_scare_v_away_rel' : 'scare away'
,'_bottle_v_up_rel' : 'bottle up'
,'_drop_v_by_rel' : 'drop by'
,'_ship_v_back_rel' : 'ship back'
,'_turn_v_out_rel' : 'turn out expl'
,'_cast_v_off_rel' : 'cast off'
,'_put_v_off_rel' : 'put off'
,'_show_v_up_rel' : 'show up'
,'_slam_v_on_rel' : 'slam on'
,'_coil_v_up_rel' : 'coil up'
,'_round_v_out_rel' : 'round out'
,'_get_v_away-with_rel' : 'get away with'
,'_clean_v_up_rel' : 'clean up'
,'_hew_v_out_rel' : 'hew out'
,'_buy_v_back_rel' : 'buy back'
,'_end_v_up_rel' : 'end up'
,'_gasp_v_out_rel' : 'gasp out'
,'_log_v_in_rel' : 'log in'
,'_pat_v_down_rel' : 'pat down'
,'_sign_v_on_rel' : 'sign on'
,'_hitch_v_up_rel' : 'hitch up'
,'_trot_v_out_rel' : 'trot out'
,'_finish_v_up_rel' : 'finish up'
,'_haul_v_up_rel' : 'haul up'
,'_fly_v_over_rel' : 'fly over'
,'_home_v_in_rel' : 'home in'
,'_let_v_go-of_rel' : 'let go of'
,'_write_v_up_rel' : 'write up'
,'_sign_v_up_rel' : 'sign up'
,'_drive_v_off_rel' : 'drive off'
,'_bus_v_in_rel' : 'bus in'
,'_close_v_in_rel' : 'close in'
,'_mop_v_up_rel' : 'mop up'
,'_ramp_v_down_rel' : 'ramp down'
,'_haul_v_in_rel' : 'haul in'
,'_hook_v_up_rel' : 'hook up'
,'_pony_v_up_rel' : 'pony up'
,'_shape_v_up_rel' : 'shape up'
,'_chase_v_down_rel' : 'chase down'
,'_cling_v_on_rel' : 'cling on'
,'_gnaw_v_away_rel' : 'gnaw away'
,'_box_v_in_rel' : 'box in'
,'_ride_v_up_rel' : 'ride up'
,'_put_v_away_rel' : 'put away'
,'_hold_v_on_rel' : 'hold on'
,'_knock_v_up_rel' : 'knock up'
,'_melt_v_down_rel' : 'melt down'
,'_get_v_in_rel' : 'get in'
,'_hand_v_in_rel' : 'hand in'
,'_slug_v_down_rel' : 'slug down'
,'_stop_v_off_rel' : 'stop off'
,'_crack_v_open_rel' : 'crack open'
,'_show_v_off_rel' : 'show off'
,'_shoot_v_off_rel' : 'shoot off'
,'_winnow_v_out_rel' : 'winnow out'
,'_chase_v_away_rel' : 'chase away'
,'_sell_v_out_rel' : 'sell out'
,'_take_v_up_rel' : 'take up'
,'_deck_v_out_rel' : 'deck out'
,'_nail_v_up_rel' : 'nail up'
,'_note_v_down_rel' : 'note down'
,'_throw_v_down_rel' : 'throw down'
,'_flip_v_on_rel' : 'flip on'
,'_chew_v_off_rel' : 'chew off'
,'_dig_v_in_rel' : 'dig in'
,'_team_v_up_rel' : 'team up'
,'_scribble_v_down_rel' : 'scribble down'
,'_tease_v_apart_rel' : 'tease apart'
,'_throw_v_over_rel' : 'throw over'
,'_scale_v_up_rel' : 'scale up'
,'_seal_v_off_rel' : 'seal off'
,'_weigh_v_up_rel' : 'weigh up'
,'_bring_v_up_rel' : 'bring up'
,'_hold_v_up_rel' : 'hold up on'
,'_slice_v_off_rel' : 'slice off'
,'_muddle_v_along_rel' : 'muddle along'
,'_put_v_back_rel' : 'put back'
,'_grey_v_out_rel' : 'grey out'
,'_roll_v_up_rel' : 'roll up'
,'_pump_v_in_rel' : 'pump in'
,'_beef_v_up_rel' : 'beef up'
,'_wedge_v_in_rel' : 'wedge in'
,'_blast_v_off_rel' : 'blast off'
,'_bring_v_off_rel' : 'bring off'
,'_wear_v_off_rel' : 'wear off'
,'_zone_v_out_rel' : 'zone out'
,'_tune_v_in_rel' : 'tune in'
,'_wake_v_up_rel' : 'wake x up'
,'_step_v_up_rel' : 'step up'
,'_filter_v_out_rel' : 'filter out'
,'_tip_v_over_rel' : 'tip over'
,'_inch_v_up_rel' : 'inch up'
,'_puff_v_out_rel' : 'puff out'
,'_clear_v_up_rel' : 'clear up'
,'_keel_v_over_rel' : 'keel over'
,'_pin_v_up_rel' : 'pin up'
,'_try_v_out_rel' : 'try out'
,'_cool_v_down-cause_rel' : 'cool down'
,'_hold_v_out_rel' : 'hold out'
,'_pull_v_open_rel' : 'pull open'
,'_check_v_in_rel' : 'check in'
,'_scrape_v_out_rel' : 'scrape out'
,'_call_v_up_rel' : 'call up'
,'_switch_v_on_rel' : 'switch on'
,'_chip_v_in_rel' : 'chip in'
,'_slough_v_off_rel' : 'slough off'
,'_slow_v_down_rel' : 'slow down'
,'_put_v_aside_rel' : 'put aside'
,'_wrest_v_away_rel' : 'wrest away'
,'_let_v_up_rel' : 'let up'
,'_read_v_in_rel' : 'read in'
,'_hold_v_down_rel' : 'hold down'
,'_break_v_in_rel' : 'break in'
,'_bring_v_over_rel' : 'bring over'
,'_rule_v_out_rel' : 'rule out'
,'_shrug_v_off_rel' : 'shrug off'
,'_pour_v_off_rel' : 'pour off'
,'_gather_v_up_rel' : 'gather up'
,'_flick_v_on_rel' : 'flick on'
,'_shake_v_off_rel' : 'shake off'
,'_kick_v_out_rel' : 'kick out'
,'_drag_v_down_rel' : 'drag down'
,'_whistle_v_up_rel' : 'whistle up'
,'_sober_v_up_rel' : 'sober up'
,'_move_v_on_rel' : 'move on'
,'_shop_v_around_rel' : 'shop around'
,'_rough_v_out_rel' : 'rough out'
,'_carry_v_away_rel' : 'carry away'
,'_curtain_v_off_rel' : 'curtain off'
,'_stamp_v_out_rel' : 'stamp out'
,'_bring_v_home_rel' : 'bring home'
,'_fish_v_out_rel' : 'fish out'
,'_get_v_out_rel' : 'get out'
,'_glue_v_on_rel' : 'glue on'
,'_log_v_out_rel' : 'log out'
,'_look_v_over_rel' : 'look over'
,'_rent_v_out_rel' : 'rent out'
,'_slap_v_down_rel' : 'slap down'
,'_settle_v_down_rel' : 'settle down'
,'_pop_v_down_rel' : 'pop down'
,'_chatter_v_on_rel' : 'chatter on'
,'_yank_v_out_rel' : 'yank out'
,'_vomit_v_up_rel' : 'vomit up'
,'_hand_v_out_rel' : 'hand out'
,'_drink_v_down_rel' : 'drink down'
,'_rocket_v_up_rel' : 'rocket up'
,'_nod_v_off_rel' : 'nod off'
,'_toss_v_away_rel' : 'toss away'
,'_hunt_v_up_rel' : 'hunt up'
,'_stop_v_over_rel' : 'stop over'
,'_scale_v_down_rel' : 'scale down'
,'_shove_v_in_rel' : 'shove in'
,'_bring_v_forth_rel' : 'bring forth'
,'_let_v_out_rel' : 'let out'
,'_write_v_off_rel' : 'write off'
,'_perk_v_up_rel' : 'perk up'
,'_buff_v_up_rel' : 'buff up'
,'_ramp_v_up_rel' : 'ramp up'
,'_damp_v_down_rel' : 'damp down'
,'_average_v_out_rel' : 'average out'
,'_hit_v_up_rel' : 'hit up'
,'_do_v_away-with_rel' : 'do away'
,'_come_v_on_rel' : 'come on'
,'_keep_v_on_rel' : 'keep'
,'_wander_v_off_rel' : 'wander off'
,'_shut_v_out_rel' : 'shut out'
,'_spin_v_off_rel' : 'spin off'
,'_leave_v_over_rel' : 'leave over'
,'_auction_v_off_rel' : 'auction off'
,'_hang_v_about_rel' : 'hang about'
,'_lay_v_down_rel' : 'lay down'
,'_stink_v_up_rel' : 'stink up'
,'_look_v_back-at_rel' : 'look back at'
,'_tack_v_on_rel' : 'tack on'
,'_firm_v_up_rel' : 'firm up'
,'_chip_v_away_rel' : 'chip away'
,'_shoo_v_in_rel' : 'shoo in'
,'_strike_v_up_rel' : 'strike up'
,'_ham_v_up_rel' : 'ham up'
,'_dish_v_out_rel' : 'dish out'
,'_shore_v_up_rel' : 'shore up'
,'_mask_v_out_rel' : 'mask out'
,'_plug_v_in_rel' : 'plug in'
,'_pull_v_on_rel' : 'pull on'
,'_stir_v_up_rel' : 'stir up'
,'_sweat_v_out_rel' : 'sweat out'
,'_pass_v_on_rel' : 'pass on'
,'_make_v_up-of_rel' : 'make up'
,'_peel_v_away_rel' : 'peel away'
,'_copy_v_out_rel' : 'copy out'
,'_go_v_along_rel' : 'go along with'
,'_tail_v_off_rel' : 'tail off'
,'_hole_v_up_rel' : 'hole up'
,'_quiet_v_down_rel' : 'quiet down'
,'_heat_v_up-cause_rel' : 'heat up'
,'_drag_v_on_rel' : 'drag on'
,'_head_v_out_rel' : 'head out'
,'_look_v_forward-to_rel' : 'look forward to'
,'_push_v_away_rel' : 'push away'
,'_boot_v_up_rel' : 'boot up'
,'_set_v_about_rel' : 'set about'
,'_spin_v_out_rel' : 'spin out'
,'_spit_v_out_rel' : 'spit out'
,'_lag_v_behind_rel' : 'lag behind'
,'_weigh_v_down_rel' : 'weigh down'
,'_eke_v_out_rel' : 'eke out'
,'_rub_v_out_rel' : 'rub out'
,'_seal_v_in_rel' : 'seal in'
,'_stave_v_off_rel' : 'stave off'
,'_get_v_around_rel' : 'get around'
,'_call_v_forth_rel' : 'call forth'
,'_cash_v_in_rel' : 'cash in'
,'_offer_v_up_rel' : 'offer up'
,'_play_v_up_rel' : 'play up'
,'_hide_v_out_rel' : 'hide out'
,'_hand_v_down_rel' : 'hand down'
,'_stress_v_out_rel' : 'stress out'
,'_tack_v_down_rel' : 'tack down'
,'_stir_v_in_rel' : 'stir in'
,'_live_v_up_rel' : 'live up'
,'_take_v_x-off_rel' : 'take off'
,'_let_v_down_rel' : 'let down'
,'_smuggle_v_in_rel' : 'smuggle in'
,'_fly_v_off_rel' : 'fly off'
,'_stack_v_up_rel' : 'stack up'
,'_winch_v_up_rel' : 'winch up'
,'_squeeze_v_by_rel' : 'squeeze by'
,'_shoo_v_out_rel' : 'shoo out'
,'_keep_v_out_rel' : 'keep out'
,'_eat_v_in_rel' : 'eat in'
,'_leave_v_off_rel' : 'leave off'
,'_drive_v_around_rel' : 'drive around'
,'_hang_v_out_rel' : 'hang out'
,'_let_v_on_rel' : 'let on'
,'_pack_v_in_rel' : 'pack in'
,'_give_v_up_rel' : 'give up'
,'_knock_v_off_rel' : 'knock off'
,'_make_v_up_rel' : 'make up'
,'_root_v_out_rel' : 'root out'
,'_toss_v_aside_rel' : 'toss aside'
,'_load_v_up_rel' : 'load up'
,'_do_v_up_rel' : 'do up'
,'_build_v_up_rel' : 'build up'
,'_lift_v_off_rel' : 'lift off'
,'_soak_v_off_rel' : 'soak off'
,'_gin_v_up_rel' : 'gin up'
,'_look_v_up-to_rel' : 'look up to'
,'_wander_v_up_rel' : 'wander up'
,'_well_v_up_rel' : 'well up'
,'_set_v_in_rel' : 'set in'
,'_call_v_off_rel' : 'call off'
,'_skip_v_out_rel' : 'skip out'
,'_duck_v_out_rel' : 'duck out'
,'_get_v_through_rel' : 'get through'
,'_pull_v_out-of_rel' : 'pull out of'
,'_cross_v_off_rel' : 'cross off'
,'_hunt_v_out_rel' : 'hunt out'
,'_set_v_out-aim_rel' : 'set out'
,'_come_v_along_rel' : 'come along'
,'_divide_v_up_rel' : 'divide up'
,'_bring_v_about_rel' : 'bring about'
,'_blow_v_away_rel' : 'blow away'
,'_bottom_v_out_rel' : 'bottom out'
,'_gut_v_out_rel' : 'gut out'
,'_cut_v_up_rel' : 'cut up'
,'_shout_v_out_rel' : 'shout out'
,'_rough_v_in_rel' : 'rough in'
,'_ease_v_up_rel' : 'ease up'
,'_put_v_forth_rel' : 'put forth'
,'_rest_v_up_rel' : 'rest up'
,'_square_v_away_rel' : 'square away'
,'_type_v_up_rel' : 'type up'
,'_bum_v_around_rel' : 'bum around'
,'_chop_v_up_rel' : 'chop up'
,'_tune_v_up_rel' : 'tune up'
,'_free_v_up_rel' : 'free'
,'_order_v_up_rel' : 'order up'
,'_crowd_v_out_rel' : 'crowd out'
,'_ask_v_off_rel' : 'ask off'
,'_draft_v_in_rel' : 'draft in'
,'_mail_v_in_rel' : 'mail in'
,'_put_v_down_rel' : 'put down'
,'_slam_v_down_rel' : 'slam down'
,'_saddle_v_up_rel' : 'saddle up'
,'_scrape_v_away_rel' : 'scrape away'
,'_push_v_down_rel' : 'push down'
,'_tear_v_off_rel' : 'tear off'
,'_die_v_away_rel' : 'die away'
,'_call_v_back_rel' : 'call back'
,'_smooth_v_over_rel' : 'smooth over'
,'_flip_v_down_rel' : 'flip down'
,'_want_v_back_rel' : 'want back'
,'_sponge_v_off-of_rel' : 'sponge off of'
,'_read_v_off_rel' : 'read off'
,'_zero_v_in-on_rel' : 'zero in'
,'_pucker_v_up_rel' : 'pucker up'
,'_screen_v_out_rel' : 'screen out'
,'_shove_v_through_rel' : 'shove through'
,'_kick_v_around_rel' : 'kick around'
,'_cut_v_out_rel' : 'cut out'
,'_crack_v_down_rel' : 'crack down'
,'_sniff_v_out_rel' : 'sniff out'
,'_clean_v_out_rel' : 'clean out'
,'_play_v_down_rel' : 'play down'
,'_sort_v_out_rel' : 'sort out'
,'_slim_v_down_rel' : 'slim down'
,'_die_v_off_rel' : 'die off'
,'_stay_v_over_rel' : 'stay over'
,'_whisk_v_away_rel' : 'whisk away'
,'_trim_v_away_rel' : 'trim away'
,'_clip_v_off_rel' : 'clip off'
,'_code_v_up_rel' : 'code up'
,'_throw_v_open_rel' : 'throw open'
,'_hold_v_still_rel' : 'hold still'
,'_rat_v_out_rel' : 'rat out'
,'_fire_v_up_rel' : 'fire'
,'_shoot_v_down_rel' : 'shoot down'
,'_sleep_v_off_rel' : 'sleep off'
,'_spur_v_on_rel' : 'spur on'
,'_run_v_up_rel' : 'run up'
,'_trim_v_off_rel' : 'trim off'
,'_point_v_up_rel' : 'point up'
,'_boil_v_over_rel' : 'boil over'
,'_break_v_open_rel' : 'break open'
,'_copy_v_down_rel' : 'copy down'
,'_branch_v_out_rel' : 'branch out'
,'_fog_v_up_rel' : 'fog up'
,'_gun_v_down_rel' : 'gun down'
,'_line_v_up_rel' : 'line up'
,'_bow_v_down_rel' : 'bow down'
,'_bail_v_out_rel' : 'bail out'
,'_laugh_v_off_rel' : 'laugh off'
,'_rip_v_up_rel' : 'rip up'
,'_carry_v_out_rel' : 'carry out'
,'_come_v_together_rel' : 'come together'
,'_ground_v_out_rel' : 'ground out'
,'_find_v_out-about_rel' : 'find out'
,'_render_v_up_rel' : 'render up'
,'_simmer_v_down_rel' : 'simmer down'
,'_start_v_off_rel' : 'start off'
,'_stick_v_up-for_rel' : 'stick up for'
,'_trump_v_up_rel' : 'trump up'
,'_fall_v_back_rel' : 'fall back'
,'_march_v_off_rel' : 'march off'
,'_stop_v_in_rel' : 'stop in'
,'_pull_v_out_rel' : 'pull out'
,'_wipe_v_off_rel' : 'wipe off'
,'_draw_v_down_rel' : 'draw down'
,'_mail_v_out_rel' : 'mail out'
,'_keep_v_up_rel' : 'keep up with'
,'_shell_v_out_rel' : 'shell out'
,'_back_v_out_rel' : 'back out'
,'_find_v_out_rel' : 'find out'
,'_spurt_v_out_rel' : 'spurt out'
,'_string_v_on_rel' : 'string'
,'_weigh_v_in_rel' : 'weigh in'
,'_move_v_about_rel' : 'move about'
,'_board_v_up_rel' : 'board up'
,'_tease_v_open_rel' : 'tease open'
,'_top_v_off_rel' : 'top off'
,'_pull_v_off_rel' : 'pull off'
,'_schlep_v_around_rel' : 'schlep around'
,'_swear_v_in_rel' : 'swear in'
,'_take_v_apart_rel' : 'take apart'
,'_cut_v_short_rel' : 'cut short'
,'_turn_v_out_rel' : 'turn out'
,'_spill_v_out_rel' : 'spill out'
,'_swing_v_out_rel' : 'swing out'
,'_think_v_up_rel' : 'think up'
,'_turn_v_around_rel' : 'turn around'
,'_fork_v_over_rel' : 'fork over'
,'_look_v_up-dir_rel' : 'look up'
,'_patch_v_up_rel' : 'patch up'
,'_winch_v_in_rel' : 'winch in'
,'_bow_v_out_rel' : 'bow out'
,'_chat_v_up_rel' : 'chat up'
,'_add_v_up_rel' : 'add up'
,'_define_v_away_rel' : 'define away'
,'_pack_v_up_rel' : 'pack up'
,'_call_v_down_rel' : 'call down'
,'_come_v_through_rel' : 'come through'
,'_flip_v_off_rel' : 'flip off'
,'_hold_v_up_rel' : 'hold up'
,'_rinse_v_out_rel' : 'rinse out'
,'_key_v_in_rel' : 'key in'
,'_break_v_away_rel' : 'break away'
,'_set_v_out_rel' : 'set out'
,'_show_v_through_rel' : 'show through'
,'_hide_v_away_rel' : 'hide away'
,'_pile_v_on_rel' : 'pile on'
,'_blow_v_out_rel' : 'blow out'
,'_sound_v_off_rel' : 'sound off'
,'_lock_v_out_rel' : 'lock out'
,'_stick_v_on_rel' | |
# pommerman/agents/gameNodes/game_node.py
import numpy as np
import random
import copy
from collections import defaultdict
import time
from pommerman import utility, constants, characters, forward_model, agents
from pommerman.agents import helper_func
#from agent_classes import Agent
class State():
def am_I_alive(self):
for i, agent in enumerate(self.curr_agents):
if (agent.agent_id + 10 == self.self_agent.value):
return agent.is_alive
    def __init__(self, obs, init=False, bombing_agents = {}):
        """Snapshot of the game as seen by one agent, used as a search node.

        :param obs: pommerman observation dict (board, bomb_life, position,
            teammate, enemies, ammo, ...)
        :param init: when True, also derive the flame/bomb/item/agent
            collections from the board so this state can be stepped forward
        :param bombing_agents: dict mapping bomb position -> id of the agent
            that dropped it; deep-copied below, so the mutable default is
            never mutated across calls

        NOTE(review): a mutable default argument (``bombing_agents={}``) is
        risky in general; it is safe here only because of the deepcopy —
        confirm before changing the body.
        """
        self._game_mode = constants.GameType.FFA
        # move that produced this state (set by get_all_possible_states)
        self.move = None
        self._obs = obs
        self._my_position = tuple(obs['position'])
        self._board = np.array(obs['board'])
        self._bomb_life = np.array(self._obs['bomb_life'])
        self._teammate = obs['teammate']
        self._enemies = [constants.Item(e) for e in obs['enemies']]
        self._ammo = int(obs['ammo'])
        self.fm = forward_model.ForwardModel()
        self.self_agent = self.find_self_agent(self._obs)
        agents_id = [constants.Item.Agent0, constants.Item.Agent1, \
                        constants.Item.Agent2, constants.Item.Agent3]
        # NOTE(review): hard-coded FFA — needs changing for team/radio modes
        self._agents = [characters.Bomber(aid.value, "FFA") for aid in agents_id]
        self.bombing_agents = copy.deepcopy(bombing_agents)
        self.score = 0
        if init:
            # Derive the stepping collections from the observed board
            self.curr_flames = self.convert_flames(self._board)
            self.curr_bombs = self.convert_bombs(np.array(obs['bomb_blast_strength']), np.array(obs['bomb_life']))
            self.curr_items = self.convert_items(self._board)
            self.curr_agents = self.convert_agents(self._board)
            self.last_items = self.curr_items
            if(bombing_agents != {}):
                # Rebuild the bombs with ownership info when it was provided
                self.curr_bombs = self.convert_bombs_two(np.array(self._obs['bomb_blast_strength']), self._bomb_life, bombing_agents)
def advance_game_on_copy(self, action):
board = copy.deepcopy(self._board)
curr_flames = self.convert_flames(board)
curr_items = self.convert_items(board)
curr_agents = self.convert_agents(board)
bombing_agents = copy.deepcopy(self.bombing_agents)
actions = [-1 for i in range(4)]
self_agent_value = self.self_agent if type(self.self_agent) == int else self.self_agent.value
self.self_agent_value = self_agent_value
actions[self_agent_value - 10] = action
agents_to_bomb = []
#create the info for the enemies and give them info
for i, agent in enumerate(self.curr_agents):
if(agent.is_alive):
if (agent.agent_id + 10 != self_agent_value):
copy_obs = copy.deepcopy(self._obs)
# modify enemies
agent_idx = None
print(copy_obs)
for j, enemy in enumerate(copy_obs['enemies']):
enemyId = enemy if type(enemy) == int else enemy.value
print(agent.agent_id + 10, enemyId)
if agent.agent_id + 10 == enemyId:
agent_idx = j
break
# ignore teammate at all, otherwise, try to replace teammate, and enemy at the same time
if not agent_idx:
actions[i] = 0
continue
agent_val = copy_obs['enemies'][agent_idx] if type(copy_obs['enemies'][agent_idx]) == int else copy_obs['enemies'][agent_idx].value
del copy_obs['enemies'][agent_idx]
copy_obs['enemies'].append(self.self_agent)
# modify my position
my_position = np.where(self._board == agent.agent_id + 10)
copy_obs['position'] = (my_position[0][0], my_position[1][0])
# fuse_everything_in_new_obs
copy_obs['ammo'] = 1
# actions.append(agent.act(copy_obs)) # REFRACTION --> place action according to the agent id
agent_action = agent.act(copy_obs, [constants.Action.Up, constants.Action.Stop, constants.Action.Down, constants.Action.Left, constants.Action.Right, 5])
actions[agent_val - 10] = agent_action
#they are responsible for dropping a bomb
if (agent_action == 5):
agents_to_bomb.append(((my_position[0][0], my_position[1][0]), agent.agent_id))
#calc the bombs for this board
curr_bombs = self.convert_bombs_two(np.array(self._obs['bomb_blast_strength']), self._bomb_life, bombing_agents)
temp_board, temp_curr_agent,temp_curr_bombs, temp_curr_items, temp_curr_flames = self.fm.step(actions, board, curr_agents, curr_bombs, curr_items, curr_flames)
#if an enemy or player is going to bomb, add it
for a in agents_to_bomb:
bombing_agents[a[0]] = a[1]
if action == 5:
bombing_agents[(self._my_position[0], self._my_position[1])] = self_agent_value - 10
return temp_board, temp_curr_agent,temp_curr_bombs, temp_curr_items, temp_curr_flames, bombing_agents
def get_all_possible_states(self):
list_of_states = []
moves = [constants.Action.Stop, constants.Action.Up, constants.Action.Down, constants.Action.Left, constants.Action.Right, 5]
#check if move will land me on top of a bomb
#unsafe_directions = self._directions_in_range_of_bomb(self._board, self._my_position, self.curr_bombs)
#if unsafe_directions:
# if(len(unsafe_directions) != 4):
# for i in unsafe_directions:
#print("get all possible states, removing unsafe move", i)
# moves.remove(i)
# #if I am on a bomb, remove stop
# if self._bomb_life[self._my_position[0]][self._my_position[1]] > 0:
# if constants.Action.Stop in moves:
# moves.remove(constants.Action.Stop)
lost_all_moves = False
if len(moves) == 0:
lost_all_moves = True
# input("FK<NAME>")
moves = [constants.Action.Up, constants.Action.Down, constants.Action.Left, constants.Action.Right, constants.Action.Stop, 5]
for move in moves:
if move == 5 or utility.is_valid_direction(self._board, self._my_position, move):
#check if position is passible
check_pos = None
if move == constants.Action.Up:
check_pos = (self._my_position[0]-1, self._my_position[1])
elif move == constants.Action.Down:
check_pos = (self._my_position[0]+1, self._my_position[1])
elif move == constants.Action.Left:
check_pos = (self._my_position[0], self._my_position[1]-1)
elif move == constants.Action.Right:
check_pos = (self._my_position[0], self._my_position[1]+1)
if check_pos != None:
if not utility.position_is_passable(self._board, check_pos, self._enemies):
#if i am blocked by a bomb, try kicking
if self._obs['can_kick']:
if move == constants.Action.Up:
if self._board[self._my_position[0]-1][self._my_position[1]] == 3:
if self._my_position[0] - 2 >= 0:
if self._board[self._my_position[0]-2][self._my_position[1]] != 0:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
elif move == constants.Action.Down:
if self._board[self._my_position[0]+1][self._my_position[1]] == 3:
if self._my_position[0] + 2 < 11:
if self._board[self._my_position[0]+2][self._my_position[1]] != 0:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
elif move == constants.Action.Left:
if self._board[self._my_position[0]][self._my_position[1]-1] == 3:
if self._my_position[1] - 2 >= 0:
if self._board[self._my_position[0]][self._my_position[1]-2] != 0:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
elif move == constants.Action.Right:
if self._board[self._my_position[0]][self._my_position[1]+1] == 3:
if self._my_position[1] + 2 < 11:
if self._board[self._my_position[0]][self._my_position[1]+2] != 0:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
else:
# print("removing non passable move", move)
continue
#check to see if its a safe dir
if move == 5 and self._ammo == 0:
# print("bombing without a bomb, skip")
#can not bomb with no ammo
continue
#if I am on a bomb, lets not bomb
if move == 5 and self._my_position in self.bombing_agents:
# print("bombing while on bomb, skip")
continue
print(move)
temp_board, temp_curr_agent,temp_curr_bombs, temp_curr_items, temp_curr_flames, bombing_agents = self.advance_game_on_copy(move)
temp_obs = self.fm.get_observations(temp_board, temp_curr_agent,temp_curr_bombs, False, 11)[self.self_agent_value - 10 ]
temp_obs['ammo']= self._ammo
if move == 5:
bombing_agents[(self._my_position[0], self._my_position[1])] = self.self_agent_value - 10
temp_obs['ammo'] = self._ammo - 1
temp_obs['enemies'] = self._enemies
temp_state = State(temp_obs, True)
temp_state.bombing_agents = bombing_agents
temp_state.move = move
temp_state.score = temp_state.get_score()
temp_state.score -= 0.1
#IF THE SCORE IS NEGATIVE, WE DONT WANT THIS STATE
#IF THE AGENT IS DEAD, NEGATIVE
if not temp_state.am_I_alive:
temp_state.score -= 100
if lost_all_moves == True:
temp_state.score -= 200
list_of_states.append(temp_state)
return list_of_states
def advance_game(self, action):
actions = [-1 for i in range(4)]
self_agent = self.self_agent
actions[self_agent.value - 10] = action
for i, agent in enumerate(self.curr_agents):
my_position = np.where(self._board == agent.agent_id + 10)
agent.is_alive = my_position[0]
if(agent.is_alive):
if (agent.agent_id + 10 != self_agent.value):
copy_obs = copy.deepcopy(self._obs)
# modify enemies
agent_idx = None
for j, enemy in enumerate(copy_obs['enemies']):
if agent.agent_id + 10 == enemy.value:
agent_idx = j
break
if not agent_idx:
actions[i] = 0
continue
agent_val = copy_obs['enemies'][agent_idx].value
del copy_obs['enemies'][agent_idx]
copy_obs['enemies'].append(self_agent)
# modify my position
copy_obs['position'] = (my_position[0][0], my_position[1][0])
# fuse_everything_in_new_obs
copy_obs['ammo'] = 1
# actions.append(agent.act(copy_obs)) # REFRACTION --> place action according to the agent id
agent_action = agent.act(copy_obs, [constants.Action.Stop, constants.Action.Up, constants.Action.Down, constants.Action.Left, constants.Action.Right, 5])
actions[agent_val - 10] = agent_action
#they are responsible for dropping a bomb
if (agent_action == 5):
self.bombing_agents[ (my_position[0][0], my_position[1][0])] = agent.agent_id
self.last_items = self.curr_items
self._board, self.curr_agents, self.curr_bombs, self.curr_items, self.curr_flames = \
self.fm.step(actions, self._board, self.curr_agents, self.curr_bombs, self.curr_items, self.curr_flames)
self.update_obs()
self.score -= 0.1
def update_obs(self):
self._obs = self.fm.get_observations(self._board, self.curr_agents, self.curr_bombs, False, 11)[self.self_agent.value - 10 ]
self._obs['enemies'] = self._enemies
def copy_from(self, source_node):
self._obs = copy.deepcopy(source_node._obs)
self._board = copy.deepcopy(source_node.curr_agents)
self.curr_bombs = copy.deepcopy(source_node.curr_bombs)
self.curr_items = copy.deepcopy(source_node.curr_items)
self.curr_flames = copy.deepcopy(source_node.curr_flames)
self.score = source_node.score
self.last_items = copy.deepcopy(source_node.last_items)
def get_score(self):
score = 0
# self_agent = self.self_agent
self_agent_value = self.self_agent if type(self.self_agent) == int else self.self_agent.value
#if the enemy agent is not alive, then the score increases
for i, agent in enumerate(self.curr_agents):
if 10 + i != self_agent_value:
if not agent.is_alive :
score += 5
else:
#if we are dead, fk
if not agent.is_alive:
score -= score * 0.95
# if the agent is close to its enemy, then the score goes up
self_agent_instance = self.curr_agents[self_agent_value - 10]
for i, agent in enumerate(self.curr_agents):
if 10 + i == self_agent_value:
continue
if not agent.is_alive:
continue
tar, tac = agent.position # target agent row, target agent column
sar, sac = self_agent_instance.position
distance = abs(tar - sar) + abs(tac - sac)#(((tar - sar) ** 2 + (tac - sac) ** 2) ** 0.5
if distance != 0:
score += (1 / distance * 5)*5
# if the agent has eaten good stuff, then score goes up
if self._obs['position'] in self.last_items:
val = self.last_items[self._obs['position']]
# if val != constants.Item.Skull.value:
score += 5
# else:
# score -= 5
| |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fileinput
import json
import logging
import os
import re
from collections import namedtuple
from wlgen import Workload
from devlib.utils.misc import ranges_to_list
_Phase = namedtuple('Phase', 'duration_s, period_ms, duty_cycle_pct')

class Phase(_Phase):
    """
    Descriptor for an RT-App load phase.

    A Phase is an immutable (namedtuple-backed) triple describing one
    segment of generated load.

    :param duration_s: the phase duration in [s]
    :type duration_s: int

    :param period_ms: the phase period in [ms]
    :type period_ms: int

    :param duty_cycle_pct: the generated load in [%]
    :type duty_cycle_pct: int
    """
class RTA(Workload):
    def __init__(self,
                 target,
                 name,
                 calibration=None):
        """Create an rt-app workload executor.

        :param target: devlib target the workload will run on
        :param name: workload name, used for output/JSON file naming
        :param calibration: optional per-CPU pLoad calibration map (as
            returned by RTA.calibrate); when None, a 'CPU<n>' placeholder
            is used as the calibration reference instead
        """
        self.logger = logging.getLogger('rtapp')
        # rt-app calibration (per-CPU pLoad map or None)
        self.pload = calibration
        # TODO: Assume rt-app is pre-installed on target
        # self.target.setup('rt-app')
        super(RTA, self).__init__(target, name, calibration)
        # rt-app executor
        self.wtype = 'rtapp'
        self.executor = 'rt-app'
        # Default initialization, filled in by conf()/run()
        self.json = None          # generated rt-app JSON config filename
        self.rta_profile = None   # profile-mode task description
        self.loadref = None       # calibration reference tag (e.g. 'LITTLE')
        self.rta_cmd = None
        self.rta_conf = None
        self.test_label = None
        # Setup RTA callbacks: pull logs/JSON from the target after a run
        self.setCallback('postrun', self.__postrun)
    @staticmethod
    def calibrate(target):
        """Run rt-app calibration on every online CPU of *target*.

        Pins a short FIFO periodic task to each CPU in turn (with the
        'performance' cpufreq governor set) and parses the resulting
        'pLoad = <n>ns' line from rt-app's output.

        :param target: devlib target to calibrate
        :returns: dict mapping cpu id -> pLoad value in [ns]

        NOTE(review): ``Periodic`` is not imported in the visible part of
        this module — presumably defined elsewhere in it; confirm.
        """
        pload_regexp = re.compile(r'pLoad = ([0-9]+)ns')
        pload = {}
        # target.cpufreq.save_governors()
        target.cpufreq.set_all_governors('performance')
        for cpu in target.list_online_cpus():
            logging.info('CPU%d calibration...', cpu)
            # Highest RT priority allowed for us, capped at 10
            max_rtprio = int(target.execute('ulimit -Hr').split('\r')[0])
            logging.debug('Max RT prio: %d', max_rtprio)
            if max_rtprio > 10:
                max_rtprio = 10
            rta = RTA(target, 'rta_calib')
            rta.conf(kind='profile',
                    params = {
                        'task1': Periodic(
                            period_ms=100,
                            duty_cycle_pct=50,
                            duration_s=1,
                            sched={
                                'policy': 'FIFO',
                                'prio' : max_rtprio
                            }
                        ).get()
                    },
                    cpus=[cpu])
            rta.run(as_root=True)
            # Extract this CPU's pLoad from the rt-app output
            for line in rta.getOutput().split('\n'):
                pload_match = re.search(pload_regexp, line)
                if pload_match is None:
                    continue
                pload[cpu] = int(pload_match.group(1))
                logging.debug('>>> cpu%d: %d', cpu, pload[cpu])
        # target.cpufreq.load_governors()
        logging.info('Target RT-App calibration:')
        logging.info('%s',
                "{" + ", ".join('"%r": %r' % (key, pload[key]) for key in pload) + "}")
        return pload
    def __postrun(self, params):
        """Post-run callback: collect rt-app artifacts from the target.

        Pulls the per-task log files and the generated JSON config into
        ``params['destdir']`` and saves the executor output there as
        ``output.log``. No-op when no destination directory is given.
        """
        destdir = params['destdir']
        if destdir is None:
            return
        self.logger.debug('%14s - Pulling logfiles to [%s]...',
                          'RTApp', destdir)
        # One log file per configured rt-app task
        for task in self.tasks.keys():
            logfile = "'{0:s}/*{1:s}*.log'"\
                    .format(self.run_dir, task)
            self.target.pull(logfile, destdir)
        self.logger.debug('%14s - Pulling JSON to [%s]...',
                          'RTApp', destdir)
        self.target.pull('{}/{}'.format(self.run_dir, self.json), destdir)
        # Save the captured executor stdout as well
        logfile = '{}/output.log'.format(destdir)
        self.logger.debug('%14s - Saving output on [%s]...',
                          'RTApp', logfile)
        with open(logfile, 'w') as ofile:
            for line in self.output['executor'].split('\n'):
                ofile.write(line+'\n')
def _getFirstBiggest(self, cpus):
# Non big.LITTLE system:
if 'bl' not in self.target.modules:
# return the first CPU of the last cluster
platform = self.target.platform
cluster_last = list(set(platform.core_clusters))[-1]
cluster_cpus = [cpu_id
for cpu_id, cluster_id in enumerate(platform.core_clusters)
if cluster_id == cluster_last]
# If CPUs have been specified': return the fist in the last cluster
if cpus:
for cpu_id in cpus:
if cpu_id in cluster_cpus:
return cpu_id
# Otherwise just return the first cpu of the last cluster
return cluster_cpus[0]
# big.LITTLE system:
for c in cpus:
if c not in self.target.bl.bigs:
continue
return c
# Only LITTLE CPUs, thus:
# return the first possible cpu
return cpus[0]
def _getFirstBig(self, cpus=None):
# Non big.LITTLE system:
if 'bl' not in self.target.modules:
return self._getFirstBiggest(cpus)
if cpus:
for c in cpus:
if c not in self.target.bl.bigs:
continue
return c
# Only LITTLE CPUs, thus:
# return the first big core of the system
if self.target.big_core:
# Big.LITTLE system
return self.target.bl.bigs[0]
return 0
def _getFirstLittle(self, cpus=None):
# Non big.LITTLE system:
if 'bl' not in self.target.modules:
# return the first CPU of the first cluster
platform = self.target.platform
cluster_first = list(set(platform.core_clusters))[0]
cluster_cpus = [cpu_id
for cpu_id, cluster_id in enumerate(platform.core_clusters)
if cluster_id == cluster_first]
# If CPUs have been specified': return the fist in the first cluster
if cpus:
for cpu_id in cpus:
if cpu_id in cluster_cpus:
return cpu_id
# Otherwise just return the first cpu of the first cluster
return cluster_cpus[0]
# Try to return one LITTLE CPUs among the specified ones
if cpus:
for c in cpus:
if c not in self.target.bl.littles:
continue
return c
# Only big CPUs, thus:
# return the first LITTLE core of the system
if self.target.little_core:
# Big.LITTLE system
return self.target.bl.littles[0]
return 0
def getTargetCpu(self, loadref):
# Select CPU for task calibration, which is the first little
# of big depending on the loadref tag
if self.pload is not None:
if loadref and loadref.upper() == 'LITTLE':
target_cpu = self._getFirstLittle()
self.logger.debug('%14s - ref on LITTLE cpu: %d',
'RTApp', target_cpu)
else:
target_cpu = self._getFirstBig()
self.logger.debug('%14s - ref on big cpu: %d',
'RTApp', target_cpu)
return target_cpu
# These options are selected only when RTApp has not been
# already calibrated
if self.cpus is None:
target_cpu = self._getFirstBig()
self.logger.debug('%14s - ref on cpu: %d',
'RTApp', target_cpu)
else:
target_cpu = self._getFirstBiggest(self.cpus)
self.logger.debug('%14s - ref on (possible) biggest cpu: %d',
'RTApp', target_cpu)
return target_cpu
def getCalibrationConf(self, target_cpu=0):
if self.pload is None:
return 'CPU{0:d}'.format(target_cpu)
return self.pload[target_cpu]
def _confCustom(self):
if self.duration is None:
raise ValueError('Workload duration not specified')
target_cpu = self.getTargetCpu(self.loadref)
calibration = self.getCalibrationConf(target_cpu)
self.json = '{0:s}_{1:02d}.json'.format(self.name, self.exc_id)
ofile = open(self.json, 'w')
ifile = open(self.params['custom'], 'r')
replacements = {
'__DURATION__' : str(self.duration),
'__PVALUE__' : str(calibration),
'__LOGDIR__' : str(self.run_dir),
'__WORKDIR__' : '"'+self.target.working_directory+'"',
}
for line in ifile:
for src, target in replacements.iteritems():
line = line.replace(src, target)
ofile.write(line)
ifile.close()
ofile.close()
return self.json
def _confProfile(self):
# Task configuration
target_cpu = self.getTargetCpu(self.loadref)
self.rta_profile = {
'tasks': {},
'global': {}
}
# Initialize global configuration
global_conf = {
'default_policy': 'SCHED_OTHER',
'duration': -1,
'calibration': 'CPU'+str(target_cpu),
'logdir': self.run_dir,
}
# Setup calibration data
calibration = self.getCalibrationConf(target_cpu)
global_conf['calibration'] = calibration
if self.duration is not None:
global_conf['duration'] = self.duration
self.logger.warn('%14s - Limiting workload duration to %d [s]',
'RTApp', global_conf['duration'])
else:
self.logger.info('%14s - Workload duration defined by longest task',
'RTApp')
# Setup default scheduling class
if 'policy' in self.sched:
policy = self.sched['policy'].upper()
if policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
raise ValueError('scheduling class {} not supported'\
.format(policy))
global_conf['default_policy'] = 'SCHED_' + self.sched['policy']
self.logger.info('%14s - Default policy: %s',
'RTApp', global_conf['default_policy'])
# Setup global configuration
self.rta_profile['global'] = global_conf
# Setup tasks parameters
for tid in sorted(self.params['profile'].keys()):
task = self.params['profile'][tid]
# Initialize task configuration
task_conf = {}
if 'sched' not in task:
policy = 'DEFAULT'
else:
policy = task['sched']['policy'].upper()
if policy == 'DEFAULT':
task_conf['policy'] = global_conf['default_policy']
sched_descr = 'sched: using default policy'
elif policy not in ['OTHER', 'FIFO', 'RR', 'DEADLINE']:
raise ValueError('scheduling class {} not supported'\
.format(task['sclass']))
else:
task_conf.update(task['sched'])
task_conf['policy'] = 'SCHED_' + policy
sched_descr = 'sched: {0:s}'.format(task['sched'])
# Initialize task phases
task_conf['phases'] = {}
self.logger.info('%14s - ------------------------', 'RTApp')
self.logger.info('%14s - task [%s], %s', 'RTApp', tid, sched_descr)
if 'delay' in task.keys():
if task['delay'] > 0:
task['delay'] = int(task['delay'] * 1e6)
task_conf['phases']['p000000'] = {}
task_conf['phases']['p000000']['delay'] = task['delay']
self.logger.info('%14s - | start delay: %.6f [s]',
'RTApp', task['delay'] / 1e6)
self.logger.info('%14s - | calibration CPU: %d',
'RTApp', target_cpu)
if 'loops' not in task.keys():
task['loops'] = 1
task_conf['loop'] = task['loops']
self.logger.info('%14s - | loops count: %d',
'RTApp', task['loops'])
# Setup task affinity
if 'cpus' in task and task['cpus']:
task_conf['cpus'] = ranges_to_list(task['cpus'])
self.logger.info('%14s - | CPUs affinity: %s',
'RTApp', task['cpus'])
# Setup task configuration
self.rta_profile['tasks'][tid] = task_conf
# Getting task phase descriptor
pid=1
for phase in task['phases']:
# Convert time parameters to integer [us] units
duration = int(phase.duration_s * 1e6)
period = int(phase.period_ms * 1e3)
# A duty-cycle of 0[%] translates on a 'sleep' phase
if phase.duty_cycle_pct == 0:
self.logger.info('%14s - + phase_%06d: sleep %.6f [s]',
'RTApp', pid, duration/1e6)
task_phase = {
'loop': 1,
'sleep': duration,
}
# A duty-cycle of 100[%] translates on a 'run-only' phase
elif phase.duty_cycle_pct == 100:
self.logger.info('%14s - + phase_%06d: batch %.6f [s]',
'RTApp', pid, duration/1e6)
task_phase = {
'loop': 1,
'run': duration,
}
# A certain number of loops is requires to generate the
# proper load
else:
cloops = -1
if duration >= 0:
cloops = int(duration / period)
sleep_time = period * (100 - phase.duty_cycle_pct) / 100
running_time = period - sleep_time
self.logger.info(
'%14s - + phase_%06d: duration %.6f [s] (%d loops)',
'RTApp', pid, duration/1e6, cloops)
self.logger.info(
'%14s - | period %6d | |
""" Handle MDIO interface via bitbang and SPI bus. """
from typing import List, Optional
from pyrpio.gpio import CdevGPIO
from pyrpio.spi import SPI
class MDIO:
    """ Bit-bang MDIO master (IEEE 802.3 Clause 22 / Clause 45) over two GPIO lines. """
    # 2-bit start-of-frame field values
    C22_FRAME = 0x01
    C45_FRAME = 0x00
    # 2-bit opcode field values
    OP_C22_WR = 0x01
    OP_C22_RD = 0x02
    OP_C45_AD = 0x00
    OP_C45_WR = 0x01
    OP_C45_RD_INC = 0x02
    OP_C45_RD = 0x03

    def __init__(self, clk_pin: int, data_pin: int, path: str, **kwargs):
        """
        Bit-bang MDIO interface via cdev gpio.
        Args:
            clk_pin (int): GPIO line of the MDC clock
            data_pin (int): GPIO line of the MDIO data signal
            path (str): gpiochip character device path (e.g. '/dev/gpiochip0')
        Keyword Args:
            clock_delay (int): busy-loop iterations per clock phase (default 50)
            setup_delay (int): busy-loop iterations for data setup (default 10)
            read_delay (int): busy-loop iterations before turnaround reads (default 1000)
        """
        self.clk_pin = clk_pin
        self.data_pin = data_pin
        self.clk_gpio = CdevGPIO(path=path, line=clk_pin, direction="low")
        self.data_gpio = CdevGPIO(path=path, line=data_pin, direction="high", bias="pull_up")
        self._clock_delay = kwargs.get('clock_delay', 50)
        self._setup_delay = kwargs.get('setup_delay', 10)
        self._read_delay = kwargs.get('read_delay', 1000)

    def open(self):
        """ Open mdio bus (no-op: the GPIO lines are claimed in __init__). """

    def close(self):
        """ Close mdio bus and release both GPIO lines. """
        self.clk_gpio.close()
        self.data_gpio.close()

    def _ndelay(self, delay):  # pylint: disable=no-self-use
        """ Crude busy-wait; actual duration depends on CPU speed. """
        while delay > 0:
            delay -= 1

    def _write_bit(self, val: int):
        """ Shift out one bit: data valid before the rising MDC edge. """
        self._ndelay(self._clock_delay)
        self.data_gpio.write(bool(val))
        self._ndelay(self._setup_delay)
        self.clk_gpio.write(True)
        self._ndelay(self._clock_delay)
        self.clk_gpio.write(False)

    def _read_bit(self) -> int:
        """ Sample one bit from the data line, then pulse MDC. """
        self._ndelay(self._clock_delay)
        v = int(self.data_gpio.read())
        self._ndelay(self._setup_delay)
        self.clk_gpio.write(True)
        self._ndelay(self._clock_delay)
        self.clk_gpio.write(False)
        return v

    def _write_bits(self, val, bits):
        """ Shift out the low *bits* bits of *val*, MSB first. """
        for i in range(bits - 1, -1, -1):
            self._write_bit((val >> i) & 1)

    def _read_bits(self, bits) -> int:
        """ Shift in *bits* bits, MSB first. """
        ret = 0
        for _ in range(bits):
            ret = (ret << 1) | self._read_bit()
        return ret

    def _flush(self):
        """ Emit 32 consecutive 1s (frame preamble / bus idle). """
        for _ in range(32):
            self._write_bit(1)

    def _cmd(self, sf, op, pad, dad):
        """ Send preamble followed by the ST/OP/PAD/DAD frame header. """
        # Preamble
        self._flush()
        # Header
        self._write_bits(sf & 3, 2)  # Start frame
        self._write_bits(op & 3, 2)  # OP Code
        self._write_bits(pad, 5)     # Phy addr
        self._write_bits(dad, 5)     # Reg addr(C22) / dev type(C45)

    def _c45_write_addr(self, pad: int, dad: int, reg: int):
        """ C45 ADDRESS cycle: latch the 16-bit register address. """
        # Send preamble/header - C45 - ADDR
        self._cmd(MDIO.C45_FRAME, MDIO.OP_C45_AD, pad, dad)
        # Send the turnaround(10)
        self._write_bits(2, 2)
        # Send 16-bit value
        self._write_bits(reg, 16)
        return 0

    def _c45_write_val(self, pad: int, dad: int, val: int):
        """ C45 WRITE cycle: write 16-bit value to the latched address. """
        # Send preamble/header - C45 - WRITE
        self._cmd(MDIO.C45_FRAME, MDIO.OP_C45_WR, pad, dad)
        # Send the turnaround(10)
        self._write_bits(2, 2)
        # Send 16-bit value
        self._write_bits(val, 16)
        return 0

    def _c45_read_val(self, pad: int, dad: int) -> int:
        """ C45 READ cycle: read 16-bit value from the latched address. """
        # Send preamble/header
        self._cmd(MDIO.C45_FRAME, MDIO.OP_C45_RD, pad, dad)
        # Release data pin
        self.data_gpio.direction = "in"
        self._ndelay(self._read_delay)
        # Read 2-bit turnaround (gives slave time)
        self._read_bits(2)
        # Read 16-bit value
        ret = self._read_bits(16)
        # Capture data pin
        self.data_gpio.direction = "high"
        return ret

    def read_c22_register(self, pad: int, reg: int):
        """ Read reg in CLAUSE22. [01|01|5-bit pad|5-bit reg|XX|16-bit val]
        Args:
            pad (int): 5-bit physical address
            reg (int): 5-bit register address
        Returns:
            int: 16-bit register value
        """
        # Send preamble/header
        self._cmd(MDIO.C22_FRAME, MDIO.OP_C22_RD, pad, reg)
        # Release data pin
        self.data_gpio.direction = "in"
        self._ndelay(self._read_delay)
        # Read 2-bit turnaround (gives slave time)
        self._read_bits(2)
        # Read 16-bit value
        ret = self._read_bits(16)
        # Capture data pin
        self.data_gpio.direction = "high"
        self._flush()
        return ret

    def read_c45_register(self, pad: int, dad: int, reg: int):
        """ Read reg in CLAUSE45.
        [00|00|5-bit pad|5-bit dad|XX|16-bit reg]
        [00|11|5-bit pad|5-bit dad|XX|16-bit val]
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            reg (int): 16-bit register address
        Returns:
            int: 16-bit register value
        """
        self._c45_write_addr(pad, dad, reg)
        val = self._c45_read_val(pad, dad)
        self._flush()
        return val

    def read_c45_dword_register(self, pad: int, dad: int, reg: int):
        """ Read 32-bit reg in CLAUSE45.
        [00|00|5-bit pad|5-bit dad|XX|16-bit LSB reg]
        [00|00|5-bit pad|5-bit dad|XX|16-bit MSB reg]
        [00|11|5-bit pad|5-bit dad|XX|16-bit LSB val]
        [00|11|5-bit pad|5-bit dad|XX|16-bit MSB val]
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            reg (int): 32-bit register address
        Returns:
            int: 32-bit register value
        """
        self._c45_write_addr(pad, dad, reg & 0xFFFF)
        self._c45_write_addr(pad, dad, reg >> 16)
        val_lsb = self._c45_read_val(pad, dad)
        val_msb = self._c45_read_val(pad, dad)
        self._flush()
        # BUGFIX: the halves must be OR-combined; the previous '&' between the
        # shifted MSB and the LSB masked the result down to (almost always) 0
        return (val_msb << 16) | (val_lsb & 0xFFFF)

    def write_c22_register(self, pad: int, reg: int, val: int):
        """ Write reg in CLAUSE22. [01|01|5-bit pad|5-bit reg|01|16-bit val]
        Args:
            pad (int): 5-bit physical address
            reg (int): 5-bit register address
            val (int): 16-bit register value
        """
        # Send preamble/header
        self._cmd(MDIO.C22_FRAME, MDIO.OP_C22_WR, pad, reg)
        # Send the turnaround (10)
        self._write_bits(2, 2)
        # Send 16-bit value
        self._write_bits(val, 16)
        self._flush()

    def write_c45_register(self, pad: int, dad: int, reg: int, val: int):
        """ Write reg in CLAUSE45.
        [00|00|5-bit pad|5-bit dad|01|16-bit reg]
        [00|01|5-bit pad|5-bit dad|01|16-bit val]
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            reg (int): 16-bit register address
            val (int): 16-bit register value
        """
        self._c45_write_addr(pad, dad, reg)
        rst = self._c45_write_val(pad, dad, val)
        self._flush()
        return rst

    def write_c45_dword_register(self, pad: int, dad: int, reg: int, val: int):
        """ Write 32-bit reg in CLAUSE45.
        [00|00|5-bit pad|5-bit dad|01|16-bit LSB reg]
        [00|00|5-bit pad|5-bit dad|01|16-bit MSB reg]
        [00|01|5-bit pad|5-bit dad|01|16-bit LSB val]
        [00|01|5-bit pad|5-bit dad|01|16-bit MSB val]
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            reg (int): 32-bit register address
            val (int): 32-bit register value
        """
        self._c45_write_addr(pad, dad, reg & 0xFFFF)
        self._c45_write_addr(pad, dad, reg >> 16)
        rst = self._c45_write_val(pad, dad, val & 0xFFFF)
        rst |= self._c45_write_val(pad, dad, val >> 16)
        self._flush()
        return rst

    def read_c22_registers(self, pad: int, regs: List[int]):
        """ Read multiple registers in CLAUSE22.
        Args:
            pad (int): 5-bit physical address
            regs (List[int]): List of 5-bit register addreses
        Return:
            List[int]: List of 16-bit register values
        """
        return [self.read_c22_register(pad, reg) for reg in regs]

    def read_c45_registers(self, pad: int, dad: int, regs: List[int]):
        """ Read multiple registers in CLAUSE45.
        NOTE: C45 supports read and addr++ but not sure if supported.
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            regs (List[int]): List of 16-bit register addreses
        Return:
            List[int]: List of 16-bit register values
        """
        return [self.read_c45_register(pad, dad, reg) for reg in regs]

    def read_c45_dword_registers(self, pad: int, dad: int, regs: List[int]):
        """ Read multiple dword registers in CLAUSE45.
        NOTE: C45 supports read and addr++ but not sure if supported.
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            regs (List[int]): List of 32-bit register addreses
        Return:
            List[int]: List of 32-bit register values
        """
        return [self.read_c45_dword_register(pad, dad, reg) for reg in regs]

    def write_c22_registers(self, pad: int, regs: List[int], vals: List[int]):
        """ Write multiple registers in CLAUSE22.
        Args:
            pad (int): 5-bit physical address
            regs (List[int]): List of 5-bit register addreses
            vals (List[int]): List of 16-bit register values
        """
        return [self.write_c22_register(pad, reg, val) for reg, val in zip(regs, vals)]

    def write_c45_registers(self, pad: int, dad: int, regs: List[int], vals: List[int]):
        """ Write multiple registers in CLAUSE45.
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            regs (List[int]): List of 16-bit register addreses
            vals (List[int]): List of 16-bit register values
        """
        return [self.write_c45_register(pad, dad, reg, val) for reg, val in zip(regs, vals)]

    def write_c45_dword_registers(self, pad: int, dad: int, regs: List[int], vals: List[int]):
        """ Write multiple dword registers in CLAUSE45.
        Args:
            pad (int): 5-bit physical address
            dad (int): 5-bit device type
            regs (List[int]): List of 32-bit register addreses
            vals (List[int]): List of 32-bit register values
        """
        return [self.write_c45_dword_register(pad, dad, reg, val) for reg, val in zip(regs, vals)]
class MDIOSPI:
"""Peform MDIO over SPI interface.
Requires MOSI and MISO to be tied together with external pull-up
Chip select is not used either since MDIO packet contains phy_addr
"""
C22_FRAME = 0x01
C45_FRAME = 0x00
OP_C22_WR = 0x01
OP_C22_RD = 0x02
OP_C45_AD = 0x00
OP_C45_WR = 0x01
OP_C45_RD_INC = 0x02
OP_C45_RD = 0x03
def __init__(self, path: str = '/dev/spidev0.0'):
"""
SPI-based MDIO interface.
Args:
path: spidev bus path
"""
self.path: str = path
self._bus: Optional[SPI]
def open(self, speed_hz: int = 5000):
""" Open mdio bus. """
self._bus = SPI(self.path, 0, speed_hz, extra_flags=SPI.SPI_3WIRE)
def close(self):
""" Close mdio bus. """
self._bus.close()
def mdio_flush(self):
""" Flush bus by sending 32 1's """
self._bus.transfer(tx_data=(0xFFFFFFFF).to_bytes(4, byteorder='big'), cs_change=False)
def mdio_xfer(self, st: int, op: int, pad: int, dad: int, tat: int = 0x2, val: int = 0xFFFF):
""" Perform low-level 32-bit frame transfer.
Args:
st (int): 2-bit start field
op (int): 2-bit operation field
pad (int): 5-bit physical address
dad (int): 5-bit register address / device type
tat (int): 2-bit turn around field
val (int): 16-bit write value
"""
is_read = op in [MDIOSPI.OP_C22_RD, MDIOSPI.OP_C45_RD, MDIOSPI.OP_C45_RD_INC]
# Construct 16-bit header
# Flush 32-bits
self.mdio_flush()
# Transfer 16-bit header
tat = 0x3 if is_read else tat
hdr = (st & 0x3) << | |
#!/usr/bin/env python3
# Convert a BMP source file to Python source.
# Copyright <NAME> and <NAME> 2016
# Released under the MIT licence
# Files created by any graphic tool exporting bmp files, e.g. gimp
# the colour depth may be 1, 4, 8, 16, or 24 pixels, lower sizes preferred
# Usage:
# ./bmp_to_icon checkbox_on.bmp checkbox_off.bmp
# puts files into a single Python file defaulting to icons.py (-o name can override default)
# with a dictionary 'icons' indexed by a number.
# The generated icon pathon script also defines a function get_icon(index)
# for accessing an icon, which returns a tuple which can directly supplied
# into the drawBitmap() function of the tft lib.
# -------------------------
# Example: Assuming an icons file called icons.py.
# then the sript usign it could look like:
#
# import tft
# import icons
# .....
# mytft = tft.TFT()
# .....
# mytft.drawBitmap(x1, y1, *icons.get_icon(0)) # draw the first icon at location x1, y1
# mytft.drawBitmap(x2, y2, *icons.get_icon(1)) # draw the scond icon at location x2, y2
import os
import argparse
from struct import unpack
# Module-level state shared between repeated process() calls: every icon of
# one set must agree on these values, so they persist across input files.
icon_width = None        # pixel width common to all icons of the set
icon_height = None       # pixel height common to all icons of the set
icon_colortable = None   # colortable (bytearray) taken from the first icon read
icon_colors = None       # colour depth written to the output (may be reduced 4 -> 2)
file_icon_colors = None  # colour depth as found in the BMP files
icon_table = []          # NOTE(review): appears unused within this file
no_icons = 0             # running count of icons emitted so far
# Work around an SD card library bug: never read more than 512 bytes at once
# (at a performance penalty); required if the current file position is not a
# multiple of 4.
def split_read(f, buf, n):
    """Read *n* bytes from *f* into *buf* in sector-sized (512B) chunks.

    Returns the total number of bytes actually read.
    """
    SECTOR = 512
    view = memoryview(buf)
    total = 0
    pos = 0
    # full sectors first...
    while pos < n - SECTOR:
        total += f.readinto(view[pos:pos + SECTOR])
        pos += SECTOR
    # ...then whatever remains (at most one sector)
    if total < n and (n - total) <= SECTOR:
        total += f.readinto(view[total:n])
    return total
def getname(sourcefile):
    """Return the bare file name of *sourcefile*: no directory, no extension."""
    base = os.path.basename(sourcefile)
    return os.path.splitext(base)[0]
def explode(buf, res, offset, size, bits):
    """Unpack *size* pixels of *bits* bits each from *buf* into *res*.

    Each pixel lands in its own byte of *res* starting at *offset*, taking
    pixels MSB-first within every source byte.  Returns the new offset.
    """
    src = 0
    outmask = (1 << bits) - 1
    shift = 8 - bits
    mask = outmask << shift
    for _ in range(size):
        res[offset] = ((buf[src] & mask) >> shift) & outmask
        offset += 1
        mask >>= bits
        shift -= bits
        # source byte exhausted: rebuild the mask, advance the data pointer
        if not mask:
            shift = 8 - bits
            mask = outmask << shift
            src += 1
    return offset
def implode(buf, size, colors):
    """Pack *size* one-byte-per-pixel values in *buf* back into *colors*-bit
    pixels, in place, starting at index 0.  Returns the packed byte count.
    """
    # pixels packed per output byte: 8 for 1-bit, 4 for 2-bit, 2 for 4-bit,
    # otherwise a plain 1:1 copy
    if colors == 1:
        per_byte = 8
    elif colors == 2:
        per_byte = 4
    elif colors == 4:
        per_byte = 2
    else:
        per_byte = 1
    op = 0
    for ip in range(0, size, per_byte):
        packed = 0
        for k in range(per_byte):
            packed = (packed << colors) | buf[ip + k]
        buf[op] = packed
        op += 1
    return op
def process(f, outfile):
    """Convert one open BMP stream *f* into an entry of the generated dict.

    Writes the (packed) pixel data of the icon to *outfile* and updates the
    module-level set state (icon_width, icon_height, icon_colortable,
    icon_colors, file_icon_colors, no_icons).  All icons of a set must share
    the same width, height and colour depth.

    Returns the updated icon count, or None on a consistency or read error.
    """
    #
    global icon_width
    global icon_height
    global icon_colortable
    global icon_colors
    global file_icon_colors
    global icon_table
    global no_icons
    # BMP file header (14 bytes): magic, file size, reserved, pixel-data offset
    BM, filesize, res0, offset = unpack("<hiii", f.read(14))
    # DIB header (BITMAPINFOHEADER, 40 bytes); note 'colors' is the
    # bits-per-pixel field of that header
    (hdrsize, imgwidth, imgheight, planes, colors, compress, imgsize,
        h_res, v_res, ct_size, cti_size) = unpack("<iiihhiiiiii", f.read(40))
    # test consistency in a set
    #
    if icon_width is not None and icon_width != imgwidth:
        print ("Error: All icons in a set must have the same width")
        return None
    else:
        icon_width = imgwidth
    if icon_height is not None and icon_height != imgheight:
        print ("Error: All icons in a set must have the same heigth")
        return None
    else:
        icon_height = imgheight
    if file_icon_colors is not None and file_icon_colors != colors:
        print ("Error: All icons in a set must have the same number of colors", file_icon_colors, colors)
        return None
    else:
        file_icon_colors = icon_colors = colors
    if colors in (1,4,8): # must have a color table
        if ct_size == 0: # if 0, size is 2**colors
            ct_size = 1 << colors
        colortable = bytearray(ct_size * 4)
        f.seek(hdrsize + 14) # go to colortable
        n = split_read(f, colortable, ct_size * 4) # read colortable
        # only the first icon's colortable is kept for the whole set
        if icon_colortable is None:
            icon_colortable = colortable
        # bsize: bytes per source row; res: oversized unpack buffer
        if colors == 1:
            bsize = (imgwidth + 7) // 8
            res = bytearray((imgwidth * imgheight * 8) + 8) # make it big enough
        elif colors == 4:
            bsize = (imgwidth + 1) // 2
            res = bytearray((imgwidth * imgheight * 2) + 2) # make it big enough
        elif colors == 8:
            bsize = imgwidth
            res = bytearray((imgwidth * imgheight) + 1) # make it big enough
        rsize = (bsize + 3) & 0xfffc # read size must read a multiple of 4 bytes
        f.seek(offset)
        icondata = []
        for row in range(imgheight):
            b = bytearray(rsize)
            n = split_read(f, b, rsize)
            if n != rsize:
                print ("Error reading file")
                return None
            icondata.append(b) # read all lines
        # convert data; BMP rows are stored bottom-up, so walk them in reverse
        offset = 0
        for row in range(imgheight - 1, -1, -1):
            offset = explode(icondata[row], res, offset, imgwidth, colors)
        if colors == 4 and ct_size <= 4: # reduce color size from 4 to 2 is feasible
            colors = 2
            icon_colors = colors
        offset = implode(res, offset, colors)
        # store data as one "<index>: (b'...', ...)" dict entry,
        # 16 bytes per output line
        outfile.write("{}: (\n".format(no_icons))
        for i in range(offset):
            if (i % 16) == 0:
                outfile.write("    b'")
            outfile.write("\\x{:02x}".format(res[i]))
            if (i % 16) == 15:
                outfile.write("'\n")
        # NOTE(review): if offset were 0, 'i' below would be unbound
        # (NameError); cannot happen for a non-empty bitmap
        if (i % 16) != 15:
            outfile.write("'\n")
        outfile.write("),\n")
    else:
        # 16- or 24-bit bitmaps: no colortable, raw rows are copied through
        if icon_colortable is None:
            icon_colortable = bytearray(0)
        f.seek(offset)
        if colors == 16:
            bsize = imgwidth * 2
            rsize = (imgwidth*2 + 3) & 0xfffc # must read a multiple of 4 bytes
            icondata = []
            for row in range(imgheight):
                b = bytearray(rsize)
                n = split_read(f, b, rsize)
                if n != rsize:
                    print ("Error reading file")
                    return None
                icondata.append(b) # read all lines
            # store data
        elif colors == 24:
            bsize = imgwidth * 3
            rsize = (imgwidth*3 + 3) & 0xfffc # must read a multiple of 4 bytes
            icondata = []
            for row in range(imgheight):
                b = bytearray(rsize)
                n = split_read(f, b, rsize)
                if n != rsize:
                    print ("Error reading file")
                    return None
                icondata.append(b) # read all lines
        # emit rows bottom-up so the output is top-down
        outfile.write("{}: (\n".format(no_icons))
        for row in range(imgheight - 1, -1, -1):
            for i in range (bsize):
                if (i % 16) == 0:
                    outfile.write("    b'")
                outfile.write("\\x{:02x}".format(icondata[row][i]))
                if (i % 16) == 15:
                    outfile.write("'\n")
            if (i % 16) != 15:
                outfile.write("'\n")
        outfile.write("),\n")
    no_icons += 1
    return no_icons
def write_header(outfile):
    """Emit the fixed preamble of the generated icons module into *outfile*."""
    preamble = (
        "\n"
        "# Code generated by bmp_to_icon.py\n"
        "from uctypes import addressof\n"
    )
    outfile.write(preamble)
    # opening brace of the _icons dictionary; entries follow via process()
    outfile.write("_icons = { \n")
def write_trailer(outfile):
    """Close the generated ``_icons`` dict, then emit the colortable, the
    icon geometry (width/height/colors) and the get_icon()/draw() accessors
    into *outfile*.  Reads the module-level set state filled by process()."""
    outfile.write('}\n\n')
    outfile.write("colortable = { 0: (\n    b'")
    size = len(icon_colortable)
    if (size >= 8): # only for bitmaps with a colortable
        # store color bits in the fourth (reserved) byte of the first
        # 4-byte colortable entry
        icon_colortable[3] = icon_colors # store color bits in table
    for i in range(size):
        outfile.write("\\x{:02x}".format(icon_colortable[i]))
        # wrap the generated byte string every 16 bytes
        if (i % 16) == 15 and i != (size - 1):
            outfile.write("'\n    b'")
    outfile.write("')\n}\n")
    outfile.write("width = {}\n".format(icon_width))
    outfile.write("height = {}\n".format(icon_height))
    outfile.write("colors = {}\n".format(icon_colors))
    outfile.write("""
def get_icon(icon_index = 0, color_index = 0):
    return width, height, addressof(_icons[icon_index]), colors, addressof(colortable[color_index])
def draw(x, y, icon_index, draw_fct, color_index = 0):
    draw_fct(x - width//2, y - height // 2, width, height, addressof(_icons[icon_index]), colors, addressof(colortable[color_index]))
""")
def load_bmp(sourcefiles, destfile):
    """Convert every BMP in *sourcefiles* into one generated Python module.

    The output file is ``<destfile>.py``; conversion stops at the first
    file that fails, but the trailer is always written.  OS errors are
    reported on stdout rather than raised.
    """
    target = getname(destfile) + ".py"
    try:
        with open(target, 'w') as outfile:
            write_header(outfile)
            for path in sourcefiles:
                with open(path, 'rb') as bmp:
                    result = process(bmp, outfile)
                if result is None:
                    break
            write_trailer(outfile)
    except OSError as err:
        print(err)
if __name__ == "__main__":
parser = argparse.ArgumentParser(__file__, description =
"""Utility for producing a icon set file for the tft module by converting BMP files.
Sample usage: ./bmp_to_icon.py checkbox_empty.bmp checkbox_tick.bmp
Produces icons.py""",
formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument('infiles', metavar ='N', type = str, nargs = '+', help = 'input file paths')
parser.add_argument("--outfile", "-o", default = 'icons', help = "Path and name of output file (w/o extension)", required = False)
args = parser.parse_args()
errlist = [f for f in args.infiles if not f[0].isalpha()]
if len(errlist):
print('Font filenames must be valid | |
to edit" )
return trans.show_error_message( "You must provide a history dataset id to edit" )
if data is None:
trans.log_event( "Problem retrieving dataset (encoded: %s, decoded: %s) with history id %s." % ( str( dataset_id ), str( id ), str( hid ) ) )
return trans.show_error_message( "History dataset id is invalid" )
if dataset_id is not None and data.history.user is not None and data.history.user != trans.user:
trans.log_event( "User attempted to edit an HDA they do not own (encoded: %s, decoded: %s)" % ( dataset_id, id ) )
# Do not reveal the dataset's existence
return trans.show_error_message( "History dataset id is invalid" )
current_user_roles = trans.get_current_user_roles()
if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
# Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
# so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
# permission. In this case, we'll reset this permission to the hda user's private role.
manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
if self._can_access_dataset( trans, data ):
if data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
params = util.Params( kwd, sanitize=False )
if params.change:
# The user clicked the Save button on the 'Change data type' form
if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
#prevent modifying datatype when dataset is queued or running as input/output
if not __ok_to_edit_metadata( data.id ):
message = "This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them."
error = True
else:
trans.app.datatypes_registry.change_datatype( data, params.datatype )
trans.sa_session.flush()
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data }, overwrite = False ) #overwrite is False as per existing behavior
message = "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype )
refresh_frames=['history']
else:
message = "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype )
error = True
elif params.save:
# The user clicked the Save button on the 'Edit Attributes' form
data.name = params.name if params.name else ''
data.info = params.info if params.info else ''
message = ''
if __ok_to_edit_metadata( data.id ):
# The following for loop will save all metadata_spec items
for name, spec in data.datatype.metadata_spec.items():
if spec.get("readonly"):
continue
optional = params.get("is_"+name, None)
other = params.get("or_"+name, None)
if optional and optional == 'true':
# optional element... == 'true' actually means it is NOT checked (and therefore omitted)
setattr(data.metadata, name, None)
else:
if other:
setattr( data.metadata, name, other )
else:
setattr( data.metadata, name, spec.unwrap( params.get (name, None) ) )
data.datatype.after_setting_metadata( data )
# Sanitize annotation before adding it.
if params.annotation:
annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
# This block on controller code is inactive until the 'extended_metadata' edit box is added back into the UI
# Add or delete extended metadata
# if params.extended_metadata:
# em_string = params.extended_metadata
# if len(em_string):
# em_payload = None
# try:
# em_payload = loads(em_string)
# except Exception, e:
# message = 'Invalid JSON input'
# error = True
# if em_payload is not None:
# if data is not None:
# ex_obj = self.get_item_extended_metadata_obj(trans, data)
# if ex_obj is not None:
# self.unset_item_extended_metadata_obj(trans, data)
# self.delete_extended_metadata(trans, ex_obj)
# ex_obj = self.create_extended_metadata(trans, em_payload)
# self.set_item_extended_metadata_obj(trans, data, ex_obj)
# message = "Updated Extended metadata '%s'." % data.name
# status = 'done'
# else:
# message = "data not found"
# error = True
# else:
# if data is not None:
# ex_obj = self.get_item_extended_metadata_obj(trans, data)
# if ex_obj is not None:
# self.unset_item_extended_metadata_obj(trans, data)
# self.delete_extended_metadata(trans, ex_obj)
# message = "Deleted Extended metadata '%s'." % data.name
# status = 'done'
# If setting metadata previously failed and all required elements have now been set, clear the failed state.
if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
data._state = None
trans.sa_session.flush()
message = "Attributes updated%s" % message
refresh_frames=['history']
else:
trans.sa_session.flush()
message = "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata."
status = "warning"
refresh_frames=['history']
elif params.detect:
# The user clicked the Auto-detect button on the 'Edit Attributes' form
#prevent modifying metadata when dataset is queued or running as input/output
if not __ok_to_edit_metadata( data.id ):
message = "This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them."
error = True
else:
for name, spec in data.metadata.spec.items():
# We need to be careful about the attributes we are resetting
if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
if spec.get( 'default' ):
setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
message = 'Attributes have been queued to be updated'
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data } )
trans.sa_session.flush()
refresh_frames=['history']
elif params.convert_data:
target_type = kwd.get("target_type", None)
if target_type:
message = data.datatype.convert_dataset(trans, data, target_type)
refresh_frames=['history']
elif params.update_roles_button:
if not trans.user:
return trans.show_error_message( "You must be logged in if you want to change permissions." )
if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
# The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
# need to ensure that they did not associate roles that would cause accessibility problems.
permissions, in_roles, error, message = \
trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
if error:
# Keep the original role associations for the DATASET_ACCESS permission on the dataset.
permissions[ access_action ] = data.dataset.get_access_roles( trans )
status = 'error'
else:
error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
if error:
message += error
status = 'error'
else:
message = 'Your changes completed successfully.'
trans.sa_session.refresh( data.dataset )
else:
message = "You are not authorized to change this dataset's permissions"
error = True
else:
if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
# Copy dbkey into metadata, for backwards compatability
# This looks like it does nothing, but getting the dbkey
# returns the metadata dbkey unless it is None, in which
# case it resorts to the old dbkey. Setting the dbkey
# sets it properly in the metadata
#### This is likely no longer required, since the dbkey exists entirely within metadata (the old_dbkey field is gone): REMOVE ME?
data.metadata.dbkey = data.dbkey
# let's not overwrite the imported datatypes module with the variable datatypes?
# the built-in 'id' is overwritten in lots of places as well
ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
ldatatypes.sort()
all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
if error:
status = 'error'
return trans.fill_template( "/dataset/edit_attributes.mako",
data=data,
data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
datatypes=ldatatypes,
current_user_roles=current_user_roles,
all_roles=all_roles,
message=message,
status=status,
dataset_id=dataset_id,
refresh_frames=refresh_frames )
else:
return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) )
@web.expose
@web.require_login( "see all available datasets" )
def list( self, trans, **kwargs ):
"""List all available datasets"""
status = message = None
if 'operation' in kwargs:
operation = kwargs['operation'].lower()
hda_ids = util.listify( kwargs.get( 'id', [] ) )
# Display no message by default
status, message = None, None
# Load the hdas and ensure they all belong to the current user
hdas = []
for encoded_hda_id in hda_ids:
hda_id = self.decode_id( encoded_hda_id )
hda = trans.sa_session.query( model.HistoryDatasetAssociation ).filter_by( id=hda_id ).first()
if hda:
# Ensure history is owned by current user
if hda.history.user_id != None and trans.user:
assert trans.user.id == hda.history.user_id, "HistoryDatasetAssocation does not belong to current user"
hdas.append( hda )
else:
log.warn( "Invalid history_dataset_association id '%r' passed to list", hda_id )
if hdas:
if operation == "switch" or operation == "switch_history":
# Switch to a history that the HDA resides in.
# Convert hda to histories.
histories = []
for hda in hdas:
histories.append( hda.history )
# Use history controller to switch | |
<gh_stars>10-100
import logging
from enum import Enum, unique
from lib import constant_pool
from lib import run_time_data
from lib import descriptor
from lib import frame as FRAME
from lib.hijack_jre_methods import get_native_method
from lib import class_loader
# Registry mapping a JVM opcode byte to the instruction class that implements it.
OPCODES = {}


def bytecode(code):
    """Class decorator factory: register the decorated class in OPCODES under *code*.

    The class itself is returned unchanged, so the decorator is transparent.
    """
    def register(klass):
        OPCODES[code] = klass
        return klass
    return register
@unique
class NextStep(Enum):
    """What the interpreter loop should do after executing an instruction."""
    next_instruction = 0  # fall through to the following instruction
    jump_to = 1  # branch to the instruction's jump_to_address
    invoke_method = 2  # push a new frame for a method call
    method_return = 3  # pop the current frame, possibly with a return value
class _instruction(object):
    """Base class for all bytecode instructions.

    Subclasses override ``execute`` and, when the opcode carries inline
    operand bytes, ``len_of_operand``/``put_operands``.  After ``execute``
    runs, the interpreter inspects the flag attributes via ``next_step`` to
    decide whether to fall through, branch, invoke, or return.
    """

    def __init__(self, address):
        self.address = address
        # Branch state, for method-internal jumps.
        self.init_jump()
        # Method-invocation state, for calls into other methods.
        self.init_invoke_method()
        # Return state.
        self.method_return = False
        self.return_value = None

    def init_jump(self):
        """Clear the branch flags."""
        self.need_jump = False
        self.jump_to_address = None

    def init_invoke_method(self):
        """Clear the method-invocation flags."""
        self.invoke_method = False
        self.invoke_class_name = None
        self.invoke_method_name = None
        self.invoke_method_descriptor = None
        self.invoke_objectref = None
        self.invoke_parameters = []

    def len_of_operand(self):
        """Number of inline operand bytes following the opcode (0 by default)."""
        return 0

    def put_operands(self, operand_bytes):
        """Consume the inline operand bytes; no-op for operand-less opcodes."""
        pass

    def class_name_and_address(self):
        """Short identifier used in log/error messages."""
        return f'{type(self).__name__} (addr:{self.address})'

    def next_step(self):
        """Tell the interpreter loop what to do after this instruction ran."""
        if self.invoke_method:
            return NextStep.invoke_method
        if self.need_jump:
            return NextStep.jump_to
        if self.method_return:
            return NextStep.method_return
        return NextStep.next_instruction

    def execute(self, frame):
        raise NotImplementedError(
            'execute in base instruction is not implemented, '
            f'instruction {self.class_name_and_address()}'
        )
@bytecode(0x01)
class aconst_null(_instruction):
    """aconst_null: push the null reference (modelled here as Python None)."""
    def execute(self, frame):
        frame.operand_stack.append(None)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'push null onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
class iconst_i(_instruction):
    """Shared base for constant-pushing opcodes: push the int ``i`` on execute.

    Subclasses either hard-code ``i`` (iconst_<n>) or fill it from operand
    bytes (bipush/sipush).
    """
    def __init__(self, address, i=0):
        super().__init__(address)
        self.i = i
    def execute(self, frame):
        frame.operand_stack.append(self.i)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'push {self.i} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
# Fixed-constant variants of iconst_i: each pushes its hard-coded int
# (-1 through 5), covering JVM opcodes 0x02-0x08.
@bytecode(0x02)
class iconst_m1(iconst_i):
    def __init__(self, address):
        super().__init__(address, -1)
@bytecode(0x03)
class iconst_0(iconst_i):
    def __init__(self, address):
        super().__init__(address, 0)
@bytecode(0x04)
class iconst_1(iconst_i):
    def __init__(self, address):
        super().__init__(address, 1)
@bytecode(0x05)
class iconst_2(iconst_i):
    def __init__(self, address):
        super().__init__(address, 2)
@bytecode(0x06)
class iconst_3(iconst_i):
    def __init__(self, address):
        super().__init__(address, 3)
@bytecode(0x07)
class iconst_4(iconst_i):
    def __init__(self, address):
        super().__init__(address, 4)
@bytecode(0x08)
class iconst_5(iconst_i):
    def __init__(self, address):
        super().__init__(address, 5)
@bytecode(0x10)
class bipush(iconst_i):
    """bipush: push an immediate byte, sign-extended to an int.

    Per the JVM specification the operand is a *signed* 8-bit value, so
    0xFF means -1, not 255.  The previous code used ``operand_bytes[0]``
    directly, which is the unsigned interpretation.
    """
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert len(operand_bytes) == 1
        # Sign-extend the signed 8-bit immediate.
        self.i = int.from_bytes(operand_bytes[:1], byteorder='big', signed=True)
@bytecode(0x11)
class sipush(iconst_i):
    """sipush: push an immediate short, sign-extended to an int.

    Per the JVM specification the two operand bytes form a *signed* 16-bit
    value; the previous ``signed=False`` read made e.g. 0xFFFF come out as
    65535 instead of -1.
    """
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 2
    def put_operands(self, operand_bytes):
        assert len(operand_bytes) == 2
        # Sign-extend the signed 16-bit immediate.
        self.i = int.from_bytes(operand_bytes, byteorder='big', signed=True)
@bytecode(0x12)
class ldc(_instruction):
    """ldc: push a constant-pool entry onto the operand stack.

    The 1-byte operand is an index into the current class's constant pool.
    Only String, Integer and Float constants are handled here.
    """
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert type(operand_bytes[0]) is int
        # Unsigned constant-pool index.
        self.index = operand_bytes[0]
    def execute(self, frame):
        constant = frame.klass.constant_pool[self.index]
        if type(constant) is constant_pool.ConstantString:
            # Strings are stored indirectly: push the referenced entry's value.
            frame.operand_stack.append(
                frame.klass.constant_pool[constant.string_index].value())
        elif type(constant) in (
            constant_pool.ConstantInteger,
            constant_pool.ConstantFloat
        ):
            frame.operand_stack.append(constant.value)
        else:
            # Other ldc-loadable constants (e.g. Class references) are not
            # supported yet; fail loudly rather than push garbage.
            assert False, \
                f'constant type is {type(constant)}, '\
                'not know what is used for yet'
class iload_n(_instruction):
    """Shared base for iload/iload_<n>: push local variable ``n`` (an int)."""
    def __init__(self, address, n=0):
        super().__init__(address)
        self.n = n
    def execute(self, frame):
        # Only int locals are valid for iload.
        assert type(frame.local_variables[self.n]) is int
        frame.operand_stack.append(frame.local_variables[self.n])
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'push {frame.local_variables[self.n]} onto operand stack '
            f'from local variable {self.n}\n'
            f'\t{frame.operand_debug_str()}\n'
            f'\t{frame.local_variable_debug_str()}'
        )
# iload takes a 1-byte unsigned local-variable index operand; iload_0..3
# hard-code the index (JVM opcodes 0x1a-0x1d).
@bytecode(0x15)
class iload(iload_n):
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert type(operand_bytes[0]) is int
        self.n = operand_bytes[0]
@bytecode(0x1a)
class iload_0(iload_n):
    def __init__(self, address):
        super().__init__(address, 0)
@bytecode(0x1b)
class iload_1(iload_n):
    def __init__(self, address):
        super().__init__(address, 1)
@bytecode(0x1c)
class iload_2(iload_n):
    def __init__(self, address):
        super().__init__(address, 2)
@bytecode(0x1d)
class iload_3(iload_n):
    def __init__(self, address):
        super().__init__(address, 3)
class astore_n(_instruction):
    """Shared base for astore/astore_<n>: pop an object reference and store it
    in local variable ``n``.

    NOTE(review): the JVM also allows astore to store null and returnAddress
    values; this assert would reject a null pushed by aconst_null — confirm
    whether that case can occur with the supported opcode subset.
    """
    def __init__(self, address, n=0):
        super().__init__(address)
        self.n = n
    def execute(self, frame):
        objectref = frame.operand_stack.pop()
        # TODO: type can be returnAddress reference, what is returnAddress?
        # Bug fix: the message was a plain string without an f-prefix brace
        # expansion, so it always printed the literal text "type(objectref)".
        assert type(objectref) is FRAME.Object,\
            f'Type of ref in astore is {type(objectref)}'
        frame.local_variables[self.n] = objectref
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'pop {objectref} from operand stack and store into '
            f'local variable {self.n}\n'
            f'\t{frame.operand_debug_str()}\n'
            f'\t{frame.local_variable_debug_str()}'
        )
# astore takes a 1-byte unsigned local-variable index operand; astore_0..3
# hard-code the index (JVM opcodes 0x4b-0x4e).
@bytecode(0x3a)
class astore(astore_n):
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert type(operand_bytes[0]) is int
        self.n = operand_bytes[0]
@bytecode(0x4b)
class astore_0(astore_n):
    def __init__(self, address):
        super().__init__(address, 0)
@bytecode(0x4c)
class astore_1(astore_n):
    def __init__(self, address):
        super().__init__(address, 1)
@bytecode(0x4d)
class astore_2(astore_n):
    def __init__(self, address):
        super().__init__(address, 2)
@bytecode(0x4e)
class astore_3(astore_n):
    def __init__(self, address):
        super().__init__(address, 3)
class aload_n(_instruction):
    """Shared base for aload/aload_<n>: push the object reference held in
    local variable ``n`` onto the operand stack."""
    def __init__(self, address, n=0):
        super().__init__(address)
        self.n = n
    def execute(self, frame):
        assert type(frame.local_variables[self.n]) is FRAME.Object,\
            f'Type of ref in aload is {type(frame.local_variables[self.n])}'
        frame.operand_stack.append(frame.local_variables[self.n])
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'push {frame.local_variables[self.n]} onto operand stack '
            f'from local variable {self.n}\n'
            f'\t{frame.operand_debug_str()}\n'
            f'\t{frame.local_variable_debug_str()}'
        )
@bytecode(0x19)
class aload(aload_n):
    """aload: push the reference in the local variable given by the 1-byte
    unsigned index operand.

    Bug fix: this was registered under 0x25, which is the opcode of fload_3
    in the JVM instruction set; aload is 0x19.
    """
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert type(operand_bytes[0]) is int
        self.n = operand_bytes[0]
# aload_0..3 hard-code the local-variable index (JVM opcodes 0x2a-0x2d).
@bytecode(0x2a)
class aload_0(aload_n):
    def __init__(self, address):
        super().__init__(address, 0)
@bytecode(0x2b)
class aload_1(aload_n):
    def __init__(self, address):
        super().__init__(address, 1)
@bytecode(0x2c)
class aload_2(aload_n):
    def __init__(self, address):
        super().__init__(address, 2)
@bytecode(0x2d)
class aload_3(aload_n):
    def __init__(self, address):
        super().__init__(address, 3)
class istore_n(_instruction):
    """Shared base for istore/istore_<n>: pop an int and store it in local
    variable ``n``."""
    def __init__(self, address, n=0):
        super().__init__(address)
        self.n = n
    def execute(self, frame):
        i = frame.operand_stack.pop()
        assert type(i) is int
        frame.local_variables[self.n] = i
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'pop {i} from operand stack and set to local variable {self.n}\n'
            f'\t{frame.operand_debug_str()}\n'
            f'\t{frame.local_variable_debug_str()}'
        )
# istore takes a 1-byte unsigned local-variable index operand; istore_0..3
# hard-code the index (JVM opcodes 0x3b-0x3e).
@bytecode(0x36)
class istore(istore_n):
    def __init__(self, address):
        super().__init__(address)
    def len_of_operand(self):
        return 1
    def put_operands(self, operand_bytes):
        assert type(operand_bytes[0]) is int
        self.n = operand_bytes[0]
@bytecode(0x3b)
class istore_0(istore_n):
    def __init__(self, address):
        super().__init__(address, 0)
@bytecode(0x3c)
class istore_1(istore_n):
    def __init__(self, address):
        super().__init__(address, 1)
@bytecode(0x3d)
class istore_2(istore_n):
    def __init__(self, address):
        super().__init__(address, 2)
@bytecode(0x3e)
class istore_3(istore_n):
    def __init__(self, address):
        super().__init__(address, 3)
@bytecode(0x57)
class pop(_instruction):
    """pop: discard the top value of the operand stack."""
    def execute(self, frame):
        frame.operand_stack.pop()
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            'Pop the top value from the operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x59)
class dup(_instruction):
    """dup: duplicate the top value of the operand stack."""
    def execute(self, frame):
        frame.operand_stack.append(frame.operand_stack[-1])
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            'Duplicate the top operand stack value\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x60)
class iadd(_instruction):
    """iadd: pop two ints, push their sum.

    NOTE(review): JVM ints are 32-bit and wrap on overflow, while Python
    ints are unbounded — overflow behaviour diverges here (and in the other
    arithmetic opcodes); confirm whether masking to 32 bits is needed.
    """
    def execute(self, frame):
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        assert type(value1) is int
        assert type(value2) is int
        value = value1 + value2
        frame.operand_stack.append(value)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'add value1 and value2, push {value} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x70)
class irem(_instruction):
    """irem: int remainder with JVM semantics.

    The JVM defines irem as ``value1 - (value1 / value2) * value2`` using
    *truncating* division, so the result takes the sign of the dividend.
    Python's ``%`` follows the sign of the divisor instead (JVM: -7 rem 3
    == -1, Python: -7 % 3 == 2), so it cannot be used directly.
    """
    def execute(self, frame):
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        assert type(value1) is int
        assert type(value2) is int
        # Truncating division on magnitudes, then reapply the quotient sign.
        quotient = abs(value1) // abs(value2)
        if (value1 < 0) != (value2 < 0):
            quotient = -quotient
        value = value1 - quotient * value2
        frame.operand_stack.append(value)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: Remainder int, '
            f'value1 is {value1}, value2 is {value2}, '
            f'push result value {value} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x64)
class isub(_instruction):
    """isub: pop two ints, push value1 - value2 (value2 is the top of stack)."""
    def execute(self, frame):
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        assert type(value1) is int
        assert type(value2) is int
        value = value1 - value2
        frame.operand_stack.append(value)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'Subtract value1 and value2, push {value} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x68)
class imul(_instruction):
    """imul: pop two ints, push their product.

    NOTE(review): no 32-bit overflow wrap is applied (see iadd).
    """
    def execute(self, frame):
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        assert type(value1) is int
        assert type(value2) is int
        value = value1 * value2
        frame.operand_stack.append(value)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'multiply value1 and value2, push {value} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x6c)
class idiv(_instruction):
    """idiv: int division with JVM semantics (truncation toward zero).

    Python's ``//`` floors (JVM: -7 / 2 == -3, Python: -7 // 2 == -4), so
    the quotient is computed on magnitudes and the sign reapplied.
    """
    def execute(self, frame):
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        assert type(value1) is int
        assert type(value2) is int
        if value2 == 0:
            raise NotImplementedError(
                'Exception have not implemented. '
                'Should through ArithmeticException'
            )
        # Truncate toward zero rather than floor.
        value = abs(value1) // abs(value2)
        if (value1 < 0) != (value2 < 0):
            value = -value
        frame.operand_stack.append(value)
        logging.debug(
            f'Instruction {self.class_name_and_address()}: '
            f'Divide value1 and value2, push {value} onto operand stack\n'
            f'\t{frame.operand_debug_str()}'
        )
@bytecode(0x84)
class iinc(_instruction):
    """iinc: increment local variable ``index`` by the signed byte ``const``.

    Operands: 1 unsigned byte (local variable index) followed by 1 signed
    byte (increment), matching the JVM encoding.
    """
    def len_of_operand(self):
        return 2
    def put_operands(self, operand_bytes):
        assert len(operand_bytes) == 2
        self.index = int.from_bytes(operand_bytes[:1], byteorder='big', signed=False)
        self.const = int.from_bytes(operand_bytes[1:], byteorder='big', signed=True)
    def execute(self, frame):
        frame.local_variables[self.index] = frame.local_variables[self.index] + self.const
        logging.debug(
            'Instruction {na}: increate local value {i} by {v} to value {fv}'.format(
                na=self.class_name_and_address(),
                i=self.index,
                v=self.const,
                fv=frame.local_variables[self.index]
            )
        )
class if_icmpcond(_instruction):
    """Shared base for the if_icmp<cond> family: pop two ints and branch when
    the subclass predicate ``cmp(value1, value2)`` holds.

    The 2-byte operand is a signed branch offset relative to this
    instruction's own address.
    """
    def len_of_operand(self):
        return 2
    def put_operands(self, operand_bytes):
        assert len(operand_bytes) == 2
        self.offset = int.from_bytes(operand_bytes, byteorder='big', signed=True)
    def execute(self, frame):
        # Instruction objects are reused across executions, so clear any
        # branch decision left over from a previous pass through this loop.
        self.init_jump()
        value2 = frame.operand_stack.pop()
        value1 = frame.operand_stack.pop()
        if self.cmp(value1, value2):
            self.need_jump = True
            self.jump_to_address = self.address + self.offset
        logging.debug(
            'Instruction {na}: compare value1 and value2 from stack, result need {j}'.format(
                na=self.class_name_and_address(),
                j='jump to address {0}'.format(self.jump_to_address) if self.need_jump else 'not jump'
            )
        )
    def cmp(self, value1, value2):
        raise NotImplementedError('cmp function in if_icmpcond will not be implement.')
# Concrete comparison predicates for if_icmp<cond> (JVM opcodes 0x9f-0xa4).
@bytecode(0x9f)
class if_icmpeq(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 == value2
@bytecode(0xa0)
class if_icmpne(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 != value2
@bytecode(0xa1)
class if_icmplt(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 < value2
@bytecode(0xa2)
class if_icmpge(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 >= value2
@bytecode(0xa3)
class if_icmpgt(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 > value2
@bytecode(0xa4)
class if_icmple(if_icmpcond):
    def cmp(self, value1, value2):
        return value1 <= value2
@bytecode(0xa7)
class goto(_instruction):
    """goto: unconditional branch; the 2-byte operand is a signed offset
    relative to this instruction's own address."""
    def len_of_operand(self):
        return 2
    def put_operands(self, operand_bytes):
        assert len(operand_bytes) == 2
        self.offset = int.from_bytes(operand_bytes, byteorder='big', signed=True)
    def execute(self, frame):
        # Always branches, so the jump flags never need resetting.
        self.need_jump = True
        self.jump_to_address = self.address + self.offset
        logging.debug(
            'Instruction {na}: jump to address {a}'.format(
                na=self.class_name_and_address(),
                a=self.jump_to_address
            )
        )
@bytecode(0xac)
class ireturn(_instruction):
    """ireturn: pop an int from the operand stack and return it to the caller."""
    def execute(self, frame):
        self.method_return = True
        self.return_value = frame.operand_stack.pop()
        # Bug fix: the original message used '{t}'.format(<positional>), which
        # raises KeyError instead of rendering the intended assertion text.
        assert type(self.return_value) is int, \
            'ireturn, but get value from operand in type {t}'.format(
                t=type(self.return_value))
        logging.debug(
            'Instruction {na}: return value {v}'.format(
                na=self.class_name_and_address(),
                v=self.return_value
            )
        )
@bytecode(0xb0)
class areturn(_instruction):
    """areturn: pop an object reference and return it to the caller."""
    def execute(self, frame):
        self.method_return = True
        self.return_value = frame.operand_stack.pop()
        assert type(self.return_value) is FRAME.Object, \
            f'areturn, but get value from operand in type {type(self.return_value)}'
        logging.debug(
            'Instruction {na}: return value {v}'.format(
                na=self.class_name_and_address(),
                v=self.return_value
            )
        )
@bytecode(0xb1)
class instruction_return(_instruction):
    """return (void): pop the current frame with no return value.

    Named instruction_return because ``return`` is a Python keyword.
    """
    def execute(self, frame):
        self.method_return = True
        logging.debug(
            'Instruction {na}: void return'.format(
                na=self.class_name_and_address()
            )
        )
@bytecode(0xb2)
class getstatic(_instruction):
def len_of_operand(self):
return 2
def put_operands(self, operand_bytes):
assert len(operand_bytes) == 2
self.index = int.from_bytes(
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
The Python parts of the Jedi library for VIM. It is mostly about communicating
with VIM.
"""
import functools
import os
import re
import sys
import traceback  # for exception output
from contextlib import contextmanager
from shlex import split as shsplit

try:
    from itertools import zip_longest
except ImportError:
    from itertools import izip_longest as zip_longest  # Python 2

import vim
is_py3 = sys.version_info[0] >= 3
if is_py3:
ELLIPSIS = "…"
unicode = str
else:
ELLIPSIS = u"…"
try:
# Somehow sys.prefix is set in combination with VIM and virtualenvs.
# However the sys path is not affected. Just reset it to the normal value.
sys.prefix = sys.base_prefix
sys.exec_prefix = sys.base_exec_prefix
except AttributeError:
# If we're not in a virtualenv we don't care. Everything is fine.
pass
class PythonToVimStr(unicode):
    """ Vim has a different string implementation of single quotes """
    __slots__ = []

    def __new__(cls, obj, encoding='UTF-8'):
        # On Python 2, decode byte strings first so the unicode constructor
        # receives text; on Python 3 (or already-unicode input) skip that step.
        if not (is_py3 or isinstance(obj, unicode)):
            obj = unicode.__new__(cls, obj, encoding)

        # Vim cannot deal with zero bytes:
        obj = obj.replace('\0', '\\0')
        return unicode.__new__(cls, obj)

    def __repr__(self):
        # this is totally stupid and makes no sense but vim/python unicode
        # support is pretty bad. don't ask how I came up with this... It just
        # works...
        # It seems to be related to that bug: http://bugs.python.org/issue5876
        if unicode is str:
            s = self
        else:
            s = self.encode('UTF-8')
        # Double-quote and escape so the result can be pasted into a Vim
        # expression verbatim.
        return '"%s"' % s.replace('\\', '\\\\').replace('"', r'\"')
class VimError(Exception):
    """Raised when a command/expression executed inside Vim reported an error.

    Carries the Vim-side message, the throwpoint reported by Vim, and the
    command/expression string that was being executed.
    """

    def __init__(self, message, throwpoint, executing):
        # Bug fix: use the explicit class instead of ``super(type(self), self)``.
        # With type(self), any subclass would recurse infinitely, because
        # type(self) is then the subclass itself.
        super(VimError, self).__init__(message)
        self.message = message
        self.throwpoint = throwpoint
        self.executing = executing

    def __str__(self):
        return self.message + '; created by: ' + repr(self.executing)
def _catch_exception(string, is_eval):
    """
    Interface between vim and python calls back to it.
    Necessary, because the exact error message is not given by `vim.error`.
    """
    # jedi#_vim_exceptions runs the command/expression inside try/catch on the
    # Vim side and returns a dict with either 'result' or 'exception' info.
    result = vim.eval('jedi#_vim_exceptions({0}, {1})'.format(
        repr(PythonToVimStr(string, 'UTF-8')), int(is_eval)))
    if 'exception' in result:
        raise VimError(result['exception'], result['throwpoint'], string)
    return result['result']
def vim_command(string):
    """Run an ex command in Vim; raises VimError on a Vim-side error."""
    _catch_exception(string, is_eval=False)
def vim_eval(string):
    """Evaluate a Vim expression; raises VimError on a Vim-side error."""
    return _catch_exception(string, is_eval=True)
def no_jedi_warning(error=None):
    """Show a highlighted 'install Jedi' warning in Vim, with optional error detail."""
    vim.command('echohl WarningMsg')
    vim.command('echom "Please install Jedi if you want to use jedi-vim."')
    if error:
        vim.command('echom "The error was: {0}"'.format(error))
    vim.command('echohl None')
def echo_highlight(msg):
    """Echo *msg* in Vim using the WarningMsg highlight, prefixed 'jedi-vim:'."""
    vim_command('echohl WarningMsg | echom "jedi-vim: {0}" | echohl None'.format(
        str(msg).replace('"', '\\"')))
# Import the bundled jedi/parso from next to this file, temporarily putting
# them first on sys.path so they win over any system-installed copies.
jedi_path = os.path.join(os.path.dirname(__file__), 'jedi')
sys.path.insert(0, jedi_path)
parso_path = os.path.join(os.path.dirname(__file__), 'parso')
sys.path.insert(0, parso_path)
try:
    import jedi
except ImportError:
    # Leave ``jedi = None`` as the sentinel checked by the decorators below;
    # keep the import error for later display.
    jedi = None
    jedi_import_error = sys.exc_info()
else:
    try:
        version = jedi.__version__
    except Exception as e:  # e.g. AttributeError
        echo_highlight(
            "Error when loading the jedi python module ({0}). "
            "Please ensure that Jedi is installed correctly (see Installation "
            "in the README.".format(e))
        jedi = None
    else:
        if isinstance(version, str):
            # the normal use case, now.
            from jedi import utils
            version = utils.version_info()
        if version < (0, 7):
            echo_highlight('Please update your Jedi version, it is too old.')
finally:
    # Restore sys.path so project imports are not shadowed by the bundles.
    sys.path.remove(jedi_path)
    sys.path.remove(parso_path)
def catch_and_print_exceptions(func):
    """Decorator: run *func*; on any exception print the traceback and return None.

    Exceptions must never propagate into Vim's Python bridge, so they are
    reported on stdout (which ends up in ``:messages``) instead.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (Exception, vim.error):
            print(traceback.format_exc())
            return None
    return wrapper
def _check_jedi_availability(show_error=False):
    """Decorator factory: skip the wrapped call when jedi failed to import.

    When jedi is unavailable the wrapper returns None, optionally showing
    the 'install Jedi' warning first.
    """
    def func_receiver(func):
        @functools.wraps(func)  # preserve metadata of the wrapped function
        def wrapper(*args, **kwargs):
            if jedi is None:
                if show_error:
                    no_jedi_warning()
                return None
            return func(*args, **kwargs)
        return wrapper
    return func_receiver
# Cache for get_environment(): (last g:jedi#force_py_version value, Environment).
current_environment = (None, None)
def get_environment(use_cache=True):
    """Return the jedi Environment selected by g:jedi#force_py_version.

    Results are cached in the module-level ``current_environment`` keyed on
    the Vim setting's value; pass ``use_cache=False`` to force a re-lookup.
    """
    global current_environment

    vim_force_python_version = vim_eval("g:jedi#force_py_version")
    if use_cache and vim_force_python_version == current_environment[0]:
        return current_environment[1]

    environment = None
    if vim_force_python_version == "auto":
        environment = jedi.api.environment.get_cached_default_environment()
    else:
        force_python_version = vim_force_python_version
        if '0000' in force_python_version or '9999' in force_python_version:
            # It's probably a float that wasn't shortened.
            try:
                force_python_version = "{:.1f}".format(float(force_python_version))
            except ValueError:
                pass
        elif isinstance(force_python_version, float):
            force_python_version = "{:.1f}".format(force_python_version)

        try:
            environment = jedi.get_system_environment(force_python_version)
        except jedi.InvalidPythonEnvironment as exc:
            # Fall back to the default environment and tell the user why.
            environment = jedi.api.environment.get_cached_default_environment()
            echo_highlight(
                "force_python_version=%s is not supported: %s - using %s." % (
                    vim_force_python_version, str(exc), str(environment)))

    current_environment = (vim_force_python_version, environment)
    return environment
def get_known_environments():
    """Get known Jedi environments (virtualenvs first, then system pythons)."""
    envs = list(jedi.api.environment.find_virtualenvs())
    envs.extend(jedi.api.environment.find_system_environments())
    return envs
@catch_and_print_exceptions
def get_script(source=None, column=None):
    """Build a jedi.Script for the current buffer/cursor.

    *source* and *column* default to the current buffer contents and cursor
    column; the row is always taken from the cursor.
    """
    # Let jedi also consider other listed .py buffers for dynamic analysis.
    jedi.settings.additional_dynamic_modules = [
        b.name for b in vim.buffers if (
            b.name is not None and
            b.name.endswith('.py') and
            b.options['buflisted'])]
    if source is None:
        source = '\n'.join(vim.current.buffer)
    row = vim.current.window.cursor[0]
    if column is None:
        column = vim.current.window.cursor[1]
    buf_path = vim.current.buffer.name
    return jedi.Script(
        source, row, column, buf_path,
        encoding=vim_eval('&encoding') or 'latin1',
        environment=get_environment(),
    )
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def completions():
    """Vim omnifunc backend: findstart phase returns the completion start
    column; the second phase returns the completion list via ``return``."""
    row, column = vim.current.window.cursor
    # Clear call signatures in the buffer so they aren't seen by the completer.
    # Call signatures in the command line can stay.
    if int(vim_eval("g:jedi#show_call_signatures")) == 1:
        clear_call_signatures()
    if vim.eval('a:findstart') == '1':
        # Walk back over identifier characters to find where the word starts.
        count = 0
        for char in reversed(vim.current.line[:column]):
            if not re.match(r'[\w\d]', char):
                break
            count += 1
        vim.command('return %i' % (column - count))
    else:
        base = vim.eval('a:base')
        source = ''
        for i, line in enumerate(vim.current.buffer):
            # enter this path again, otherwise source would be incomplete
            if i == row - 1:
                source += line[:column] + base + line[column:]
            else:
                source += line
            source += '\n'
        # here again hacks, because jedi has a different interface than vim
        column += len(base)
        try:
            script = get_script(source=source, column=column)
            completions = script.completions()
            signatures = script.call_signatures()

            out = []
            for c in completions:
                d = dict(word=PythonToVimStr(c.name[:len(base)] + c.complete),
                         abbr=PythonToVimStr(c.name_with_symbols),
                         # stuff directly behind the completion
                         menu=PythonToVimStr(c.description),
                         info=PythonToVimStr(c.docstring()),  # docstr
                         icase=1,  # case insensitive
                         dup=1  # allow duplicates (maybe later remove this)
                         )
                out.append(d)

            strout = str(out)
        except Exception:
            # print to stdout, will be in :messages
            print(traceback.format_exc())
            strout = ''
            completions = []
            signatures = []

        show_call_signatures(signatures)
        vim.command('return ' + strout)
@contextmanager
def tempfile(content):
    """Context manager yielding a closed temp file containing *content*.

    NOTE: intentionally shadows the stdlib ``tempfile`` module name within
    this module.
    """
    # Using this instead of the tempfile module because Windows won't read
    # from a file not yet written to disk
    with open(vim_eval('tempname()'), 'w') as f:
        f.write(content)
    try:
        yield f
    finally:
        os.unlink(f.name)
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def goto(mode="goto"):
    """
    :param str mode: "definition", "assignment", "goto"
    :return: list of definitions/assignments
    :rtype: list
    """
    # NOTE(review): an unrecognized mode leaves ``definitions`` unbound and
    # raises NameError below; callers must pass one of the three modes.
    script = get_script()
    if mode == "goto":
        definitions = script.goto_assignments(follow_imports=True)
    elif mode == "definition":
        definitions = script.goto_definitions()
    elif mode == "assignment":
        definitions = script.goto_assignments()

    if not definitions:
        echo_highlight("Couldn't find any definitions for this.")
    elif len(definitions) == 1 and mode != "related_name":
        d = list(definitions)[0]
        if d.in_builtin_module():
            if d.is_keyword:
                echo_highlight("Cannot get the definition of Python keywords.")
            else:
                echo_highlight("Builtin modules cannot be displayed (%s)."
                               % d.desc_with_module)
        else:
            using_tagstack = int(vim_eval('g:jedi#use_tag_stack')) == 1
            # Jump to another buffer only when the target is a different file.
            if (d.module_path or '') != vim.current.buffer.name:
                result = new_buffer(d.module_path,
                                    using_tagstack=using_tagstack)
                if not result:
                    return []
            if (using_tagstack and d.module_path and
                    os.path.exists(d.module_path)):
                # Push the jump onto Vim's tag stack via a throwaway tags file.
                tagname = d.name
                with tempfile('{0}\t{1}\t{2}'.format(
                        tagname, d.module_path, 'call cursor({0}, {1})'.format(
                            d.line, d.column + 1))) as f:
                    old_tags = vim.eval('&tags')
                    old_wildignore = vim.eval('&wildignore')
                    try:
                        # Clear wildignore to ensure tag file isn't ignored
                        vim.command('set wildignore=')
                        vim.command('let &tags = %s' %
                                    repr(PythonToVimStr(f.name)))
                        vim.command('tjump %s' % tagname)
                    finally:
                        vim.command('let &tags = %s' %
                                    repr(PythonToVimStr(old_tags)))
                        vim.command('let &wildignore = %s' %
                                    repr(PythonToVimStr(old_wildignore)))
            vim.current.window.cursor = d.line, d.column
    else:
        # Multiple hits: offer them in the quickfix window.
        show_goto_multi_results(definitions)
    return definitions
def relpath(path):
    """Return *path* relative to the cwd if it lies inside it, else unchanged."""
    abspath = os.path.abspath(path)
    cwd = os.getcwd()
    # Bug fix: compare against cwd plus a separator so that a sibling such as
    # /foo/barbaz is not mistaken for being inside /foo/bar.
    if abspath == cwd or abspath.startswith(cwd + os.sep):
        return os.path.relpath(path)
    return path
def annotate_description(d):
    """Return a one-line label for definition *d*: its source line, prefixed
    with ``[type]`` unless the line already makes the type obvious."""
    snippet = d.get_line_code().strip()
    kind = d.type
    # Statements and functions whose line already starts with 'def' need
    # no prefix — the code speaks for itself.
    if kind == 'statement':
        return snippet
    if kind == 'function' and snippet.startswith('def'):
        return snippet
    label = 'def' if kind == 'function' else kind
    return '[%s] %s' % (label, snippet)
def show_goto_multi_results(definitions):
    """Create a quickfix list for multiple definitions."""
    lst = []
    for d in definitions:
        if d.in_builtin_module():
            # No file to jump to — show a text-only entry.
            lst.append(dict(text=PythonToVimStr('Builtin ' + d.description)))
        elif d.module_path is None:
            # Typically a namespace, in the future maybe other things as
            # well.
            lst.append(dict(text=PythonToVimStr(d.description)))
        else:
            text = annotate_description(d)
            # Quickfix columns are 1-based, jedi columns are 0-based.
            lst.append(dict(filename=PythonToVimStr(relpath(d.module_path)),
                            lnum=d.line, col=d.column + 1,
                            text=PythonToVimStr(text)))
    vim_eval('setqflist(%s)' % repr(lst))
    vim_eval('jedi#add_goto_window(' + str(len(lst)) + ')')
@catch_and_print_exceptions
def usages(visuals=True):
    """Find usages of the name under the cursor; optionally highlight them
    and always list them in the quickfix window when any are found."""
    script = get_script()
    definitions = script.usages()
    if not definitions:
        echo_highlight("No usages found here.")
        return definitions

    if visuals:
        highlight_usages(definitions)
        show_goto_multi_results(definitions)
    return definitions
def highlight_usages(definitions, length=None):
    """Highlight *definitions* located in the current buffer via matchaddpos.

    *length* overrides the highlighted span; by default each name's own
    length is used.
    """
    for definition in definitions:
        # Only color the current module/buffer.
        if (definition.module_path or '') == vim.current.buffer.name:
            # matchaddpos needs a list of positions where a position is a list
            # of (line, column, length).
            # The column starts with 1 and not 0.
            positions = [
                [definition.line, definition.column + 1, length or len(definition.name)]
            ]
            vim_eval("matchaddpos('jediUsage', %s)" % repr(positions))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def show_documentation():
    """Collect docstrings for the definitions under the cursor and hand them
    to Vim via the l:doc / l:doc_lines variables."""
    script = get_script()
    try:
        definitions = script.goto_definitions()
    # NOTE(review): jedi.NotFoundError only exists in older jedi releases —
    # confirm against the bundled jedi version.
    except jedi.NotFoundError:
        definitions = []
    except Exception:
        # print to stdout, will be in :messages
        definitions = []
        print("Exception, this shouldn't happen.")
        print(traceback.format_exc())

    if not definitions:
        echo_highlight('No documentation found for that.')
        vim.command('return')
    else:
        docs = ['Docstring for %s\n%s\n%s' % (d.desc_with_module, '=' * 40, d.docstring())
                if d.docstring() else '|No Docstring for %s|' % d for d in definitions]
        text = ('\n' + '-' * 79 + '\n').join(docs)
        vim.command('let l:doc = %s' % repr(PythonToVimStr(text)))
        vim.command('let l:doc_lines = %s' % len(text.split('\n')))
    return True
@catch_and_print_exceptions
def clear_call_signatures():
# Check if using command line | |
"""visualization.py: Functions for visualizing MAVE-NN models."""
# Standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import pdb
# Special plotting-related imports
#from matplotlib.colors import DivergingNorm, Normalize
from matplotlib.colors import TwoSlopeNorm, Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# MAVE-NN imports
from mavenn.src.error_handling import handle_errors, check
from mavenn.src.validate import validate_alphabet, validate_seqs
@handle_errors
def _get_45deg_mesh(mat):
    """Create X and Y node grids for *mat*, rotated by -45 degrees."""
    # Clockwise rotation by 45 degrees.
    theta = -np.pi / 4
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])

    # Unrotated K x K node grid, offset by half a cell so cells are centered
    # on integer coordinates.
    K = len(mat) + 1
    grid1d = np.arange(0, K) - .5
    X, Y = np.meshgrid(grid1d, grid1d, indexing='ij')

    # Rotate every node coordinate in one matrix product.
    rotated = rot @ np.array([X.ravel(), Y.ravel()])
    X_rot = rotated[0, :].reshape(K, K)
    # The transpose matches the orientation convention expected by callers.
    Y_rot = rotated[1, :].reshape(K, K).T
    return X_rot, Y_rot
@handle_errors
def heatmap(values,
            alphabet,
            seq=None,
            seq_kwargs=None,
            ax=None,
            show_spines=False,
            cbar=True,
            cax=None,
            clim=None,
            clim_quantile=1,
            ccenter=None,
            cmap='coolwarm',
            cmap_size="5%",
            cmap_pad=0.1):
    """
    Draw a heatmap illustrating an ``L`` x ``C`` matrix of values, where ``L`` is
    sequence length and ``C`` is the alphabet size.
    Parameters
    ----------
    values: (np.ndarray)
        Array of shape ``(L,C)`` that contains values to plot.
    alphabet: (str, np.ndarray)
        Alphabet name ``'dna'``, ``'rna'``, or ``'protein'``, or 1D array
        containing characters in the alphabet.
    seq: (str, None)
        The sequence to show, if any, using dots plotted on top of the heatmap.
        Must have length ``L`` and be comprised of characters in ``alphabet``.
    seq_kwargs: (dict)
        Arguments to pass to ``Axes.scatter()`` when drawing dots to illustrate
        the characters in ``seq``.
    ax: (matplotlib.axes.Axes)
        The ``Axes`` object on which the heatmap will be drawn.
        If ``None``, one will be created. If specified, ``cbar=True``,
        and ``cax=None``, ``ax`` will be split in two to make room for a
        colorbar.
    show_spines: (bool)
        Whether to show spines around the edges of the heatmap.
    cbar: (bool)
        Whether to draw a colorbar next to the heatmap.
    cax: (matplotlib.axes.Axes, None)
        The ``Axes`` object on which the colorbar will be drawn,
        if requested. If ``None``, one will be created by splitting
        ``ax`` in two according to ``cmap_size`` and ``cmap_pad``.
    clim: (list, None)
        List of the form ``[cmin, cmax]``, specifying the maximum ``cmax``
        and minimum ``cmin`` values spanned by the colormap. Overrides
        ``clim_quantile``.
    clim_quantile: (float)
        Must be a float in the range [0,1]. ``clim`` will be automatically
        chosen to include this central quantile of values.
    ccenter: (float)
        Value at which to position the center of a diverging
        colormap. Setting ``ccenter=0`` often makes sense.
    cmap: (str, matplotlib.colors.Colormap)
        Colormap to use.
    cmap_size: (str)
        Fraction of ``ax`` width to be used for the colorbar. For formatting
        requirements, see the documentation for
        ``mpl_toolkits.axes_grid1.make_axes_locatable()``.
    cmap_pad: (float)
        Space between colorbar and the shrunken heatmap ``Axes``. For formatting
        requirements, see the documentation for
        ``mpl_toolkits.axes_grid1.make_axes_locatable()``.
    Returns
    -------
    ax: (matplotlib.axes.Axes)
        ``Axes`` object containing the heatmap.
    cb: (matplotlib.colorbar.Colorbar, None)
        Colorbar object linked to ``ax``, or ``None`` if no colorbar was drawn.
    """
    alphabet = validate_alphabet(alphabet)
    L, C = values.shape

    # Set extent: positions on x, characters on y, cells centered on integers.
    xlim = [-.5, L - .5]
    ylim = [-.5, C - .5]

    # If seq is set, validate it against the alphabet.
    if seq:
        seq = validate_seqs(seq, alphabet)

    # Choose color limits covering the central clim_quantile mass of the
    # finite values (clim_quantile=1 spans the full finite range).
    if clim is None:
        vals = values.ravel()
        vals = vals[np.isfinite(vals)]
        clim = np.quantile(vals, q=[(1 - clim_quantile) / 2,
                                    1 - (1 - clim_quantile) / 2])

    # Create axis if none already exists
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure

    # Needed to center colormap at ccenter
    if ccenter is not None:

        # Reset ccenter to the midpoint if it falls outside clim.
        if (clim[0] > ccenter) or (clim[1] < ccenter):
            ccenter = 0.5 * (clim[0] + clim[1])

        norm = TwoSlopeNorm(vmin=clim[0], vcenter=ccenter, vmax=clim[1])

    # Otherwise, use uncentered colormap
    else:
        norm = Normalize(vmin=clim[0], vmax=clim[1])

    # Plot heatmap; values is transposed so characters run along y.
    x_edges = np.arange(L + 1) - .5
    y_edges = np.arange(C + 1) - .5
    im = ax.pcolormesh(x_edges,
                       y_edges,
                       values.T,
                       shading='flat',
                       cmap=cmap,
                       clim=clim,
                       norm=norm)

    # Mark the provided sequence with dots.
    _ = np.newaxis
    if seq:

        # Set default marker style.
        if seq_kwargs is None:
            seq_kwargs = {'marker': '.', 'color': 'k', 's': 2}

        # Get (position, character-index) coords of each sequence character.
        # NOTE(review): seq[0] suggests validate_seqs returns a container of
        # sequences — confirm against validate_seqs.
        seq_arr = np.array(list(seq[0]))
        xy = np.argwhere(seq_arr[:, _] == alphabet[_, :])

        # Mark sequence
        ax.scatter(xy[:, 0], xy[:, 1], **seq_kwargs)

    # Style plot
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    ax.set_yticks(range(C))
    ax.set_yticklabels(alphabet, ha='center')
    ax.invert_yaxis()
    if not show_spines:
        for loc, spine in ax.spines.items():
            spine.set_visible(False)

    # Create colorbar if requested, splitting ax when no cax was supplied.
    if cbar:
        if cax is None:
            cax = make_axes_locatable(ax).new_horizontal(size=cmap_size,
                                                         pad=cmap_pad)
            fig.add_axes(cax)
        cb = plt.colorbar(im, cax=cax)

    # Otherwise, return None for cb
    else:
        cb = None

    return ax, cb
@handle_errors
def heatmap_pairwise(values,
alphabet,
seq=None,
seq_kwargs=None,
ax=None,
gpmap_type="pairwise",
show_position=False,
position_size=None,
position_pad=1,
show_alphabet=True,
alphabet_size=None,
alphabet_pad=1,
show_seplines=True,
sepline_kwargs=None,
xlim_pad=.1,
ylim_pad=.1,
cbar=True,
cax=None,
clim=None,
clim_quantile=1,
ccenter=0,
cmap='coolwarm',
cmap_size="5%",
cmap_pad=0.1):
"""
Draw a heatmap illustrating pairwise or neighbor values, e.g. representing
model parameters, mutational effects, etc.
Note: The resulting plot has aspect ratio of 1 and is scaled so that pixels
have half-diagonal lengths given by ``half_pixel_diag = 1/(C*2)``, and
blocks of characters have half-diagonal lengths given by
``half_block_diag = 1/2``. This is done so that the horizontal distance
between positions (as indicated by x-ticks) is 1.
Parameters
----------
values: (np.array)
An array, shape ``(L,C,L,C)``, containing pairwise or neighbor values.
Note that only values at coordinates ``[l1, c1, l2, c2]`` with
``l2`` > ``l1`` will be plotted. NaN values will not be plotted.
alphabet: (str, np.ndarray)
Alphabet name ``'dna'``, ``'rna'``, or ``'protein'``, or 1D array
containing characters in the alphabet.
seq: (str, None)
The sequence to show, if any, using dots plotted on top of the heatmap.
Must have length ``L`` and be comprised of characters in ``alphabet``.
seq_kwargs: (dict)
Arguments to pass to ``Axes.scatter()`` when drawing dots to illustrate
the characters in ``seq``.
ax: (matplotlib.axes.Axes)
The ``Axes`` object on which the heatmap will be drawn.
If ``None``, one will be created. If specified, ``cbar=True``,
and ``cax=None``, ``ax`` will be split in two to make room for a
colorbar.
gpmap_type: (str)
Determines how many pairwise parameters are plotted.
Must be ``'pairwise'`` or ``'neighbor'``. If ``'pairwise'``, a
triangular heatmap will be plotted. If ``'neighbor'``, a heatmap
resembling a string of diamonds will be plotted.
show_position: (bool)
Whether to annotate the heatmap with position labels.
position_size: (float)
Font size to use for position labels. Must be >= 0.
position_pad: (float)
Additional padding, in units of ``half_pixel_diag``, used to space
the position labels further from the heatmap.
show_alphabet: (bool)
Whether to annotate the heatmap with character labels.
alphabet_size: (float)
Font size to use for alphabet. Must be >= 0.
alphabet_pad: (float)
Additional padding, in units of ``half_pixel_diag``, used to space
the alphabet labels from the heatmap.
show_seplines: (bool)
Whether to draw lines separating character blocks for different
position pairs.
sepline_kwargs: (dict)
Keywords to pass to ``Axes.plot()`` when drawing seplines.
xlim_pad: (float)
Additional padding to add (in absolute units) both left and right of
the heatmap.
ylim_pad: (float)
Additional padding to add (in absolute units) both above and below the
heatmap.
cbar: (bool)
Whether to draw a colorbar next to the heatmap.
cax: (matplotlib.axes.Axes, None)
The ``Axes`` object on which the colorbar will be drawn, if requested.
If ``None``, one will be created by splitting ``ax`` in two according
to ``cmap_size`` and ``cmap_pad``.
clim: (list, None)
List of the form ``[cmin, cmax]``, specifying the maximum ``cmax``
and minimum ``cmin`` values spanned by the colormap. Overrides
``clim_quantile``.
clim_quantile: (float)
Must be a float in the range [0,1]. ``clim`` will be automatically
chosen to include this central quantile of values.
ccenter: (float)
Value at which to position the center of a diverging
colormap. Setting ``ccenter=0`` often makes sense.
cmap: (str, matplotlib.colors.Colormap)
Colormap to use.
cmap_size: (str)
Fraction of ``ax`` width to be used for the colorbar. For formatting
requirements, see the documentation for
``mpl_toolkits.axes_grid1.make_axes_locatable()``.
cmap_pad: (float)
Space between colorbar and the shrunken heatmap ``Axes``. For formatting
requirements, see the documentation for
``mpl_toolkits.axes_grid1.make_axes_locatable()``.
Returns
-------
ax: (matplotlib.axes.Axes)
``Axes`` object containing the heatmap.
cb: (matplotlib.colorbar.Colorbar, None)
Colorbar object linked to ``ax``, or ``None`` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.