# NOTE: dataset-export artifact removed (table header "text stringlengths 957 885k |" and "<gh_stars>0");
# converted to comments so the file parses.
from __future__ import print_function
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
import math
import time
import sys
import os
import inspect
import types
import re
class MsgpackMixin:
    """Mixin giving subclasses msgpack-rpc (de)serialization via __dict__."""

    def to_msgpack(self, *args, **kwargs):
        # The rpc layer serializes a plain dict of the instance attributes.
        return self.__dict__

    @classmethod
    def from_msgpack(cls, encoded):
        """Rebuild an instance from a decoded msgpack map (bytes keys)."""
        instance = cls()
        decoded = {}
        for key, value in encoded.items():
            # msgpack map keys arrive as bytes; attribute names must be str
            decoded[key.decode('utf-8')] = value
        instance.__dict__ = decoded
        return instance
class Vector3r(MsgpackMixin):
    """A 3-component float vector exchanged over the rpc wire."""
    # Class-level defaults mirror the msgpack field schema.
    x_val = np.float32(0)
    y_val = np.float32(0)
    z_val = np.float32(0)

    def __init__(self, x_val=np.float32(0), y_val=np.float32(0), z_val=np.float32(0)):
        # store the three components on the instance
        self.x_val, self.y_val, self.z_val = x_val, y_val, z_val
class Quaternionr(MsgpackMixin):
    """A quaternion exchanged over the rpc wire (identity by default)."""
    # Class-level defaults mirror the msgpack field schema.
    w_val = np.float32(0)
    x_val = np.float32(0)
    y_val = np.float32(0)
    z_val = np.float32(0)

    def __init__(self, x_val=np.float32(0), y_val=np.float32(0), z_val=np.float32(0), w_val=np.float32(1)):
        # note: w defaults to 1 (identity rotation) while the class default is 0
        self.x_val, self.y_val = x_val, y_val
        self.z_val, self.w_val = z_val, w_val
class AirSimImageType:
    """Namespace of integer codes identifying a camera image type.

    NOTE(review): values presumably must match the server-side AirSim
    ImageType enum — confirm before renumbering.
    """
    Scene = 0                 # rendered RGB view
    DepthMeters = 1           # depth image
    DepthVis = 2              # depth, scaled for visualization
    DisparityNormalized = 3   # normalized disparity
    Segmentation = 4          # object segmentation ids
    SurfaceNormals = 5        # per-pixel surface normals
class ImageRequest(MsgpackMixin):
    """Parameters describing one camera capture in a simGetImages call."""
    # Class-level defaults mirror the msgpack field schema.
    # NOTE(review): the class default for compress (False) disagrees with the
    # __init__ default (True); instances always carry the __init__ value.
    camera_id = np.uint8(0)
    image_type = AirSimImageType.Scene
    pixels_as_float = False
    compress = False

    def __init__(self, camera_id, image_type, pixels_as_float=False, compress=True):
        self.camera_id, self.image_type = camera_id, image_type
        self.pixels_as_float, self.compress = pixels_as_float, compress
class ImageResponse(MsgpackMixin):
    """One image returned by simGetImages; populated via from_msgpack."""
    image_data_uint8 = np.uint8(0)      # raw/compressed bytes (when pixels_as_float is false)
    image_data_float = np.float32(0)    # float pixel data (when pixels_as_float is true)
    camera_position = Vector3r()        # camera pose at capture time
    camera_orientation = Quaternionr()
    time_stamp = np.uint64(0)           # capture timestamp (units set by the server)
    message = ''                        # optional status/error text from the server
    pixels_as_float = np.float32(0)     # NOTE(review): looks like it should be a bool; verify
    compress = True                     # whether image_data_uint8 is compressed (png)
    width = 0                           # image width in pixels
    height = 0                          # image height in pixels
    image_type = AirSimImageType.Scene  # which AirSimImageType was rendered
class DragonflyClient:
    """Thin msgpack-rpc client for the Dragonfly simulation server."""

    def __init__(self, ip="127.0.0.1", port=41451):
        # every rpc call shares a 5 second timeout
        self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout=5)

    # ---- basic flight control -------------------------------------------
    def moveByDistance(self, dx, dy, dz):
        """Ask the server to translate by (dx, dy, dz); returns its reply."""
        reply = self.client.call('moveByDistance', dx, dy, dz)
        time.sleep(0.1)  # brief pause before the next command is issued
        return reply

    def turnByDegree(self, degree):
        """Ask the server to yaw by *degree*; returns its reply."""
        reply = self.client.call('turnByDegree', degree)
        time.sleep(0.1)
        return reply

    def isHit(self):
        """Query the server's collision flag."""
        return self.client.call('isHit')

    def reset(self):
        """Reset the simulation to its initial state."""
        reply = self.client.call('reset')
        time.sleep(0.1)
        return reply

    # ---- camera control --------------------------------------------------
    # simGetImages returns compressed png in an array of bytes when the
    # request asked for compression; image_type uses an AirSimImageType member.
    def simGetImages(self, requests):
        """Fetch images for a list of ImageRequest; returns ImageResponse list."""
        raw_responses = self.client.call('simGetImages', requests)
        return [ImageResponse.from_msgpack(raw) for raw in raw_responses]

    def simSetPose(self, pose, ignore_collison):
        # (parameter name 'ignore_collison' kept as-is: it is part of the API)
        self.client.call('simSetPose', pose, ignore_collison)

    def simGetPose(self):
        return self.client.call('simGetPose')
# ---- dataset-export separator: a second, unrelated source file follows ----
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paraparser.py
__version__=''' $Id$ '''
__doc__='''The parser used to process markup within paragraphs'''
import string
import re
from types import TupleType, UnicodeType, StringType
import sys
import os
import copy
import base64
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import reportlab.lib.sequencer
from reportlab.lib.abag import ABag
from reportlab.lib.utils import ImageReader
from reportlab.lib import xmllib
from reportlab.lib.colors import toColor, white, black, red, Color
from reportlab.lib.fonts import tt2ps, ps2tt
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.units import inch,mm,cm,pica
# matches text that already begins with a <para ...> wrapper tag
_re_para = re.compile(r'^\s*<\s*para(?:\s+|>|/>)')

sizeDelta = 2       # amount to reduce font size by for super and sub script
subFraction = 0.5   # fraction of font size that a sub script should be lowered
superFraction = 0.5 # fraction of font size that a super script should be raised
DEFAULT_INDEX_NAME='_indexAdd'  # canvas attribute used by <index> when no name= is given
def _convnum(s, unit=1, allowRelative=True):
if s[0] in ('+','-') and allowRelative:
try:
return ('relative',int(s)*unit)
except ValueError:
return ('relative',float(s)*unit)
else:
try:
return int(s)*unit
except ValueError:
return float(s)*unit
def _num(s, unit=1, allowRelative=True):
    """Convert a string like '10cm' to an int or float (in points).

    The default unit is point, but optionally you can use other
    default units like mm.
    """
    # Known suffixes, checked in the same order as the historical code;
    # a matching suffix selects the unit and is stripped before conversion.
    for suffix, factor in (('cm', cm), ('in', inch), ('pt', 1),
                           ('i', inch), ('mm', mm), ('pica', pica)):
        if s.endswith(suffix):
            unit = factor
            s = s[:-len(suffix)]
    return _convnum(s, unit, allowRelative)
def _numpct(s, unit=1, allowRelative=False):
    """Like _num, but a trailing '%' yields a _PCT proportion object."""
    if not s.endswith('%'):
        return _num(s, unit, allowRelative)
    return _PCT(_convnum(s[:-1], allowRelative=allowRelative))
class _PCT:
def __init__(self,v):
self._value = v*0.01
def normalizedValue(self,normalizer):
normalizer = normalizer or getattr(self,'_normalizer')
return normalizer*self._value
def _valignpc(s):
    """Parse an image valign attribute: keyword, percentage (_PCT) or number."""
    s = s.lower()
    if s in ('baseline', 'sub', 'super', 'top', 'text-top',
             'middle', 'bottom', 'text-bottom'):
        return s
    if s.endswith('%'):
        value = _convnum(s[:-1])
        if isinstance(value, tuple):
            value = value[1]  # discard the 'relative' marker
        return _PCT(value)
    value = _num(s)
    if isinstance(value, tuple):
        value = value[1]
    return value
def _autoLeading(x):
x = x.lower()
if x in ('','min','max','off'):
return x
raise ValueError('Invalid autoLeading=%r' % x )
def _align(s):
    """Map an alignment attribute value to a TA_* constant.

    Accepts 'left', 'right', 'justify' and 'center'/'centre'
    (case-insensitive); raises ValueError for anything else.
    """
    # str.lower() works on both str and unicode; string.lower() is a
    # Python-2-only module function removed in Python 3.
    s = s.lower()
    if s == 'left':
        return TA_LEFT
    if s == 'right':
        return TA_RIGHT
    if s == 'justify':
        return TA_JUSTIFY
    if s in ('centre', 'center'):
        return TA_CENTER
    # give the error a message (was a bare `raise ValueError`)
    raise ValueError('invalid alignment %r' % s)
# Each map translates an XML attribute name to a pair of
# (style attribute name, converter function); a None converter means the
# string value is used unchanged.  getAttributes() consults these.
_paraAttrMap = {'font': ('fontName', None),
                'face': ('fontName', None),
                'fontsize': ('fontSize', _num),
                'size': ('fontSize', _num),
                'leading': ('leading', _num),
                'autoleading': ('autoLeading', _autoLeading),
                'lindent': ('leftIndent', _num),
                'rindent': ('rightIndent', _num),
                'findent': ('firstLineIndent', _num),
                'align': ('alignment', _align),
                'spaceb': ('spaceBefore', _num),
                'spacea': ('spaceAfter', _num),
                'bfont': ('bulletFontName', None),
                'bfontsize': ('bulletFontSize',_num),
                'boffsety': ('bulletOffsetY',_num),
                'bindent': ('bulletIndent',_num),
                'bcolor': ('bulletColor',toColor),
                'color':('textColor',toColor),
                'backcolor':('backColor',toColor),
                'bgcolor':('backColor',toColor),
                'bg':('backColor',toColor),
                'fg': ('textColor',toColor),
                }
# attributes accepted on the <bullet> tag
_bulletAttrMap = {
                'font': ('bulletFontName', None),
                'face': ('bulletFontName', None),
                'size': ('bulletFontSize',_num),
                'fontsize': ('bulletFontSize',_num),
                'offsety': ('bulletOffsetY',_num),
                'indent': ('bulletIndent',_num),
                'color': ('bulletColor',toColor),
                'fg': ('bulletColor',toColor),
                }
#things which are valid font attributes
_fontAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color':('textColor', toColor),
                'backcolor':('backColor',toColor),
                'bgcolor':('backColor',toColor),
                }
#things which are valid span attributes
_spanAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color':('textColor', toColor),
                'backcolor':('backColor',toColor),
                'bgcolor':('backColor',toColor),
                'style': ('style',None),
                }
#things which are valid link attributes
_linkAttrMap = {'size': ('fontSize', _num),
                'face': ('fontName', None),
                'name': ('fontName', None),
                'fg': ('textColor', toColor),
                'color':('textColor', toColor),
                'backcolor':('backColor',toColor),
                'bgcolor':('backColor',toColor),
                'dest': ('link', None),
                'destination': ('link', None),
                'target': ('link', None),
                'href': ('link', None),
                }
# attributes accepted on the <a> (anchor) tag
_anchorAttrMap = {'fontSize': ('fontSize', _num),
                'fontName': ('fontName', None),
                'name': ('name', None),
                'fg': ('textColor', toColor),
                'color':('textColor', toColor),
                'backcolor':('backColor',toColor),
                'bgcolor':('backColor',toColor),
                'href': ('href', None),
                }
# attributes accepted on the <img> tag
_imgAttrMap = {
                'src': ('src', None),
                'width': ('width',_numpct),
                'height':('height',_numpct),
                'valign':('valign',_valignpc),
                }
# attributes accepted on the <index> tag
_indexAttrMap = {
                'name': ('name',None),
                'item': ('item',None),
                'offset': ('offset',None),
                'format': ('format',None),
                }
def _addAttributeNames(m):
K = m.keys()
for k in K:
n = m[k][0]
if n not in m: m[n] = m[k]
n = string.lower(n)
if n not in m: m[n] = m[k]
# Expand each map in place so canonical (and lowercase) style attribute
# spellings are accepted alongside the XML attribute names.
_addAttributeNames(_paraAttrMap)
_addAttributeNames(_fontAttrMap)
_addAttributeNames(_spanAttrMap)
_addAttributeNames(_bulletAttrMap)
_addAttributeNames(_anchorAttrMap)
_addAttributeNames(_linkAttrMap)
def _applyAttributes(obj, attr):
for k, v in attr.items():
if type(v) is TupleType and v[0]=='relative':
#AR 20/5/2000 - remove 1.5.2-ism
#v = v[1]+getattr(obj,k,0)
if hasattr(obj, k):
v = v[1]+getattr(obj,k)
else:
v = v[1]
setattr(obj,k,v)
#Named character entities intended to be supported from the special font
#with additions suggested by <NAME> who also suggested the
#numeric entity names that follow.
# Maps HTML-style entity names to their replacement text.  Despite the
# historical name, it covers general Latin/symbol entities as well as Greek.
# Values are UTF-8 encoded byte strings (Python 2 str), matching the parser's
# internal 'utf8' working encoding (see ParaParser.parse).
greeks = {
    'Aacute': '\xc3\x81',
    'aacute': '\xc3\xa1',
    'Acirc': '\xc3\x82',
    'acirc': '\xc3\xa2',
    'acute': '\xc2\xb4',
    'AElig': '\xc3\x86',
    'aelig': '\xc3\xa6',
    'Agrave': '\xc3\x80',
    'agrave': '\xc3\xa0',
    'alefsym': '\xe2\x84\xb5',
    'Alpha': '\xce\x91',
    'alpha': '\xce\xb1',
    'and': '\xe2\x88\xa7',
    'ang': '\xe2\x88\xa0',
    'Aring': '\xc3\x85',
    'aring': '\xc3\xa5',
    'asymp': '\xe2\x89\x88',
    'Atilde': '\xc3\x83',
    'atilde': '\xc3\xa3',
    'Auml': '\xc3\x84',
    'auml': '\xc3\xa4',
    'bdquo': '\xe2\x80\x9e',
    'Beta': '\xce\x92',
    'beta': '\xce\xb2',
    'brvbar': '\xc2\xa6',
    'bull': '\xe2\x80\xa2',
    'cap': '\xe2\x88\xa9',
    'Ccedil': '\xc3\x87',
    'ccedil': '\xc3\xa7',
    'cedil': '\xc2\xb8',
    'cent': '\xc2\xa2',
    'Chi': '\xce\xa7',
    'chi': '\xcf\x87',
    'circ': '\xcb\x86',
    'clubs': '\xe2\x99\xa3',
    'cong': '\xe2\x89\x85',
    'copy': '\xc2\xa9',
    'crarr': '\xe2\x86\xb5',
    'cup': '\xe2\x88\xaa',
    'curren': '\xc2\xa4',
    'dagger': '\xe2\x80\xa0',
    'Dagger': '\xe2\x80\xa1',
    'darr': '\xe2\x86\x93',
    'dArr': '\xe2\x87\x93',
    'deg': '\xc2\xb0',
    'delta': '\xce\xb4',
    'Delta': '\xe2\x88\x86',
    'diams': '\xe2\x99\xa6',
    'divide': '\xc3\xb7',
    'Eacute': '\xc3\x89',
    'eacute': '\xc3\xa9',
    'Ecirc': '\xc3\x8a',
    'ecirc': '\xc3\xaa',
    'Egrave': '\xc3\x88',
    'egrave': '\xc3\xa8',
    'empty': '\xe2\x88\x85',
    'emsp': '\xe2\x80\x83',
    'ensp': '\xe2\x80\x82',
    'Epsilon': '\xce\x95',
    'epsilon': '\xce\xb5',
    'epsiv': '\xce\xb5',
    'equiv': '\xe2\x89\xa1',
    'Eta': '\xce\x97',
    'eta': '\xce\xb7',
    'ETH': '\xc3\x90',
    'eth': '\xc3\xb0',
    'Euml': '\xc3\x8b',
    'euml': '\xc3\xab',
    'euro': '\xe2\x82\xac',
    'exist': '\xe2\x88\x83',
    'fnof': '\xc6\x92',
    'forall': '\xe2\x88\x80',
    'frac12': '\xc2\xbd',
    'frac14': '\xc2\xbc',
    'frac34': '\xc2\xbe',
    'frasl': '\xe2\x81\x84',
    'Gamma': '\xce\x93',
    'gamma': '\xce\xb3',
    'ge': '\xe2\x89\xa5',
    'harr': '\xe2\x86\x94',
    'hArr': '\xe2\x87\x94',
    'hearts': '\xe2\x99\xa5',
    'hellip': '\xe2\x80\xa6',
    'Iacute': '\xc3\x8d',
    'iacute': '\xc3\xad',
    'Icirc': '\xc3\x8e',
    'icirc': '\xc3\xae',
    'iexcl': '\xc2\xa1',
    'Igrave': '\xc3\x8c',
    'igrave': '\xc3\xac',
    'image': '\xe2\x84\x91',
    'infin': '\xe2\x88\x9e',
    'int': '\xe2\x88\xab',
    'Iota': '\xce\x99',
    'iota': '\xce\xb9',
    'iquest': '\xc2\xbf',
    'isin': '\xe2\x88\x88',
    'Iuml': '\xc3\x8f',
    'iuml': '\xc3\xaf',
    'Kappa': '\xce\x9a',
    'kappa': '\xce\xba',
    'Lambda': '\xce\x9b',
    'lambda': '\xce\xbb',
    'lang': '\xe2\x8c\xa9',
    'laquo': '\xc2\xab',
    'larr': '\xe2\x86\x90',
    'lArr': '\xe2\x87\x90',
    'lceil': '\xef\xa3\xae',
    'ldquo': '\xe2\x80\x9c',
    'le': '\xe2\x89\xa4',
    'lfloor': '\xef\xa3\xb0',
    'lowast': '\xe2\x88\x97',
    'loz': '\xe2\x97\x8a',
    'lrm': '\xe2\x80\x8e',
    'lsaquo': '\xe2\x80\xb9',
    'lsquo': '\xe2\x80\x98',
    'macr': '\xc2\xaf',
    'mdash': '\xe2\x80\x94',
    'micro': '\xc2\xb5',
    'middot': '\xc2\xb7',
    'minus': '\xe2\x88\x92',
    'mu': '\xc2\xb5',
    'Mu': '\xce\x9c',
    'nabla': '\xe2\x88\x87',
    'nbsp': '\xc2\xa0',
    'ndash': '\xe2\x80\x93',
    'ne': '\xe2\x89\xa0',
    'ni': '\xe2\x88\x8b',
    'notin': '\xe2\x88\x89',
    'not': '\xc2\xac',
    'nsub': '\xe2\x8a\x84',
    'Ntilde': '\xc3\x91',
    'ntilde': '\xc3\xb1',
    'Nu': '\xce\x9d',
    'nu': '\xce\xbd',
    'Oacute': '\xc3\x93',
    'oacute': '\xc3\xb3',
    'Ocirc': '\xc3\x94',
    'ocirc': '\xc3\xb4',
    'OElig': '\xc5\x92',
    'oelig': '\xc5\x93',
    'Ograve': '\xc3\x92',
    'ograve': '\xc3\xb2',
    'oline': '\xef\xa3\xa5',
    'omega': '\xcf\x89',
    'Omega': '\xe2\x84\xa6',
    'Omicron': '\xce\x9f',
    'omicron': '\xce\xbf',
    'oplus': '\xe2\x8a\x95',
    'ordf': '\xc2\xaa',
    'ordm': '\xc2\xba',
    'or': '\xe2\x88\xa8',
    'Oslash': '\xc3\x98',
    'oslash': '\xc3\xb8',
    'Otilde': '\xc3\x95',
    'otilde': '\xc3\xb5',
    'otimes': '\xe2\x8a\x97',
    'Ouml': '\xc3\x96',
    'ouml': '\xc3\xb6',
    'para': '\xc2\xb6',
    'part': '\xe2\x88\x82',
    'permil': '\xe2\x80\xb0',
    'perp': '\xe2\x8a\xa5',
    'phis': '\xcf\x86',
    'Phi': '\xce\xa6',
    'phi': '\xcf\x95',
    'piv': '\xcf\x96',
    'Pi': '\xce\xa0',
    'pi': '\xcf\x80',
    'plusmn': '\xc2\xb1',
    'pound': '\xc2\xa3',
    'prime': '\xe2\x80\xb2',
    'Prime': '\xe2\x80\xb3',
    'prod': '\xe2\x88\x8f',
    'prop': '\xe2\x88\x9d',
    'Psi': '\xce\xa8',
    'psi': '\xcf\x88',
    'radic': '\xe2\x88\x9a',
    'rang': '\xe2\x8c\xaa',
    'raquo': '\xc2\xbb',
    'rarr': '\xe2\x86\x92',
    'rArr': '\xe2\x87\x92',
    'rceil': '\xef\xa3\xb9',
    'rdquo': '\xe2\x80\x9d',
    'real': '\xe2\x84\x9c',
    'reg': '\xc2\xae',
    'rfloor': '\xef\xa3\xbb',
    'Rho': '\xce\xa1',
    'rho': '\xcf\x81',
    'rlm': '\xe2\x80\x8f',
    'rsaquo': '\xe2\x80\xba',
    'rsquo': '\xe2\x80\x99',
    'sbquo': '\xe2\x80\x9a',
    'Scaron': '\xc5\xa0',
    'scaron': '\xc5\xa1',
    'sdot': '\xe2\x8b\x85',
    'sect': '\xc2\xa7',
    'shy': '\xc2\xad',
    'sigmaf': '\xcf\x82',
    'sigmav': '\xcf\x82',
    'Sigma': '\xce\xa3',
    'sigma': '\xcf\x83',
    'sim': '\xe2\x88\xbc',
    'spades': '\xe2\x99\xa0',
    'sube': '\xe2\x8a\x86',
    'sub': '\xe2\x8a\x82',
    'sum': '\xe2\x88\x91',
    'sup1': '\xc2\xb9',
    'sup2': '\xc2\xb2',
    'sup3': '\xc2\xb3',
    'supe': '\xe2\x8a\x87',
    'sup': '\xe2\x8a\x83',
    'szlig': '\xc3\x9f',
    'Tau': '\xce\xa4',
    'tau': '\xcf\x84',
    'there4': '\xe2\x88\xb4',
    'thetasym': '\xcf\x91',
    'thetav': '\xcf\x91',
    'Theta': '\xce\x98',
    'theta': '\xce\xb8',
    'thinsp': '\xe2\x80\x89',
    'THORN': '\xc3\x9e',
    'thorn': '\xc3\xbe',
    'tilde': '\xcb\x9c',
    'times': '\xc3\x97',
    'trade': '\xef\xa3\xaa',
    'Uacute': '\xc3\x9a',
    'uacute': '\xc3\xba',
    'uarr': '\xe2\x86\x91',
    'uArr': '\xe2\x87\x91',
    'Ucirc': '\xc3\x9b',
    'ucirc': '\xc3\xbb',
    'Ugrave': '\xc3\x99',
    'ugrave': '\xc3\xb9',
    'uml': '\xc2\xa8',
    'upsih': '\xcf\x92',
    'Upsilon': '\xce\xa5',
    'upsilon': '\xcf\x85',
    'Uuml': '\xc3\x9c',
    'uuml': '\xc3\xbc',
    'weierp': '\xe2\x84\x98',
    'Xi': '\xce\x9e',
    'xi': '\xce\xbe',
    'Yacute': '\xc3\x9d',
    'yacute': '\xc3\xbd',
    'yen': '\xc2\xa5',
    'yuml': '\xc3\xbf',
    'Yuml': '\xc5\xb8',
    'Zeta': '\xce\x96',
    'zeta': '\xce\xb6',
    'zwj': '\xe2\x80\x8d',
    'zwnj': '\xe2\x80\x8c',
    }
#------------------------------------------------------------------------
class ParaFrag(ABag):
    """class ParaFrag contains the intermediate representation of string
    segments as they are being parsed by the XMLParser.
    fontname, fontSize, rise, textColor, cbDefn

    (ABag allows arbitrary attributes, so the listed names are
    conventions used by the parser rather than declared fields.)
    """
# lazily-built table mapping a symbol-font byte (as a 1-char str) -> utf8 string
_greek2Utf8=None
def _greekConvert(data):
    """Translate *data* (text inside <greek> tags) byte-by-byte through the
    symbol font's decoding table, producing utf8 text.

    Python 2 only: relies on xrange, dict.iteritems and unichr.
    """
    global _greek2Utf8
    if not _greek2Utf8:
        from reportlab.pdfbase.rl_codecs import RL_Codecs
        import codecs
        # start from an identity map for bytes 32..255 ...
        dm = decoding_map = codecs.make_identity_dict(xrange(32,256))
        # ... blank out the control range ...
        for k in xrange(0,32):
            dm[k] = None
        # ... then overlay the symbol-font decoding data
        dm.update(RL_Codecs._RL_Codecs__rl_codecs_data['symbol'][0])
        _greek2Utf8 = {}
        for k,v in dm.iteritems():
            if not v:
                u = '\0'    # unmapped bytes become NUL
            else:
                u = unichr(v).encode('utf8')
            _greek2Utf8[chr(k)] = u
    return ''.join(map(_greek2Utf8.__getitem__,data))
#------------------------------------------------------------------
# !!! NOTE !!! THIS TEXT IS NOW REPLICATED IN PARAGRAPH.PY !!!
# The ParaFormatter will be able to format the following
# tags:
# < /b > - bold
# < /i > - italics
# < u > < /u > - underline
# < strike > < /strike > - strike through
# < super > < /super > - superscript
# < sup > < /sup > - superscript
# < sub > < /sub > - subscript
# <font name=fontfamily/fontname color=colorname size=float>
# <span name=fontfamily/fontname color=colorname backcolor=colorname size=float style=stylename>
# < bullet > </bullet> - bullet text (at head of para only)
# <onDraw name=callable label="a label"/>
# <index [name="callablecanvasattribute"] label="a label"/>
# <link>link text</link>
# attributes of links
# size/fontSize=num
# name/face/fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# dest/destination/target/href/link=target
# <a>anchor text</a>
# attributes of anchors
# fontSize=num
# fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# href=href
# <a name="anchorpoint"/>
# <unichar name="unicode character name"/>
# <unichar value="unicode code point"/>
# <img src="path" width="1in" height="1in" valign="bottom"/>
# width="w%" --> fontSize*w/100 idea from <NAME>
# height="h%" --> linewidth*h/100 <<EMAIL>>
# <greek> - </greek>
#
# The whole may be surrounded by <para> </para> tags
#
# It will also be able to handle any MathML specified Greek characters.
#------------------------------------------------------------------
class ParaParser(xmllib.XMLParser):
    """Parses intra-paragraph XML markup into lists of ParaFrag objects.

    Python 2 code throughout (old raise syntax, string module functions,
    unichr); internal working encoding is utf8 byte strings.
    """
    #----------------------------------------------------------
    # First we will define all of the xml tag handler functions.
    #
    # start_<tag>(attributes)
    # end_<tag>()
    #
    # While parsing the xml ParaFormatter will call these
    # functions to handle the string formatting tags.
    # At the start of each tag the corresponding field will
    # be set to 1 and at the end tag the corresponding field will
    # be set to 0. Then when handle_data is called the options
    # for that data will be apparent by the current settings.
    #----------------------------------------------------------
    def __getattr__( self, attrName ):
        """This way we can handle <TAG> the same way as <tag> (ignoring case)."""
        if attrName!=attrName.lower() and attrName!="caseSensitive" and not self.caseSensitive and \
            (attrName.startswith("start_") or attrName.startswith("end_")):
            return getattr(self,attrName.lower())
        raise AttributeError, attrName  # Python 2 raise syntax
    #### bold
    def start_b( self, attributes ):
        self._push(bold=1)
    def end_b( self ):
        self._pop(bold=1)
    def start_strong( self, attributes ):
        self._push(bold=1)
    def end_strong( self ):
        self._pop(bold=1)
    #### italics
    def start_i( self, attributes ):
        self._push(italic=1)
    def end_i( self ):
        self._pop(italic=1)
    def start_em( self, attributes ):
        self._push(italic=1)
    def end_em( self ):
        self._pop(italic=1)
    #### underline
    def start_u( self, attributes ):
        self._push(underline=1)
    def end_u( self ):
        self._pop(underline=1)
    #### strike
    def start_strike( self, attributes ):
        self._push(strike=1)
    def end_strike( self ):
        self._pop(strike=1)
    #### link
    def start_link(self, attributes):
        self._push(**self.getAttributes(attributes,_linkAttrMap))
    def end_link(self):
        # pop without _pop(): the link value is checked rather than reset
        frag = self._stack[-1]
        del self._stack[-1]
        assert frag.link!=None
    #### anchor
    def start_a(self, attributes):
        """<a> has two variants: <a name="..."/> (anchor point) and
        <a href="...">text</a> (treated as a link)."""
        A = self.getAttributes(attributes,_anchorAttrMap)
        name = A.get('name',None)
        if name is not None:
            name = name.strip()
            if not name:
                self._syntax_error('<a name="..."/> anchor variant requires non-blank name')
            if len(A)>1:
                self._syntax_error('<a name="..."/> anchor variant only allows name attribute')
                A = dict(name=A['name'])
            A['_selfClosingTag'] = 'anchor'
        else:
            href = A.get('href','').strip()
            if not href:
                self._syntax_error('<a> tag must have non-blank name or href attribute')
            A['link'] = href    #convert to our link form
            A.pop('href')
        self._push(**A)
    def end_a(self):
        frag = self._stack[-1]
        sct = getattr(frag,'_selfClosingTag','')
        if sct:
            # the <a name=.../> form: emit an anchor callback frag
            assert sct=='anchor' and frag.name,'Parser failure in <a/>'
            defn = frag.cbDefn = ABag()
            defn.label = defn.kind = 'anchor'
            defn.name = frag.name
            del frag.name, frag._selfClosingTag
            self.handle_data('')
            self._pop()
        else:
            # the href form behaves like end_link
            del self._stack[-1]
            assert frag.link!=None
    def start_img(self,attributes):
        A = self.getAttributes(attributes,_imgAttrMap)
        if not A.get('src'):
            self._syntax_error('<img> needs src attribute')
        A['_selfClosingTag'] = 'img'
        self._push(**A)
    def end_img(self):
        """Emit an inline-image frag; width/height default to the file's size."""
        frag = self._stack[-1]
        assert getattr(frag,'_selfClosingTag',''),'Parser failure in <img/>'
        defn = frag.cbDefn = ABag()
        defn.kind = 'img'
        defn.src = getattr(frag,'src',None)
        defn.image = ImageReader(defn.src)
        size = defn.image.getSize()
        defn.width = getattr(frag,'width',size[0])
        defn.height = getattr(frag,'height',size[1])
        defn.valign = getattr(frag,'valign','bottom')
        del frag._selfClosingTag
        self.handle_data('')
        self._pop()
    #### super script
    def start_super( self, attributes ):
        self._push(super=1)
    def end_super( self ):
        self._pop(super=1)
    start_sup = start_super
    end_sup = end_super
    #### sub script
    def start_sub( self, attributes ):
        self._push(sub=1)
    def end_sub( self ):
        self._pop(sub=1)
    #### greek script
    #### add symbol encoding
    def handle_charref(self, name):
        """Numeric character references (&#123; / &#xAB;) -> utf8 data."""
        try:
            if name[0]=='x':
                n = int(name[1:],16)
            else:
                n = int(name)
        except ValueError:
            self.unknown_charref(name)
            return
        self.handle_data(unichr(n).encode('utf8'))
    def handle_entityref(self,name):
        # our own entity table first, then the base class's standard ones
        if name in greeks:
            self.handle_data(greeks[name])
        else:
            xmllib.XMLParser.handle_entityref(self,name)
    def syntax_error(self,lineno,message):
        self._syntax_error(message)
    def _syntax_error(self,message):
        # unquoted attribute values are tolerated silently
        if message[:10]=="attribute " and message[-17:]==" value not quoted": return
        self.errors.append(message)
    def start_greek(self, attr):
        self._push(greek=1)
    def end_greek(self):
        self._pop(greek=1)
    def start_unichar(self, attr):
        """<unichar name="..."/> or <unichar code="..."/> -> one character."""
        if 'name' in attr:
            if 'code' in attr:
                self._syntax_error('<unichar/> invalid with both name and code attributes')
            try:
                v = unicodedata.lookup(attr['name']).encode('utf8')
            except KeyError:
                # NOTE(review): `name` is undefined here (NameError on this
                # path); looks like it should be attr['name']
                self._syntax_error('<unichar/> invalid name attribute\n"%s"' % name)
                v = '\0'
        elif 'code' in attr:
            try:
                # NOTE(review): eval() on attribute text — unsafe on
                # untrusted markup
                v = unichr(int(eval(attr['code']))).encode('utf8')
            except:
                self._syntax_error('<unichar/> invalid code attribute %s' % attr['code'])
                v = '\0'
        else:
            v = None
            if attr:
                self._syntax_error('<unichar/> invalid attribute %s' % attr.keys()[0])
        if v is not None:
            self.handle_data(v)
        self._push(_selfClosingTag='unichar')
    def end_unichar(self):
        self._pop()
    def start_font(self,attr):
        self._push(**self.getAttributes(attr,_fontAttrMap))
    def end_font(self):
        self._pop()
    def start_span(self,attr):
        A = self.getAttributes(attr,_spanAttrMap)
        if 'style' in A:
            # a named style supplies defaults which explicit attrs override
            style = self.findSpanStyle(A.pop('style'))
            D = {}
            for k in 'fontName fontSize textColor backColor'.split():
                v = getattr(style,k,self)   # self is the "missing" sentinel
                if v is self: continue
                D[k] = v
            D.update(A)
            A = D
        self._push(**A)
    end_span = end_font
    def start_br(self, attr):
        #just do the trick to make sure there is no content
        self._push(_selfClosingTag='br',lineBreak=True,text='')
    def end_br(self):
        frag = self._stack[-1]
        assert frag._selfClosingTag=='br' and frag.lineBreak,'Parser failure in <br/>'
        del frag._selfClosingTag
        self.handle_data('')
        self._pop()
    def _initial_frag(self,attr,attrMap,bullet=0):
        """Build the root frag for <para> (or <bullet> when bullet=1),
        applying any tag attributes to a deep copy of the style."""
        style = self._style
        if attr!={}:
            style = copy.deepcopy(style)
            _applyAttributes(style,self.getAttributes(attr,attrMap))
            self._style = style
        # initialize semantic values
        frag = ParaFrag()
        frag.sub = 0
        frag.super = 0
        frag.rise = 0
        frag.underline = 0
        frag.strike = 0
        frag.greek = 0
        frag.link = None
        if bullet:
            frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
            frag.fontSize = style.bulletFontSize
            frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
        else:
            frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
            frag.fontSize = style.fontSize
            frag.textColor = style.textColor
        return frag
    def start_para(self,attr):
        self._stack = [self._initial_frag(attr,_paraAttrMap)]
    def end_para(self):
        self._pop()
    def start_bullet(self,attr):
        if hasattr(self,'bFragList'):
            self._syntax_error('only one <bullet> tag allowed')
        self.bFragList = []
        frag = self._initial_frag(attr,_bulletAttrMap,1)
        frag.isBullet = 1
        self._stack.append(frag)
    def end_bullet(self):
        self._pop()
    #---------------------------------------------------------------
    # sequencer tags: these drive the shared reportlab.lib.sequencer
    def start_seqdefault(self, attr):
        try:
            default = attr['id']
        except KeyError:
            default = None
        self._seq.setDefaultCounter(default)
    def end_seqdefault(self):
        pass
    def start_seqreset(self, attr):
        try:
            id = attr['id']
        except KeyError:
            id = None
        try:
            base = int(attr['base'])
        except:
            base=0
        self._seq.reset(id, base)
    def end_seqreset(self):
        pass
    def start_seqchain(self, attr):
        try:
            order = attr['order']
        except KeyError:
            order = ''
        order = order.split()
        seq = self._seq
        # chain each counter to the next one listed
        for p,c in zip(order[:-1],order[1:]):
            seq.chain(p, c)
    end_seqchain = end_seqreset
    def start_seqformat(self, attr):
        try:
            id = attr['id']
        except KeyError:
            id = None
        try:
            value = attr['value']
        except KeyError:
            value = '1'
        self._seq.setFormat(id,value)
    end_seqformat = end_seqreset
    # AR hacking in aliases to allow the proper casing for RML.
    # the above ones should be deprecated over time. 2001-03-22
    start_seqDefault = start_seqdefault
    end_seqDefault = end_seqdefault
    start_seqReset = start_seqreset
    end_seqReset = end_seqreset
    start_seqChain = start_seqchain
    end_seqChain = end_seqchain
    start_seqFormat = start_seqformat
    end_seqFormat = end_seqformat
    def start_seq(self, attr):
        #if it has a template, use that; otherwise try for id;
        #otherwise take default sequence
        if 'template' in attr:
            templ = attr['template']
            self.handle_data(templ % self._seq)
            return
        elif 'id' in attr:
            id = attr['id']
        else:
            id = None
        increment = attr.get('inc', None)
        if not increment:
            output = self._seq.nextf(id)
        else:
            #accepts "no" for do not increment, or an integer.
            #thus, 0 and 1 increment by the right amounts.
            if increment.lower() == 'no':
                output = self._seq.thisf(id)
            else:
                incr = int(increment)
                output = self._seq.thisf(id)
                self._seq.reset(id, self._seq._this() + incr)
        self.handle_data(output)
    def end_seq(self):
        pass
    def start_onDraw(self,attr):
        """<onDraw name=... label=.../>: emit a draw-time callback frag."""
        defn = ABag()
        if 'name' in attr: defn.name = attr['name']
        else: self._syntax_error('<onDraw> needs at least a name attribute')
        if 'label' in attr: defn.label = attr['label']
        defn.kind='onDraw'
        self._push(cbDefn=defn)
        self.handle_data('')
        self._pop()
    end_onDraw=end_seq
    def start_index(self,attr):
        """<index item=.../>: emit an index-entry callback frag; the entry is
        pickled+base64'd into the label."""
        attr=self.getAttributes(attr,_indexAttrMap)
        defn = ABag()
        if 'item' in attr:
            label = attr['item']
        else:
            self._syntax_error('<index> needs at least an item attribute')
        if 'name' in attr:
            name = attr['name']
        else:
            name = DEFAULT_INDEX_NAME
        format = attr.get('format',None)
        if format is not None and format not in ('123','I','i','ABC','abc'):
            # NOTE(review): `offset` is referenced before assignment here;
            # the message presumably meant to interpolate `format`
            raise ValueError('index tag format is %r not valid 123 I i ABC or abc' % offset)
        offset = attr.get('offset',None)
        if offset is not None:
            try:
                offset = int(offset)
            except:
                raise ValueError('index tag offset is %r not an int' % offset)
        defn.label = base64.encodestring(pickle.dumps((label,format,offset))).strip()
        defn.name = name
        defn.kind='index'
        self._push(cbDefn=defn)
        self.handle_data('')
        self._pop()
    end_index=end_seq
    #---------------------------------------------------------------
    def _push(self,**attr):
        """Push a copy of the current frag with *attr* applied on top."""
        frag = copy.copy(self._stack[-1])
        _applyAttributes(frag,attr)
        self._stack.append(frag)
    def _pop(self,**kw):
        """Pop the frag stack; *kw* asserts the expected flag values."""
        frag = self._stack[-1]
        del self._stack[-1]
        for k, v in kw.items():
            assert getattr(frag,k)==v
        return frag
    def getAttributes(self,attr,attrMap):
        """Translate raw tag attributes through *attrMap*, converting values
        and recording syntax errors for unknown names / bad values."""
        A = {}
        for k, v in attr.items():
            if not self.caseSensitive:
                k = string.lower(k)
            if k in attrMap.keys():
                j = attrMap[k]
                func = j[1]
                try:
                    # None converter means keep the raw string
                    A[j[0]] = (func is None) and v or func(v)
                except:
                    self._syntax_error('%s: invalid value %s'%(k,v))
            else:
                self._syntax_error('invalid attribute name %s'%k)
        return A
    #----------------------------------------------------------------
    def __init__(self,verbose=0):
        self.caseSensitive = 0
        xmllib.XMLParser.__init__(self,verbose=verbose)
    def _iReset(self):
        # clear collected fragments (and any bullet fragments)
        self.fragList = []
        if hasattr(self, 'bFragList'): delattr(self,'bFragList')
    def _reset(self, style):
        '''reset the parser'''
        xmllib.XMLParser.reset(self)
        # initialize list of string segments to empty
        self.errors = []
        self._style = style
        self._iReset()
    #----------------------------------------------------------------
    def handle_data(self,data):
        "Creates an intermediate representation of string segments."
        frag = copy.copy(self._stack[-1])
        if hasattr(frag,'cbDefn'):
            # callback frags (onDraw/index/anchor/img) must carry no text
            kind = frag.cbDefn.kind
            if data: self._syntax_error('Only empty <%s> tag allowed' % kind)
        elif hasattr(frag,'_selfClosingTag'):
            if data!='': self._syntax_error('No content allowed in %s tag' % frag._selfClosingTag)
            return
        else:
            # if sub and super are both on they will cancel each other out
            if frag.sub == 1 and frag.super == 1:
                frag.sub = 0
                frag.super = 0
            if frag.sub:
                frag.rise = -frag.fontSize*subFraction
                frag.fontSize = max(frag.fontSize-sizeDelta,3)
            elif frag.super:
                frag.rise = frag.fontSize*superFraction
                frag.fontSize = max(frag.fontSize-sizeDelta,3)
            if frag.greek:
                frag.fontName = 'symbol'
                data = _greekConvert(data)
        # bold, italic, and underline
        frag.fontName = tt2ps(frag.fontName,frag.bold,frag.italic)
        #save our data
        frag.text = data
        if hasattr(frag,'isBullet'):
            delattr(frag,'isBullet')
            self.bFragList.append(frag)
        else:
            self.fragList.append(frag)
    def handle_cdata(self,data):
        self.handle_data(data)
    def _setup_for_parse(self,style):
        self._seq = reportlab.lib.sequencer.getSequencer()
        self._reset(style) # reinitialise the parser
    def parse(self, text, style):
        """Given a formatted string will return a list of
        ParaFrag objects with their calculated widths.
        If errors occur None will be returned and the
        self.errors holds a list of the error messages.
        """
        # AR 20040612 - when we feed Unicode strings in, sgmlop
        # tries to coerce to ASCII. Must intercept, coerce to
        # any 8-bit encoding which defines most of 256 points,
        # and revert at end. Yuk. Preliminary step prior to
        # removal of parser altogether.
        enc = self._enc = 'utf8' #our legacy default
        self._UNI = type(text) is UnicodeType
        if self._UNI:
            text = text.encode(enc)
        self._setup_for_parse(style)
        # the xmlparser requires that all text be surrounded by xml
        # tags, therefore we must throw some unused flags around the
        # given string
        if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)):
            text = "<para>"+text+"</para>"
        self.feed(text)
        self.close()    # force parsing to complete
        return self._complete_parse()
    def _complete_parse(self):
        """Tear down parse state and return (style, fragList, bFragList);
        the lists are None when any errors were recorded."""
        del self._seq
        style = self._style
        del self._style
        if len(self.errors)==0:
            fragList = self.fragList
            bFragList = hasattr(self,'bFragList') and self.bFragList or None
            self._iReset()
        else:
            fragList = bFragList = None
        if self._UNI:
            #reconvert to unicode
            if fragList:
                for frag in fragList:
                    frag.text = unicode(frag.text, self._enc)
            if bFragList:
                for frag in bFragList:
                    frag.text = unicode(frag.text, self._enc)
        return style, fragList, bFragList
    def _tt_parse(self,tt):
        # walk one (tag, attrs, children, ...) node of a tupletree
        tag = tt[0]
        try:
            start = getattr(self,'start_'+tag)
            end = getattr(self,'end_'+tag)
        except AttributeError:
            raise ValueError('Invalid tag "%s"' % tag)
        start(tt[1] or {})
        C = tt[2]
        if C:
            # dispatch: text children -> handle_data, tuples -> recurse
            M = self._tt_handlers
            for c in C:
                M[type(c) is TupleType](c)
        end()
    def tt_parse(self,tt,style):
        '''parse from tupletree form'''
        self._setup_for_parse(style)
        self._tt_handlers = self.handle_data,self._tt_parse
        self._tt_parse(tt)
        return self._complete_parse()
    def findSpanStyle(self,style):
        # subclasses supporting <span style=...> must override this
        raise ValueError('findSpanStyle not implemented in this parser')
if __name__=='__main__':
from reportlab.platypus import cleanBlockQuotedText
from reportlab.lib.styles import _baseFontName
_parser=ParaParser()
    def check_text(text,p=_parser):
        # Smoke-test helper: parse *text* with the shared module style and
        # dump either the recorded errors or the resulting paragraph
        # fragments.  (Python 2 print statements throughout.)
        print '##########'
        text = cleanBlockQuotedText(text)
        l,rv,bv = p.parse(text,style)
        if rv is None:
            # Parse failed -- show each recorded error message.
            for l in _parser.errors:
                print l
        else:
            print 'ParaStyle', l.fontName,l.fontSize,l.textColor
            for l in rv:
                # Trailing comma keeps cbDefn info on the same output line.
                print l.fontName,l.fontSize,l.textColor,l.bold, l.rise, '|%s|'%l.text[:25],
                if hasattr(l,'cbDefn'):
                    print 'cbDefn',getattr(l.cbDefn,'name',''),getattr(l.cbDefn,'label',''),l.cbDefn.kind
                else: print
style=ParaFrag()
style.fontName=_baseFontName
style.fontSize = 12
style.textColor = black
style.bulletFontName = black
style.bulletFontName=_baseFontName
style.bulletFontSize=12
text='''
<b><i><greek>a</greek>D</i></b>β<unichr value="0x394"/>
<font name="helvetica" size="15" color=green>
Tell me, O muse, of that ingenious hero who travelled far and wide
after</font> he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you<super>1</super> may know them.
'''
check_text(text)
check_text('<para> </para>')
check_text('<para font="%s" size=24 leading=28.8 spaceAfter=72>ReportLab -- Reporting for the Internet Age</para>'%_baseFontName)
check_text('''
<font color=red>τ</font>Tell me, O muse, of that ingenious hero who travelled far and wide
after he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you may know them.''')
check_text('''
Telemachus took this speech as of good omen and rose at once, for
he was bursting with what he had to say. He stood in the middle of
the assembly and the good herald Pisenor brought him his staff. Then,
turning to Aegyptius, "Sir," said he, "it is I, as you will shortly
learn, who have convened you, for it is I who am the most aggrieved.
I have not got wind of any host approaching about which I would warn
you, nor is there any matter of public moment on which I would speak.
My grieveance is purely personal, and turns on two great misfortunes
which have fallen upon my house. The first of these is the loss of
my excellent father, who was chief among all you here present, and
was like a father to every one of you; the second is much more serious,
and ere long will be the utter ruin of my estate. The sons of all
the chief men among you are pestering my mother to marry them against
her will. They are afraid to go to her father Icarius, asking him
to choose the one he likes best, and to provide marriage gifts for
his daughter, but day by day they keep hanging about my father's house,
sacrificing our oxen, sheep, and fat goats for their banquets, and
never giving so much as a thought to the quantity of wine they drink.
No estate can stand such recklessness; we have now no Ulysses to ward
off harm from our doors, and I cannot hold my own against them. I
shall never all my days be as good a man as he was, still I would
indeed defend myself if I had power to do so, for I cannot stand such
treatment any longer; my house is being disgraced and ruined. Have
respect, therefore, to your own consciences and to public opinion.
Fear, too, the wrath of heaven, lest the gods should be displeased
and turn upon you. I pray you by Jove and Themis, who is the beginning
and the end of councils, [do not] hold back, my friends, and leave
me singlehanded- unless it be that my brave father Ulysses did some
wrong to the Achaeans which you would now avenge on me, by aiding
and abetting these suitors. Moreover, if I am to be eaten out of house
and home at all, I had rather you did the eating yourselves, for I
could then take action against you to some purpose, and serve you
with notices from house to house till I got paid in full, whereas
now I have no remedy."''')
check_text('''
But as the sun was rising from the fair sea into the firmament of
heaven to shed light on mortals and immortals, they reached Pylos
the city of Neleus. Now the people of Pylos were gathered on the sea
shore to offer sacrifice of black bulls to Neptune lord of the Earthquake.
There were nine guilds with five hundred men in each, and there were
nine bulls to each guild. As they were eating the inward meats and
burning the thigh bones [on the embers] in the name of Neptune, Telemachus
and his crew arrived, furled their sails, brought their ship to anchor,
and went ashore. ''')
check_text('''
So the neighbours and kinsmen of Menelaus were feasting and making
merry in his house. There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''
"When we had passed the [Wandering] rocks, with Scylla and terrible
Charybdis, we reached the noble island of the sun-god, where were
the goodly cattle and sheep belonging to the sun Hyperion. While still
at sea in my ship I could bear the cattle lowing as they came home
to the yards, and the sheep bleating. Then I remembered what the blind
Theban prophet Teiresias had told me, and how carefully Aeaean Circe
had warned me to shun the island of the blessed sun-god. So being
much troubled I said to the men, 'My men, I know you are hard pressed,
but listen while I <strike>tell you the prophecy that</strike> Teiresias made me, and
how carefully Aeaean Circe warned me to shun the island of the blessed
sun-god, for it was here, she said, that our worst danger would lie.
Head the ship, therefore, away from the island.''')
check_text('''A<B>C&D"E'F''')
check_text('''A< B> C& D" E' F''')
check_text('''<![CDATA[<>&'"]]>''')
check_text('''<bullet face=courier size=14 color=green>+</bullet>
There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''<onDraw name="myFunc" label="aaa bbb">A paragraph''')
check_text('''<para><onDraw name="myFunc" label="aaa bbb">B paragraph</para>''')
# HVB, 30.05.2003: Test for new features
_parser.caseSensitive=0
check_text('''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''')
check_text('''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''')
check_text('''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''')
check_text('''Before the break <br/>the middle line <br/> and the last line.''')
check_text('''This should be an inline image <img src='../docs/images/testimg.gif'/>!''')
check_text('''aaa bbbb <u>underline </u> cccc''')
|
import json
import sqlite3
class ProductHelper:
    """Load Amazon-style product JSON records into a SQLite ``products`` table.

    Typical flow: ``create_db_connection()`` -> ``create_product_table()`` ->
    ``insert_json_lines()`` -> ``close_db()``.  Missing fields default to the
    placeholder string ``"na"``.
    """

    # Class-level defaults; create_db_connection() binds a per-instance CONN.
    DEBUG = False
    CONN = None

    def create_db_connection(self, filename):
        """Open (or create) the SQLite database file at *filename*."""
        self.CONN = sqlite3.connect(filename)

    def debug_print(self, s):
        """Print *s* only when DEBUG is turned on."""
        if self.DEBUG:
            print(s)

    def close_db(self):
        """Close the connection, tolerating one that was never opened."""
        try:
            self.CONN.close()
        except Exception:
            self.debug_print("Failed to close db connection.")

    def default_get(self, j, s):
        """Return ``j[s]``, or the placeholder "na" when the key is absent."""
        try:
            return j[s]
        except Exception:
            self.debug_print("Error parsing {} from {}".format(s, j))
            return "na"

    def flatten_data(self, j, s):
        """Return ``j[s]`` (an iterable of strings) comma-joined, or "na"."""
        try:
            return ",".join(j[s])
        except Exception:
            self.debug_print("Unable to flatten {}".format(j))
            return "na"

    def insert_product_from_json(self, json_string):
        """Parse one JSON product record and insert it (caller commits)."""
        the_json = json.loads(str(json_string))
        # Field order must match the column order of create_product_table().
        fields = [
            self.flatten_data(the_json, "category"),
            self.default_get(the_json, "tech1"),
            self.flatten_data(the_json, "description"),
            self.default_get(the_json, "fit"),
            self.default_get(the_json, "title"),
            self.flatten_data(the_json, "also_buy"),
            self.flatten_data(the_json, "image"),
            self.default_get(the_json, "tech2"),
            self.default_get(the_json, "brand"),
            self.flatten_data(the_json, "feature"),
            self.flatten_data(the_json, "rank"),
            self.flatten_data(the_json, "also_view"),
            self.default_get(the_json, "similar_item"),
            self.default_get(the_json, "date"),
            # Strip the currency symbol so prices are numerically comparable.
            self.default_get(the_json, "price").replace("$", ""),
            self.default_get(the_json, "asin"),
        ]
        # Parameterized statement: values are bound, never string-formatted.
        sql = "insert into products values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"
        self.CONN.execute(sql, fields)

    def insert_json_lines(self, json_lines):
        """Insert every JSON line; commit every 10000 rows and at the end.

        Bug fix: the original computed ``percentage = int(len/100)`` which is
        0 for fewer than 100 lines and crashed ``cnt % percentage`` with
        ZeroDivisionError.  Clamp to at least 1.
        """
        percentage = max(1, len(json_lines) // 100)
        cnt = 0
        for r_json in json_lines:
            cnt = cnt + 1
            if cnt % percentage == 0:
                self.debug_print("{}% complete.".format(int(cnt / percentage)))
            if cnt % 10000 == 0:
                # Periodic commit keeps the WAL/journal bounded on big loads.
                self.CONN.commit()
                self.debug_print("Checkpoint.")
            self.insert_product_from_json(r_json)
        self.CONN.commit()

    def create_product_table(self):
        """Create the ``products`` table (all columns stored as text)."""
        sql = """create table products (
            category text,
            tech1 text,
            description text,
            fit text,
            title text,
            also_buy text,
            image text,
            tech2 text,
            brand text,
            feature text,
            rank text,
            also_view text,
            similar_item text,
            date text,
            price text,
            asin text);
        """
        self.CONN.execute(sql)
        self.CONN.commit()

    def create_index(self):
        """(Re)build the index on asin, the primary lookup key."""
        self.CONN.execute("drop index if exists products_asin_index;")
        self.CONN.execute("create index products_asin_index on products(asin);")
        self.CONN.commit()

    def drop_product_table(self):
        """Drop the index and table if they exist (idempotent)."""
        self.CONN.execute("drop index if exists products_asin_index;")
        sql = "drop table if exists products;"
        self.CONN.execute(sql)
        self.CONN.commit()
|
#!/usr/bin/env python3
import requests
import argparse
import re
import logging
import time
from bs4 import BeautifulSoup
from slackclient import SlackClient
schedule_url = 'http://m.cgv.co.kr/Schedule/cont/ajaxMovieSchedule.aspx'
movieidx_url = 'http://m.cgv.co.kr/WebApp/MovieV4/movieDetail.aspx?cgvCode={}'
movieinfo_url = 'http://www.cgv.co.kr/movies/detail-view/?midx={}'
theater_url = 'http://m.cgv.co.kr/WebApp/TheaterV4/TheaterDetail.aspx?tc={}'
# Copied from https://github.com/maplejune/nightwatch-imax/blob/master/nightwatch_imax/schedule.py
# Expected format
# popupSchedule(movieName, screenName, startTime, remainSeat, capacitySeat, CGVCode, movieIdx, PlayYMD, PlayNum,
# ScreenCd, PlayTimeCd, Rating, ScreenRatingCd, ScreenRatingNm, StartHHMM, KidsScreenType,
# strPLAY_END_TM, strPLATFORM_NM, strMOVIE_RATING_NM, strPLAY_TIME_NM, strMOVIE_PKG_YN,
# strMOVIE_NOSHOW_YN, platformCd)
# Capture startTime, remainSeat, capacitySeat
MOVIE_CODE_PATTERN = re.compile("popupSchedule\('.*','.*','(\d\d:\d\d)','(\d*)','(\d*)',")
logging.basicConfig(format='%(asctime)s:%(message)s', level=logging.INFO)
logger = logging.getLogger()
class ScheduleInfo:
    """One screening parsed from a CGV schedule anchor.

    ``valid`` stays False when the popupSchedule(...) pattern is absent
    from the given markup.
    """

    starttime = ''
    remain = ''
    capacity = ''
    valid = False

    def __init__(self, rawhtml):
        match = MOVIE_CODE_PATTERN.search(str(rawhtml))
        if match is not None:
            # Captured groups: start time, remaining seats, total seats.
            self.starttime, self.remain, self.capacity = match.groups()
            self.valid = True

    def __str__(self):
        if not self.valid:
            return ''
        return f'{self.starttime} {self.remain}/{self.capacity}'
def query_schedule(theater_code, movie_code, date, screen_code):
    """Fetch the schedule page and return one line per valid screening."""
    payload = {'theaterCd': theater_code, 'playYMD': date, 'src': screen_code}
    html = requests.post(schedule_url, data=payload).text
    soup = BeautifulSoup(html, 'html.parser')
    # Every screening popup lives in an <a> tag; keep only parsable ones.
    screenings = [ScheduleInfo(anchor) for anchor in soup.find_all('a')]
    return '\n'.join(str(s) for s in screenings if s.valid)
def send_message(sc, channelID, message):
    """Post *message* to Slack; silently a no-op without a client/channel."""
    if sc is None or channelID == '':
        return
    sc.api_call(
        'chat.postMessage',
        channel=channelID,
        text=str(message)
    )
def get_theater_name(code):
    """Resolve a CGV theater code to its display name (fall back to the code)."""
    page = BeautifulSoup(requests.get(theater_url.format(code)).text, 'html.parser')
    matches = page.select('#headerTitleArea')
    if len(matches) != 1:
        # Page layout changed or code is invalid -- degrade gracefully.
        logger.warning(f'Title div is not unique. {matches}')
        return str(code)
    return matches[0].text
def get_movie_name(code):
    """Resolve a CGV movie code to the title shown on the desktop detail page."""
    # Step 1: the mobile page embeds the desktop movie index in a hidden input.
    detail = BeautifulSoup(requests.get(movieidx_url.format(code)).text, 'html.parser')
    idx_nodes = detail.select('#fanpageMovieIdx')
    if len(idx_nodes) != 1:
        logger.warning(f'Cannot find unique movie idx. {idx_nodes}')
        return str(code)
    midx = idx_nodes[0]['value']
    # Step 2: the desktop page carries the human-readable title.
    info = BeautifulSoup(requests.get(movieinfo_url.format(midx)).text, 'html.parser')
    return info.find('div', 'title').find('strong').text
def watch(theatercode, moviecode, date, screencode, slacktoken, slackchannel, period):
    """Poll the CGV schedule every *period* seconds, posting hits to Slack,
    until a non-empty schedule has been seen 10 times.

    Bug fix: the original read the module-level ``args`` (``args.slacktoken``,
    ``args.slackchannel``, ``args.date``) instead of its own parameters, so it
    only worked when invoked from the __main__ block with matching globals.
    """
    moviename = get_movie_name(moviecode)
    theatername = get_theater_name(theatercode)
    sc = SlackClient(slacktoken) if slacktoken != '' else None
    cnt = 0
    if sc is not None:
        msg = f'Start monitoring for {moviename} in {theatername} with screencode {screencode} on {date}'
        send_message(sc, slackchannel, msg)
    while cnt < 10:
        schedule = query_schedule(theatercode, moviecode, date, screencode)
        logger.info(f'\n{schedule}')
        if len(schedule) != 0:
            cnt += 1
            if sc is not None:
                send_message(sc, slackchannel, f'Found schedule for {date}!\n{schedule}')
        time.sleep(period)
    send_message(sc, slackchannel, 'Found schedule 10 times! quiting')
if __name__ == '__main__':
    # CLI entry point: monitor a CGV screen schedule, optionally reporting
    # to Slack.  All theater/movie/screen codes are CGV-internal strings.
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--theatercode', type=str, required=True, help='Theater code')
    parser.add_argument('-m', '--moviecode', type=str, required=True, help='Movie code')
    parser.add_argument('-d', '--date', type=str, required=True, help='Date in YYYYMMDD format')
    parser.add_argument('-s', '--screencode', type=str, required=True, help='Screen code')
    parser.add_argument('--slacktoken', type=str, default='', help='Slack API Token')
    parser.add_argument('--slackchannel', type=str, default='', help='Slack channel')
    parser.add_argument('--period', type=int, default=60, help='Repeat time in seconds')
    args = parser.parse_args()
    watch(args.theatercode, args.moviecode, args.date, args.screencode, args.slacktoken, args.slackchannel, args.period)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from unittest import TestCase, main
from io import BytesIO
from pdfmajor.parser.PSStackParser import KWD, LIT, PSBaseParser, PSStackParser
from pdfmajor.execptions import PSEOF
## Simplistic Test cases
##
TESTDATA = br'''%!PS
begin end
" @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
'''
TOKENS = [
(5, KWD(b'begin')), (11, KWD(b'end')), (16, KWD(b'"')), (19, KWD(b'@')),
(21, KWD(b'#')), (23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, b'abc'), (77, b''), (80, b'abc ( def ) ghi'),
(98, b'def \x00 4ghi'), (118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'), (180, b'foobaa'),
(191, b''), (194, b' '), (199, b'@@ '), (211, b'\xab\xcd\x00\x124\x05'),
(226, KWD(b'func')), (230, LIT('a')), (232, LIT('b')),
(234, KWD(b'{')), (235, b'c'), (238, KWD(b'do*')), (241, KWD(b'}')),
(242, KWD(b'def')), (246, KWD(b'[')), (248, 1), (250, b'z'), (254, KWD(b'!')),
(256, KWD(b']')), (258, KWD(b'<<')), (261, LIT('foo')), (266, b'bar'),
(272, KWD(b'>>'))
]
OBJS = [
(23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, b'abc'), (77, b''), (80, b'abc ( def ) ghi'),
(98, b'def \x00 4ghi'), (118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'), (180, b'foobaa'),
(191, b''), (194, b' '), (199, b'@@ '), (211, b'\xab\xcd\x00\x124\x05'),
(230, LIT('a')), (232, LIT('b')), (234, [b'c']), (246, [1, b'z']),
(258, {'foo': b'bar'}),
]
class TestPSBaseParser(TestCase):
    """Token- and object-level round-trip tests for PSStackParser."""

    @staticmethod
    def _exhaust(stream_bytes, step):
        # Build a parser whose flush() publishes everything on its stack,
        # then pull results with *step* until the parser signals EOF.
        class _FlushingParser(PSStackParser):
            def flush(self):
                self.add_results(*self.popall())

        parser = _FlushingParser(BytesIO(stream_bytes))
        collected = []
        try:
            while True:
                collected.append(step(parser))
        except PSEOF:
            pass
        return collected

    def get_tokens(self, s):
        """Return every (pos, token) pair produced for byte string *s*."""
        return self._exhaust(s, lambda p: p.nexttoken())

    def get_objects(self, s):
        """Return every (pos, object) pair produced for byte string *s*."""
        return self._exhaust(s, lambda p: p.nextobject())

    def test_1(self):
        """Tokenisation of TESTDATA must match the TOKENS fixture."""
        tokens = self.get_tokens(TESTDATA)
        logging.info(tokens)
        self.assertEqual(tokens, TOKENS)

    def test_2(self):
        """Object parsing of TESTDATA must match the OBJS fixture."""
        objs = self.get_objects(TESTDATA)
        logging.info(objs)
        self.assertEqual(objs, OBJS)
if __name__ == '__main__':
#import logging,sys,os,six
#logging.basicConfig(level=logging.DEBUG, filename='%s_%d.%d.log'%(os.path.basename(__file__),sys.version_info[0],sys.version_info[1]))
main() |
<reponame>elif-furkan-celik/KNN_Regression_and_Decision_Tree
# -*- coding: utf-8 -*-
"""knn_dtc_.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xd8oJeNQ0RiUtMXUhNaMfrc4eoT8mJkv
"""
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix,accuracy_score
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.cluster import KMeans
from numpy import unique
from numpy import where
from matplotlib import pyplot
import pydotplus
# Cleveland heart-disease dataset; "?" marks missing values in the raw file.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data"
names = ['age', 'gender', 'cp', 'trestbps', 'chol','fbs','restecg','thalach','exang','oldpeak','slope','ca','thal','diagnosis']
data = pd.read_csv(url, names=names,na_values=["?"])
data.median()
# Impute the two columns known to contain missing values.
data["ca"].fillna(0,inplace = True)
data["thal"].fillna(3,inplace = True)
data.info()
# NOTE(review): columns 0-11 are taken as features, so column 12 ('thal')
# is silently dropped -- confirm whether iloc[:, 0:13] was intended.
X = data.iloc[:,0:12]
y = data.iloc[:,13]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=3)
X_train = X_train.fillna(X_train.mean())
X_test = X_test.fillna(X_test.mean())
"""**KNN Regresyon**"""
clf1 = KNeighborsClassifier(n_neighbors=10)
clf1.fit(X_train, y_train)
y_pred = clf1.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(5,4))
sns.heatmap(cm1, annot=True)
plt.title('kNN \nAccuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# Standardise the features and retrain kNN -- distance-based models are
# sensitive to feature scale.
scaler = StandardScaler()
scaler.fit(X_train)  # original called fit_transform and discarded the result
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
clf2 = KNeighborsClassifier(n_neighbors=10)
clf2.fit(X_train, y_train)
# Bug fix: the original predicted with clf1 (trained on UNSCALED data),
# so this "scaled" confusion matrix never evaluated clf2 at all.
y_pred = clf2.predict(X_test)
cm2 = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(5,4))
sns.heatmap(cm2, annot=True)
plt.title('kNN \nAccuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
from sklearn.metrics import mean_absolute_error, mean_squared_error
mae_e = []
mse_e = []
# Sweep k = 1..34, recording MAE and RMSE for each setting and showing
# one confusion-matrix heatmap per k.
for i in range(1,35):
  clf = KNeighborsClassifier(n_neighbors=i)
  clf.fit(X_train, y_train)
  y_pred = clf.predict(X_test)
  cm = confusion_matrix(y_test, y_pred)
  plt.figure(figsize=(5,4))
  sns.heatmap(cm, annot=True)
  plt.title('kNN \nAccuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred)))
  plt.ylabel('True label')
  plt.xlabel('Predicted label')
  error_mae = mean_absolute_error(y_test, y_pred)
  mae_e.append(error_mae)
  # squared=False makes this RMSE despite the "mse" variable name.
  error_mse = mean_squared_error(y_test, y_pred, squared=False)
  mse_e.append(error_mse)
  print('MAE : ', error_mae)
  print('MSE : ', error_mse)
  plt.show()
# Plot the error curves over k.
curve = pd.DataFrame(mae_e)
curve.plot()
curve2 = pd.DataFrame(mse_e)
curve2.plot()
"""**karar** **ağacı**"""
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn import tree
mae = []
mse = []
X = data.iloc[:,0:12]
y = data.iloc[:,13]
for i in range(1,5):
for j in range(1,5):
i=i+1
print("split=",i,", depth=",j)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
dtc = DecisionTreeClassifier(min_samples_split=i)
dtc.fit(X_train, y_train)
dtc_1 = DecisionTreeRegressor(max_depth=j)
dtc_1.fit(X_train, y_train)
test_predictions = dtc_1.predict(X_test)
print(test_predictions)
err_mae = mean_absolute_error(y_test, test_predictions)
mae.append(err_mae)
err_mse = mean_squared_error(y_test, test_predictions, squared=False)
mse.append(err_mse)
print('MAE : ', err_mae)
print('MSE : ', err_mse)
dtc_curve = pd.DataFrame(mae)
dtc_curve.plot()
dtc_curve2 = pd.DataFrame(mse)
dtc_curve2.plot()
"""Best 8. dt (split=6 depth=4)"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
bdtc = DecisionTreeClassifier(min_samples_split=6)
bdtc.fit(X_train, y_train)
bdtc_1 = DecisionTreeRegressor(max_depth=4)
bdtc_1.fit(X_train, y_train)
from IPython.display import Image
dot_data = tree.export_graphviz(bdtc_1, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png()) |
<gh_stars>1-10
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from collections import Counter
from tensorflow import keras
from tensorflow.python.ops import math_ops
import tensorflow as tf
import os
import pickle
import re
from urllib.request import urlretrieve
from os.path import isfile, isdir
# from tqdm import tqdm
import zipfile
import hashlib
import time
import datetime
import random
from sklearn.externals import joblib
# class DLProgress(tqdm):
# """
# Handle Progress Bar while Downloading
# """
# last_block = 0
# def hook(self, block_num=1, block_size=1, total_size=None):
# """
# A hook function that will be called once on establishment of the network connection and
# once after each block read thereafter.
# :param block_num: A count of blocks transferred so far
# :param block_size: Block size in bytes
# :param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
# a file size in response to a retrieval request.
# """
# self.total = total_size
# self.update((block_num - self.last_block) * block_size)
# self.last_block = block_num
# class ZipDownload:
# def __init__(self):
# None
# def _unzip(self,save_path, _, database_name, data_path):
# """
# Unzip wrapper with the same interface as _ungzip
# :param save_path: The path of the gzip files
# :param database_name: Name of database
# :param data_path: Path to extract to
# :param _: HACK - Used to have to same interface as _ungzip
# """
# print('Extracting {}...'.format(database_name))
# with zipfile.ZipFile(save_path) as zf:
# zf.extractall(data_path)
# def download_extract(self,database_name, data_path):
# """
# Download and extract database
# :param database_name: Database name
# """
# DATASET_ML1M = 'ml-1m'
# if database_name == DATASET_ML1M:
# url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
# hash_code = 'c4d9eecfca2ab87c1945afe126590906'
# extract_path = os.path.join(data_path, 'ml-1m') #网络中的路径
# save_path = os.path.join(data_path, 'ml-1m.zip') #本地电脑的路径
# extract_fn = self._unzip
# if os.path.exists(extract_path): #本地含有该文件,表示我之前已经下载过(程序多次运行)
# print('Found {} Data'.format(database_name))
# return
# if not os.path.exists(data_path):
# os.makedirs(data_path)
# if not os.path.exists(save_path):
# with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
# urlretrieve(
# url,
# save_path,
# pbar.hook)
# assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \
# '{} file is corrupted. Remove the file and try again.'.format(save_path)
# os.makedirs(extract_path)
# try:
# extract_fn(save_path, extract_path, database_name, data_path)
# except Exception as err:
# shutil.rmtree(extract_path) # Remove extraction folder if there is an error
# raise err
# print('Done.')
'''
# Data preprocessing
1. User data: map gender F->0 / M->1, map ages onto consecutive integers 0-6, drop zip-code.
2. Movie data: map movie titles to numbers; map genres to numbers as well.
3. Rating data: drop the timestamp column.
'''
def DataPreprocess():
    '''
    Preprocess the MovieLens-1M .dat files and pickle the results.

    Users: gender mapped F->0/M->1, ages mapped to consecutive ints,
    zip-code dropped.  Movies: titles and genres digitised and padded to
    fixed length.  Ratings: timestamp dropped.  Everything is dumped to
    dataProcess.pkl for later training runs.
    '''
    # Read the three raw data files ("::"-separated, no header row).
    users_title = ['User-ID','Gender','Age','Occupation','Zip-code']
    users = pd.read_csv('./ml-1m/users.dat',sep = '::',header = None,names = users_title,engine = 'python')
    # users.head()
    movies_title = ['Movie-ID','Title','Genres']
    movies = pd.read_csv('./ml-1m/movies.dat',sep = '::',header = None,names = movies_title,engine = 'python')
    # movies.head()
    ratings_title = ['User-ID','Movie-ID','Rating','Timestamps']
    ratings = pd.read_csv('./ml-1m/ratings.dat',sep = '::',header = None,names = ratings_title,engine = 'python')
    # ratings.head()
    #---------------------------------------------------------------------------------------------------
    # User preprocessing (re-read with the final, hyphen-free column names).
    users_title = ['UserID','Gender','Age','Occupation','Zip-code']
    users = pd.read_csv('./ml-1m/users.dat',sep = '::',header = None,names =users_title,engine = 'python')
    users = users.filter(regex = 'UserID|Gender|Age|Occupation')
    users_orig = users.values # raw values kept (original comment: purpose unknown)
    # Gender: F -> 0, M -> 1.
    gender_map = {'F' :0,'M':1}
    users['Gender'] = users['Gender'].map(gender_map)
    # Age: map each distinct age bucket onto a consecutive integer id.
    age_map = {val:id for id,val in enumerate(set(users['Age']))}
    users['Age'] = users['Age'].map(age_map)
    # Movie preprocessing.
    movies_title = ['MovieID','Title','Genres']
    movies = pd.read_csv('./ml-1m/movies.dat',sep = '::',header = None,names =movies_title,engine = 'python')
    movies_orig = movies.values
    # Digitise the pipe-separated genre strings.
    genres_set = set()
    for genres in movies['Genres'].str.split('|'):
        genres_set.update(genres) # update() acts like add() per element here
    genres_set.add('<None>')
    genres2int = {val:id for id,val in enumerate(genres_set)}
    # movies['Genres'] = genres_map(genres_map)
    # Pad each movie's genre-id list with <None> so all lists share one length.
    # NOTE(review): pads up to max(genres2int.values()) entries, not
    # len(genres2int) -- verify this matches the model's genre input width.
    genres_map = {val:[genres2int[row] for row in val.split('|')] for val in set(movies['Genres'])}
    for key in genres_map:
        for rest in range(max(genres2int.values()) - len(genres_map[key])):
            genres_map[key].insert(len(genres_map[key]) + rest,genres2int['<None>'])
    movies['Genres'] = movies['Genres'].map(genres_map)
    # Strip the trailing "(year)" from movie titles.
    pattern = re.compile(r'^(.*)\((\d+)\)$')
    title_map = {val:pattern.match(val).group(1) for val in movies['Title']} # keep only the title part (group 1)
    movies['Title'] = movies['Title'].map(title_map)
    # Digitise the title words, padded/truncated to title_max_len tokens.
    title_set = set()
    for title in movies['Title'].str.split():
        title_set.update(title)
    title_set.add('<None>')
    title2int = {val:id for id,val in enumerate(title_set)}
    title_max_len = 15
    title_map = {title:[title2int[row] for row in title.split()] for title in movies['Title']}
    for key in title_map:
        for rest in range(title_max_len - len(title_map[key])):
            title_map[key].insert(len(title_map[key]) + rest,title2int['<None>'])
    movies['Title'] = movies['Title'].map(title_map)
    # Ratings: drop the timestamp column.
    ratings_title = ['UserID','MovieID','Rating','Timestamps']
    ratings = pd.read_csv('./ml-1m/ratings.dat',sep = '::',header = None,names =ratings_title,engine = 'python')
    ratings = ratings.filter(regex = 'UserID|MovieID|Rating')
    # Merge the three tables on their shared id columns.
    data = pd.merge(pd.merge(ratings, users), movies)
    # Split into features X and target y.
    target_fields = ['Rating']
    features_pd, targets_pd = data.drop(target_fields, axis=1), data[target_fields]
    features = features_pd.values
    targets = targets_pd.values
    pickle.dump((title_max_len,title_set,genres2int, features,targets,ratings,users,movies,data,movies_orig,users_orig),open('dataProcess.pkl','wb'))
    # return title_max_len,title_set,genres2int, features,targets,ratings,users,movies,data,movies_orig,users_orig
#构建网络模型
class mv_network(object):
    def __init__(self,data_features,batch_size = 256,learning_rate = 0.0001,embed_dim = 32,dropout_keep = 0.5,filter_num = 8,slide_window = [2,3,4,5]):
        """Build the two-tower (user/movie) rating-prediction model.

        NOTE(review): ``slide_window`` is a mutable default argument --
        safe only as long as no caller mutates it.
        """
        super(mv_network,self).__init__()
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.embed_dim = embed_dim
        self.dropout_keep = dropout_keep
        self.filter_num = filter_num
        self.slide_window = slide_window
        self.movie_matrix = []
        self.user_matrix = []
        self.best_loss = 99999
        self.losses = {'train':[], 'test':[]}
        self.MODEL_DIR = './models'
        uid,user_gender,user_age,user_occupation,movie_id,movie_genres,movie_titles = self.get_inputs(data_features)
        # Build the user feature tower.
        uid_embedding_layer,user_gender_embedding_layer,user_age_embedding_layer,user_occupation_embedding_layer = \
            self.get_user_embedding_layer(uid,user_gender,user_age,user_occupation)
        user_combine_layer, user_combine_layer_flat = \
            self.get_user_features(uid_embedding_layer,user_gender_embedding_layer,user_age_embedding_layer,user_occupation_embedding_layer)
        # Build the movie feature tower.
        movieID_embedding_layer = self.get_movieID_embedding_layer(movie_id)
        movie_genres_embedding_layer = self.get_movie_generes_embedding_layer(movie_genres)
        pool_layer_flat,dropout_layer = self.get_movie_title_cnn_layer(movie_titles)
        movie_combine_layer,movie_combine_layer_flat = self.get_movie_feature_layer(movieID_embedding_layer,movie_genres_embedding_layer,dropout_layer)
        # Predicted rating = dot product of the user and movie feature vectors.
        inference = tf.keras.layers.Lambda(lambda layer:tf.reduce_sum(layer[0] * layer[1],axis = 1),name='inference')((user_combine_layer_flat,movie_combine_layer_flat))
        inference = tf.keras.layers.Lambda(lambda layer: tf.expand_dims(layer, axis=1))(inference)
        self.model = tf.keras.Model(
            inputs = [uid,user_gender,user_age,user_occupation,movie_id,movie_genres,movie_titles],
            outputs = [inference])
        self.model.summary() # print the parameter summary
        # Optimizer, loss and metrics.
        self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        self.ComputeLoss = tf.keras.losses.MeanSquaredError()
        self.ComputeMetricsMAE = tf.keras.metrics.MeanAbsoluteError()
        self.avg_loss = tf.keras.metrics.Mean('loss',dtype = tf.float32)
        # Checkpointing: create the model dir if needed, then restore the
        # latest checkpoint when one exists (no-op otherwise).
        if tf.io.gfile.exists(self.MODEL_DIR ):
            pass
        else:
            tf.io.gfile.mkdir(self.MODEL_DIR )
        checkpoint_dir = os.path.join(self.MODEL_DIR , 'checkpoints')
        self.checkpoint_prefix = os.path.join(checkpoint_dir,'ckpt')
        self.checkpoint = tf.train.Checkpoint(model = self.model,optimizer = self.optimizer)
        self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    def get_params(self,data_features):
        """Derive vocabulary sizes/dimensions from the preprocessed data.

        ``data_features`` is the tuple (features, genres2int, title_set,
        title_max_len, movies) -- presumably unpacked from the pickle
        written by DataPreprocess (TODO confirm ordering against caller).
        """
        features, genres2int, title_set, title_max_len, movies = data_features[0],data_features[1],data_features[2],data_features[3],data_features[4]
        reserved_num = 1
        # Number of distinct user ids (plus one reserved slot).
        self.uid_max = max(features.take(0,1)) + reserved_num
        # Number of genders.
        self.gender_max = 2
        # Number of age buckets.
        self.age_max = max(features.take(3,1)) + 1
        # Number of occupations.
        self.occupation_max = max(features.take(4,1)) + 1
        # Movies.
        # Number of movie ids (plus one reserved slot).
        self.movieid_max = max(features.take(1,1)) + reserved_num
        # Number of genre ids.
        self.genres_max = max(genres2int.values()) + 1
        # Title vocabulary size.
        self.title_max = len(title_set)
        # Maximum title length in tokens.
        self.sentences_size = title_max_len
        # Map movie id -> row index; dataset ids are not contiguous, e.g.
        # row 5 does not necessarily hold movie id 5.
        self.movieid2idx = {val[0]:id for id, val in enumerate(movies.values)}
    # Define the model's symbolic input placeholders.
    def get_inputs(self,data_features):
        """Create Keras Input layers for the user and movie fields.

        Also populates the size attributes via get_params() first.
        NOTE(review): genre input width is genres_max-1 while titles use
        sentences_size -- confirm this matches DataPreprocess padding.
        """
        self.get_params(data_features)
        uid = tf.keras.layers.Input(shape=(1,),dtype="int32",name = 'uid')
        user_gender = tf.keras.layers.Input(shape = (1,),dtype = "int32",name = 'user_gender')
        user_age = tf.keras.layers.Input(shape = (1,),dtype = "int32",name = 'user_age')
        user_occupation = tf.keras.layers.Input(shape = (1,),dtype = "int32",name = 'user_occupation')
        movie_id = tf.keras.layers.Input(shape = (1,),dtype = "int32",name = 'movie_id')
        movie_genres = tf.keras.layers.Input(shape = (self.genres_max-1,),dtype = "int32",name = 'movie_genres')
        movie_titles = tf.keras.layers.Input(shape = (self.sentences_size,),dtype = "string",name = 'movie_titles')
        return uid,user_gender,user_age,user_occupation,movie_id,movie_genres,movie_titles
    # Define the user-side embedding matrices.
    def get_user_embedding_layer(self,uid,user_gender,user_age,user_occupation):
        """Embed each user field; gender/age use reduced widths
        (embed_dim//8 and embed_dim//2 respectively)."""
        uid_embedding_layer = tf.keras.layers.Embedding(self.uid_max,self.embed_dim,input_length = 1,name = 'uid_embedding_layer')(uid)
        user_gender_embedding_layer = tf.keras.layers.Embedding(self.gender_max,self.embed_dim//8,input_length = 1,name ='user_gender_embedding_layer')(user_gender)
        user_age_embedding_layer = tf.keras.layers.Embedding(self.age_max,self.embed_dim//2,input_length = 1,name ='user_age_embedding_layer')(user_age)
        user_occupation_embedding_layer = tf.keras.layers.Embedding(self.occupation_max,self.embed_dim,input_length = 1,name ='user_occupation_embedding_layer')(user_occupation)
        # movie_id_embedding_layer = tf.keras.layers.Embedding(movieid_max,self.embed_dim,input_length = 1,name ='movie_id_embedding_layer')
        # movie_genres_embedding_layer = tf.keras.layers.Embedding(genres_max,self.embed_dim,input_length = 18,name ='movie_genres_embedding')
        return uid_embedding_layer,user_gender_embedding_layer,user_age_embedding_layer,user_occupation_embedding_layer
#将用户的嵌入矩阵先各个经过一个全连接层,然后整合到一起,然后再连接一个全连接层
def get_user_features(self,uid_embedding_layer,user_gender_embedding_layer,user_age_embedding_layer,user_occupation_embedding_layer):
#各自全连接
uid_fc_layer = tf.keras.layers.Dense(self.embed_dim,name = 'uid_fc_layer',activation ='relu')(uid_embedding_layer)
user_gender_fc_layer = tf.keras.layers.Dense(self.embed_dim,name = 'user_gender_fc_layer',activation ='relu')(user_gender_embedding_layer)
user_age_fc_layer = tf.keras.layers.Dense(self.embed_dim,name ='user_age_fc_layer',activation ='relu')(user_age_embedding_layer)
user_occupation_fc_layer = tf.keras.layers.Dense(self.embed_dim,name = 'user_occupation_fc_layer',activation ='relu')(user_occupation_embedding_layer)
#将用户特征合并
user_combine_layer0 = tf.keras.layers.concatenate([uid_fc_layer,user_gender_fc_layer,user_age_fc_layer,user_occupation_fc_layer])
user_combine_layer1 = tf.keras.layers.Dense(256,activation='tanh')(user_combine_layer0)
#将得到的用户特征展平
user_combine_layer_flat = tf.keras.layers.Reshape([256],name='user_combine_layer_flat')(user_combine_layer1)
return user_combine_layer1, user_combine_layer_flat
#定义movieID的嵌入矩阵
def get_movieID_embedding_layer(self,movie_id):
movieID_embedding_layer = tf.keras.layers.Embedding(self.movieid_max,self.embed_dim,input_length = 1,name = 'movieID_embedding_layer')(movie_id)
return movieID_embedding_layer
def get_movie_generes_embedding_layer(self,movie_genres):
movie_genres_embedding_layer = tf.keras.layers.Embedding(self.genres_max,self.embed_dim,input_length = self.genres_max,name = 'movie_genres_embedding_layer')(movie_genres)
movie_genres_embedding_layer = tf.keras.layers.Lambda(lambda layer : tf.reduce_sum(layer,axis=1,keepdims=True))(movie_genres_embedding_layer)
#不知道为什么要将电影流派求和,每个电影的流派特征变成一个数
return movie_genres_embedding_layer
#movie title的文本卷积网络的实现
def get_movie_title_cnn_layer(self,movie_titles):
#从嵌入矩阵中得到电影名对应的各个单词的嵌入向量
movie_title_embedding_layer = tf.keras.layers.Embedding(self.title_max,self.embed_dim,input_length = self.sentences_size,name = 'movie_titles_embedding_layer')(movie_titles)
sp = movie_title_embedding_layer.shape
movie_title_embedding_layer_expand = tf.keras.layers.Reshape([sp[1],sp[2],1])(movie_title_embedding_layer)
#对文本嵌入层使用不同尺寸的卷积核进行卷积核和最大池化
pool_layer_list = []
for slide_window_size in self.slide_window:
conv_layer = tf.keras.layers.Conv2D(self.filter_num,(slide_window_size,self.embed_dim),1,activation = 'relu')(movie_title_embedding_layer_expand)
maxpool_layer = tf.keras.layers.MaxPooling2D(pool_size = (self.sentences_size-slide_window_size+1,1),strides=1)(conv_layer)
pool_layer_list.append(maxpool_layer)
#Dropout层
pool_layer = tf.keras.layers.concatenate(pool_layer_list,3,name = 'pool_layer')
max_num = len(self.slide_window) * self.filter_num
pool_layer_flat = tf.keras.layers.Reshape([1,max_num],name = 'pool_layer_flat')(pool_layer)
dropout_layer = tf.keras.layers.Dropout(self.dropout_keep,name = 'dropout_layer')(pool_layer_flat)
return pool_layer_flat,dropout_layer
def get_movie_feature_layer(self,movieID_embedding_layer,movie_genres_embedding_layer,dropout_layer):
#各嵌入层的全连接
movieID_fc_layer = tf.keras.layers.Dense(self.embed_dim,name = 'movieID_fc_layer',activation ='relu')(movieID_embedding_layer)
movie_genres_fc_layer = tf.keras.layers.Dense(self.embed_dim,name = 'movie_genres_fc_layer',activation ='relu')(movie_genres_embedding_layer)
#将嵌入层全连接层输出和文本卷积网络合并,然后再接一个全连接层
movie_combine_layer = tf.keras.layers.concatenate([movieID_fc_layer,movie_genres_fc_layer,dropout_layer],2)
movie_combine_layer = tf.keras.layers.Dense(256,name = 'movie_combine_layer',activation ='tanh')(movie_combine_layer)
movie_combine_layer_flat = tf.keras.layers.Reshape([256],name='movie_combine_layer_flat')(movie_combine_layer)
return movie_combine_layer,movie_combine_layer_flat
#取得batch
def get_batches(self,Xs, ys):
for start in range(0, len(Xs), self.batch_size):
end = min(start + self.batch_size, len(Xs))
yield Xs[start:end], ys[start:end]
    #Graph-compiled training step: forward pass, loss, gradients, update.
    @tf.function
    def train_step(self, x, y):
        """Run one optimization step on a single batch.

        Args:
            x: list of 7 feature tensors, in the model's input order
               (uid, gender, age, occupation, movie id, genres, titles).
            y: ground-truth ratings, shape (batch_size, 1).

        Returns:
            (loss, logits) for this batch. Also folds the batch into the
            running avg_loss and MAE metrics as a side effect.
        """
        # Record the operations used to compute the loss, so that the gradient
        # of the loss with respect to the variables can be computed.
        with tf.GradientTape() as tape:
            logits = self.model([x[0],
                x[1],
                x[2],
                x[3],
                x[4],
                x[5],
                x[6]], training=True)
            loss = self.ComputeLoss(y, logits)
        grads = tape.gradient(loss, self.model.trainable_variables)
        # Update the running metrics before applying the gradients.
        self.ComputeMetricsMAE(y, logits)
        self.avg_loss(loss)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss, logits
#训练集的训练
def training(self,users,movies,features,target_values,epochs = 1,log_freq = 50):
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
for epoch_i in range(epochs):
train_X,test_X,train_y,test_y = train_test_split(features,target_values,test_size = 0.2,random_state = 0)
train_batches = self.get_batches(train_X,train_y)
batch_num = len(train_X) // self.batch_size
train_start = time.time()
if True:
start = time.time()
for batch_i in range(batch_num):
x,y = next(train_batches)
#标题和电影类别特殊处理
movie_genres = np.zeros([self.batch_size,18])
movie_titles = np.zeros([self.batch_size,15])
for i in range(self.batch_size):
movie_genres[i] = x.take(6,1)[i]
movie_titles[i] = x.take(5,1)[i]
loss,logits = self.train_step([np.reshape(x.take(0,1),[self.batch_size,1]).astype(np.float32),
np.reshape(x.take(2,1),[self.batch_size,1]).astype(np.float32),
np.reshape(x.take(3,1),[self.batch_size,1]).astype(np.float32),
np.reshape(x.take(4,1),[self.batch_size,1]).astype(np.float32),
np.reshape(x.take(1,1),[self.batch_size,1]).astype(np.float32),
movie_genres.astype(np.float32),
movie_titles.astype(np.float32)],
np.reshape(y,[self.batch_size, 1]).astype(np.float32))
# avg_loss(loss) #计算平均误差
self.losses['train'].append(loss)
with train_summary_writer.as_default():
tf.summary.scalar('avg_loss',self.avg_loss.result(),step = epoch_i)
tf.summary.scalar('MAE',self.ComputeMetricsMAE.result(),step = epoch_i)
if tf.equal(self.optimizer.iterations % log_freq, 0):
# summary_ops_v2.scalar('loss', avg_loss.result(), step=self.optimizer.iterations)
# summary_ops_v2.scalar('mae', self.ComputeMetricsMAE.result(), step=self.optimizer.iterations)
# summary_ops_v2.scalar('mae', avg_mae.result(), step=self.optimizer.iterations)
rate = log_freq / (time.time() - start)
print('Step #{}\tEpoch {:>3} Batch {:>4}/{} Loss: {:0.6f} mae: {:0.6f} ({} steps/sec)'.format(
self.optimizer.iterations.numpy(),
epoch_i,
batch_i,
batch_num,
loss, (self.ComputeMetricsMAE.result()), rate))
# print('Step #{}\tLoss: {:0.6f} mae: {:0.6f} ({} steps/sec)'.format(
# self.optimizer.iterations.numpy(), loss, (avg_mae.result()), rate))
# self.avg_loss.reset_states()
# self.ComputeMetricsMAE.reset_states()
# avg_mae.reset_states()
start = time.time()
self.avg_loss.reset_states()
self.ComputeMetricsMAE.reset_states()
train_end = time.time()
print(
'\nTrain time for epoch #{} ({} total steps): {}'.format(epoch_i + 1, self.optimizer.iterations.numpy(),
train_end - train_start))
# with self.test_summary_writer.as_default():
self.testing((test_X, test_y), self.optimizer.iterations)
# self.checkpoint.save(self.checkpoint_prefix)
self.export_path = os.path.join(self.MODEL_DIR , 'export')
tf.saved_model.save(self.model, self.export_path) #保存模型的参数
self.GenerateMovieModel(movies)
self.GenerateUserModel(users)
    def testing(self, test_dataset, step_num):
        """Perform an evaluation of `model` on the examples from `test_dataset`.

        Args:
            test_dataset: (test_X, test_y) tuple of held-out features/ratings.
            step_num: optimizer iteration count (currently informational only).

        Side effects: folds the batches into the running avg_loss and MAE
        metrics, appends each batch loss to self.losses['test'], and saves a
        checkpoint whenever the mean test loss improves on self.best_loss.
        """
        test_X, test_y = test_dataset
        test_batches = self.get_batches(test_X, test_y)
        batch_num = (len(test_X) // self.batch_size)
        for batch_i in range(batch_num):
            x, y = next(test_batches)
            # NOTE(review): the genre width 18 is hard-coded here while the
            # model input elsewhere is self.genres_max-1 wide — confirm the
            # two agree for this dataset.
            categories = np.zeros([self.batch_size, 18])
            for i in range(self.batch_size):
                categories[i] = x.take(6, 1)[i]
            titles = np.zeros([self.batch_size, self.sentences_size])
            for i in range(self.batch_size):
                titles[i] = x.take(5, 1)[i]
            # Feature order must match the model's inputs:
            # uid, gender, age, occupation, movie id, genres, titles.
            logits = self.model([np.reshape(x.take(0, 1), [self.batch_size, 1]).astype(np.float32),
                                 np.reshape(x.take(2, 1), [self.batch_size, 1]).astype(np.float32),
                                 np.reshape(x.take(3, 1), [self.batch_size, 1]).astype(np.float32),
                                 np.reshape(x.take(4, 1), [self.batch_size, 1]).astype(np.float32),
                                 np.reshape(x.take(1, 1), [self.batch_size, 1]).astype(np.float32),
                                 categories.astype(np.float32),
                                 titles.astype(np.float32)], training=False)
            test_loss = self.ComputeLoss(np.reshape(y, [self.batch_size, 1]).astype(np.float32), logits)
            self.avg_loss(test_loss)
            # Record the per-batch test loss for later plotting.
            self.losses['test'].append(test_loss)
            self.ComputeMetricsMAE(np.reshape(y, [self.batch_size, 1]).astype(np.float32), logits)
        print('Model test set loss: {:0.6f} mae: {:0.6f}'.format(self.avg_loss.result(), self.ComputeMetricsMAE.result()))
        # Checkpoint only when the mean test loss improves.
        if self.avg_loss.result() < self.best_loss:
            self.best_loss = self.avg_loss.result()
            print("best loss = {}".format(self.best_loss))
            self.checkpoint.save(self.checkpoint_prefix)
def rating_movie(self,userID,movieID,users):
genres = np.array(movies.values[self.movieid2idx[movieID]][2]).reshape(1,self.genres_max)
titles = np.array(movies.values[self.movieid2idx[movieID]][1]).reshape(1,self.sentences_size)
inference_val = self.model([np.reshape(users.values[userID-1][0],[1,1]),
np.reshape(users.values[userID-1][1],[1,1]),
np.reshape(users.values[userID-1][2],[1,1]),
np.reshape(users.values[userID-1][3],[1,1]),
np.reshape(movies.values[self.movieid2idx(movieID)][0],[1,1]),
genres,
titles])
return inference_val.numpy()
def GenerateMovieModel(self,movies):
print("MovieModel")
movie_layer_model = tf.keras.models.Model(inputs = [self.model.input[4],self.model.input[5],self.model.input[6]],
outputs = self.model.get_layer("movie_combine_layer_flat").output)
self.movie_matrix = [] #
for item in movies.values:
genre = np.reshape(item.take(2),[1,self.genres_max-1])
title = np.reshape(item.take(1),[1,self.sentences_size])
movie_combine_layer_flat_val = movie_layer_model([np.reshape(item.take(0),[1,1]),genre,title])
self.movie_matrix.append(movie_combine_layer_flat_val)
pickle.dump(np.array(self.movie_matrix).reshape(-1,256),open('movie_matrix.p','wb'))
def GenerateUserModel(self,users):
print("UserModel")
user_layer_model = tf.keras.models.Model(inputs = [self.model.input[0],self.model.input[1],self.model.input[2],self.model.input[3]],
outputs = self.model.get_layer("user_combine_layer_flat").output)
self.user_matrix = [] #用户的特征矩阵
for item in users.values:
user_combine_layer_flat_val = user_layer_model([np.reshape(item.take(0),[1,1]),np.reshape(item.take(1),[1,1]),np.reshape(item.take(2),[1,1]),np.reshape(item.take(3),[1,1])])
self.user_matrix.append(user_combine_layer_flat_val)
pickle.dump(self.user_matrix,open('user_matrix.p','wb'))
#user_matrix = pickle.load(open('user_matrix.p',mod = 'rb'))
    def Recommend_similary_items(self,movieID,topK):
        """
        Recommend topK movies similar to `movieID` (LFM-style item-item
        similarity on the learned movie feature matrix).

        Samples from the topK*4 most similar movies to add variety.
        Assumes self.movie_matrix has been populated (GenerateMovieModel
        or loaded from movie_matrix.p).
        """
        # Per-movie L2 norms; dividing gives unit movie vectors.
        normed_movie_matrix = tf.sqrt(tf.reduce_sum(tf.square(self.movie_matrix),1,keepdims = True))
        normalized_movie_matrix = self.movie_matrix / normed_movie_matrix
        # NOTE(review): the query vector is left unnormalised; the ranking is
        # unaffected (uniform positive scale) but scores are not true cosines.
        movie_embed_vector = self.movie_matrix[self.movieid2idx[movieID]].reshape(1,256)
        movies_similarity = tf.matmul(movie_embed_vector,tf.transpose(normalized_movie_matrix))
        movies_similarity_arr = movies_similarity.numpy()
        p = np.squeeze(movies_similarity_arr)  # drop the singleton dimension
        random_times = 4
        # argpartition puts the topK*4 best-scoring indices (unordered) first.
        movies_similary_loc = np.argpartition(-p,topK*random_times)
        random.shuffle(movies_similary_loc[0:topK*random_times])
        # NOTE(review): index -> movie id via +1 assumes row i of the matrix is
        # movie id i+1 (contiguous, 1-based ids) — confirm against movieid2idx.
        recommend_movies = movies_similary_loc[0:topK]
        recommend_movies = [x+1 for x in recommend_movies]
        return recommend_movies
    def Recommend2user(self,userID,ItemNum):
        '''
        Recommend ItemNum movies the user is predicted to like.

        Scores every movie by the dot product of the user's feature vector
        with the movie feature matrix, then samples ItemNum out of the top
        ItemNum*4 for variety.
        '''
        user_ratings = tf.matmul(self.user_matrix[userID-1], tf.transpose(self.movie_matrix))
        user_ratings = np.array(user_ratings)[0]
        random_times = 4
        # Unordered indices of the ItemNum*4 best-scoring movies.
        movies_favorite_loc = np.argpartition(-user_ratings,ItemNum*random_times)[0:ItemNum*random_times]
        random.shuffle(movies_favorite_loc)
        movies_favorite = movies_favorite_loc[0:ItemNum]
        # NOTE(review): +1 assumes matrix row i corresponds to movie id i+1.
        movies_favorite = [x+1 for x in movies_favorite]
        return movies_favorite
    def RecommendOtherFavoriteMovies(self,movieID,itemNum):
        '''
        Recommend movies via a two-hop similarity walk: find the itemNum*4
        movies most similar to `movieID`, sample itemNum of them, then
        return the itemNum movies most similar to that sample.

        Original author's warning: converting movie_matrix with a direct
        type cast crashed, hence the element-wise copy below.
        '''
        movie_embed_vector = self.movie_matrix[self.movieid2idx[movieID]].reshape((1,256))
        # Densify the list of per-movie tensors into one ndarray.
        # NOTE(review): assumes each entry has shape (1, 256) — confirm when
        # movie_matrix is loaded from pickle rather than freshly generated.
        movie_mat = np.zeros((len(self.movie_matrix),1,self.movie_matrix[0].shape[0]))
        for i in range(len(self.movie_matrix)):
            movie_mat[i] = self.movie_matrix[i]
        movie_mat = np.squeeze(movie_mat)
        user_ratings = np.matmul(movie_embed_vector , movie_mat.T)
        user_ratings = np.array(user_ratings)
        random_times = 4
        # Unordered indices of the itemNum*4 most similar movies.
        user_favorite_loc = np.argpartition(-user_ratings,itemNum*random_times)[0:itemNum*random_times]
        random.shuffle(user_favorite_loc)
        user_favorite = user_favorite_loc[0,0:itemNum]
        # Second hop: movies most similar to the sampled favourites.
        user_mat2 = list(movie_mat[list(user_favorite)])
        other_favorite = np.matmul(user_mat2,movie_mat.T)
        other_favorite_loc = np.argpartition(-other_favorite,itemNum)[0,0:itemNum]
        # NOTE(review): +1 index->id mapping assumes contiguous 1-based ids.
        other_favorite_movies = [x+1 for x in other_favorite_loc]
        return other_favorite_movies
# #结果的可视化
def test():
    """Load a previously trained model if its artifacts exist; otherwise
    train one from scratch, then persist the model and hyper-parameters.

    Expects dataProcess.pkl (the preprocessed dataset) in the working
    directory. On the load path it also restores the pickled user/movie
    feature matrices and prints a sample recommendation.
    """
    title_max_len, title_set, genres2int, features, targets, ratings, users, movies, data, movies_orig, users_orig = pickle.load(
        open('dataProcess.pkl', mode='rb'))
    data_features = [features, genres2int, title_set, title_max_len, movies]
    mv_net = mv_network(data_features)
    try:
        mv_net.model = tf.keras.models.load_model('mv_net.h5')
        mv_net_info = joblib.load('./mv_net_info.m')
        # Restore the hyper-parameters saved alongside the model
        # (tuple unpacking replaces the error-prone index-by-index copy).
        (mv_net.batch_size, mv_net.learning_rate, mv_net.embed_dim,
         mv_net.dropout_keep, mv_net.filter_num, mv_net.slide_window,
         mv_net.best_loss, mv_net.losses, mv_net.MODEL_DIR) = mv_net_info[:9]
        mv_net.movie_matrix = pickle.load(open('movie_matrix.p', 'rb'))
        mv_net.user_matrix = pickle.load(open('user_matrix.p', 'rb'))
        print(mv_net.Recommend_similary_items(3, 5))
    except IOError:
        print("Train the model for the first time!")
        mv_net.get_params(data_features)
        mv_net.training(users, movies, features, targets, epochs=1, log_freq=50)
        print("train end")
        # Persist hyper-parameters and the trained model for later runs.
        # (Removed an unused local `slide_window` that shadowed nothing.)
        mv_net_info = [mv_net.batch_size, mv_net.learning_rate, mv_net.embed_dim,
                       mv_net.dropout_keep, mv_net.filter_num, mv_net.slide_window,
                       mv_net.best_loss, mv_net.losses, mv_net.MODEL_DIR]
        joblib.dump(mv_net_info, './mv_net_info.m')
        mv_net.model.save('mv_net.h5')
    print("success")
if __name__ == '__main__':
    # Script entry point: run the load-or-train smoke test.
    test()
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(
__file__), "../src"))
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
import collections
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import label_io
import utils
"""
Functions to write interpolated surface meshes for perscribed wall motion
"""
def cubic_hermite_spline_ipl(time, t_m, dt_m, boundary_queue):
    """
    Cubic Hermite spline interpolation for nodes movement
    see https://en.wikipedia.org/wiki/Cubic_Hermite_spline
    Args:
        time: time index in range(num_itpls)+1
        t_m: initial time point
        dt_m: number of interpolations
        boundary_queue: list of 4 consecutive VTK PolyData phases
    Returns:
        coords: coordinates of the interpolated mesh
    """
    # Node coordinates of the four consecutive phases.
    p0, p1, p2, p3 = (vtk_to_numpy(b.GetPoints().GetData()) for b in boundary_queue)
    # Normalised time within the [t_m, t_m + dt_m] interval.
    t = (float(time) - t_m) / dt_m
    t2 = t * t
    t3 = t2 * t
    # Hermite basis polynomials.
    h00 = 2 * t3 - 3 * t2 + 1
    h10 = t3 - 2 * t2 + t
    h01 = -2 * t3 + 3 * t2
    h11 = t3 - t2
    # Central-difference tangents at the two interior phases.
    v1 = (p2 - p0) / dt_m / 2
    v2 = (p3 - p1) / dt_m / 2
    return h00 * p1 + h01 * p2 + h10 * v1 * dt_m + h11 * v2 * dt_m
def find_index_in_array(x, y):
    """
    For x being an array containing y, return the index in x of each
    element of y (x must contain every element of y exactly).
    """
    order = np.argsort(x)
    # Locate each y in the sorted view, then map back to original positions.
    positions = np.searchsorted(x[order], y)
    return order[positions]
def move_mesh(fns, start_point, intpl_num, num_cycle, duration, output_var):
    """Build the per-node motion (displacement or velocity) time series.

    Interpolates the mesh phases in `fns` with a clamped cubic spline over
    one cardiac cycle, then tiles that cycle `num_cycle` times.
    (A dead Hermite-spline code path and an unused `initialized` flag were
    removed.)

    Args:
        fns: surface mesh file names, one per phase.
        start_point: index of the phase used as the reference configuration.
        intpl_num: number of interpolated steps inserted between phases.
        num_cycle: number of cardiac cycles to output.
        duration: duration of one cycle in seconds.
        output_var: "displacement" or "velocity".

    Returns:
        Array of shape (num_points, 3, total_steps+1) where
        total_steps = len(fns)*(intpl_num+1)*num_cycle; the final column is
        left zero, matching the original behaviour.

    Raises:
        ValueError: if output_var is not "displacement" or "velocity"
        (previously an unknown value silently produced all-zero motion).
    """
    total_num_phase = len(fns)
    steps_per_cycle = total_num_phase * (intpl_num + 1)
    total_steps = steps_per_cycle * num_cycle
    poly_template = label_io.loadVTKMesh(fns[start_point])
    num_points = poly_template.GetNumberOfPoints()
    ref_coords = vtk_to_numpy(poly_template.GetPoints().GetData())
    store = np.zeros((num_points, 3, total_steps + 1))
    # Raw node positions per phase, starting at start_point and wrapping,
    # with the start phase repeated at the end to close the cycle.
    time_raw = np.linspace(0, duration, total_num_phase + 1)
    disp_raw = np.zeros((num_points, 3, total_num_phase + 1))
    phase_order = list(range(start_point, total_num_phase)) + list(range(0, start_point))
    for count, msh_idx in enumerate(phase_order):
        disp_raw[:, :, count] = vtk_to_numpy(label_io.loadVTKMesh(fns[msh_idx]).GetPoints().GetData())
    disp_raw[:, :, total_num_phase] = vtk_to_numpy(label_io.loadVTKMesh(fns[start_point]).GetPoints().GetData())
    # Clamped natural cubic spline per node per component; sample the value
    # (displacement relative to the reference) and the first derivative.
    time_ipl = np.linspace(0, duration, steps_per_cycle + 1)
    disp_ipl = np.zeros((num_points, 3, steps_per_cycle + 1))
    velo_ipl = np.zeros((num_points, 3, steps_per_cycle + 1))
    for i in range(num_points):
        for j in range(3):
            cs = interpolate.CubicSpline(time_raw, disp_raw[i, j, :], bc_type='clamped')
            disp_ipl[i, j, :] = cs(time_ipl) - ref_coords[i, j]
            velo_ipl[i, j, :] = cs(time_ipl, 1)
    if output_var == "displacement":
        print("Output displacement.")
        cycle_data = disp_ipl
    elif output_var == "velocity":
        print("Output velocity.")
        cycle_data = velo_ipl
    else:
        raise ValueError("output_var must be 'displacement' or 'velocity', got %r" % output_var)
    # The remaining cycles are copies of the first.
    for c in range(num_cycle):
        s = c * steps_per_cycle
        store[:, :, s:s + steps_per_cycle] = cycle_data[:, :, 0:-1]
    return store
def write_motion(fns, start_point, intpl_num, output_dir, num_cycle, duration, output_var, debug=False):
    """Write per-face motion .dat files for prescribed wall motion.

    For every ModelFaceID in the template mesh, writes
    `<faceID>_motion.dat` containing a header, the time steps, and for
    each node on that face its GlobalNodeID followed by one xyz row per
    time step.

    Args:
        fns: surface mesh file names, one per phase.
        start_point: reference phase index.
        intpl_num: number of interpolations between phases.
        output_dir: destination directory for the motion files.
        num_cycle: number of cardiac cycles.
        duration: cycle duration in seconds.
        output_var: "displacement" or "velocity" (forwarded to move_mesh).
        debug: if True, also write the displaced meshes as .vtp files.
    """
    total_num_phase = len(fns)
    total_steps = num_cycle * total_num_phase * (intpl_num + 1) + 1
    poly_template = label_io.loadVTKMesh(fns[start_point])
    displacements = move_mesh(fns, start_point, intpl_num, num_cycle, duration, output_var)
    if debug:
        import vtk
        debug_dir = os.path.join(output_dir, "Debug")
        os.makedirs(debug_dir, exist_ok=True)
        coords = vtk_to_numpy(poly_template.GetPoints().GetData())
        poly = vtk.vtkPolyData()
        poly.DeepCopy(poly_template)
        for ii in range(displacements.shape[-1]):
            poly.GetPoints().SetData(numpy_to_vtk(displacements[:, :, ii] + coords))
            label_io.writeVTKPolyData(poly, os.path.join(debug_dir, "debug%05d.vtp" % ii))
    node_ids = vtk_to_numpy(poly_template.GetPointData().GetArray('GlobalNodeID'))
    face_ids = vtk_to_numpy(poly_template.GetCellData().GetArray('ModelFaceID'))
    # Write one motion file per model face.
    for face in np.unique(face_ids):
        fn = os.path.join(output_dir, '%d_motion.dat' % face)
        face_poly = utils.thresholdPolyData(poly_template, 'ModelFaceID', (face, face))
        # Bug fix: the original rebound the name `face_ids` to this per-face
        # node-id array, shadowing the array the loop iterates over.
        face_node_ids = vtk_to_numpy(face_poly.GetPointData().GetArray('GlobalNodeID'))
        node_id_index = find_index_in_array(node_ids, face_node_ids)
        with open(fn, 'w') as f:  # `with` guarantees the file is closed
            f.write('{} {} {}\n'.format(3, total_steps, face_poly.GetNumberOfPoints()))
            for t in np.linspace(0, num_cycle * duration, total_steps):
                f.write('{}\n'.format(t))
            for i in node_id_index:
                disp = displacements[i, :, :]
                f.write('{}\n'.format(node_ids[i]))
                for j in range(total_steps):
                    f.write('{} {} {}\n'.format(disp[0, j], disp[1, j], disp[2, j]))
if __name__=='__main__':
    # Command-line driver: interpolate surface-mesh motion and write
    # per-face motion files, timing the whole run.
    import time
    import argparse
    start = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', help="Path to the surface meshes")
    parser.add_argument('--output_dir', help="Path to the volume meshes")
    parser.add_argument('--num_interpolation', type=int, help="Number of interpolations")
    parser.add_argument('--num_cycle', type=int, help="Number of cardiac cycles")
    parser.add_argument('--duration', type=float, help="Cycle duration in seconds")
    parser.add_argument('--phase', default=-1, type=int, help="Id of the phase to generate volume mesh")
    parser.add_argument('--output_var', help="Output displacement or velocity")
    args = parser.parse_args()
    mesh_dir = args.input_dir
    output_dir = os.path.join(args.output_dir, 'mesh-complete')
    # Best effort: report but do not abort if the directory already exists.
    try:
        os.makedirs(output_dir)
    except Exception as e: print(e)
    import glob
    # Phases are ordered by their sorted file names.
    fns = sorted(glob.glob(os.path.join(mesh_dir, "*.vtp")))
    write_motion(fns, args.phase ,args.num_interpolation, output_dir, args.num_cycle, args.duration, args.output_var, debug=False)
    end = time.time()
    print("Time spent: ", end-start)
|
<filename>input_handlers.py
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import tcod.event
from actions import Action, BumpAction, EscapeAction, WaitAction
if TYPE_CHECKING:
from engine import Engine
# Maps a pressed key symbol to its (dx, dy) movement delta.
MOVE_KEYS = {
    # Arrow keys.
    tcod.event.K_UP: (0, -1),
    tcod.event.K_DOWN: (0, 1),
    tcod.event.K_LEFT: (-1, 0),
    tcod.event.K_RIGHT: (1, 0),
    tcod.event.K_HOME: (-1, -1),
    tcod.event.K_END: (-1, 1),
    tcod.event.K_PAGEUP: (1, -1),
    tcod.event.K_PAGEDOWN: (1, 1),
    # Numpad keys.
    tcod.event.K_KP_1: (-1, 1),
    tcod.event.K_KP_2: (0, 1),
    tcod.event.K_KP_3: (1, 1),
    tcod.event.K_KP_4: (-1, 0),
    tcod.event.K_KP_6: (1, 0),
    tcod.event.K_KP_7: (-1, -1),
    tcod.event.K_KP_8: (0, -1),
    tcod.event.K_KP_9: (1, -1),
    # Vi keys.
    tcod.event.K_h: (-1, 0),
    tcod.event.K_j: (0, 1),
    tcod.event.K_k: (0, -1),
    tcod.event.K_l: (1, 0),
    tcod.event.K_y: (-1, -1),
    tcod.event.K_u: (1, -1),
    tcod.event.K_b: (-1, 1),
    tcod.event.K_n: (1, 1),
}
# Keys that make the player pass the turn in place.
WAIT_KEYS = {
    tcod.event.K_PERIOD,
    tcod.event.K_KP_5,
    tcod.event.K_CLEAR,
}
class EventHandler(tcod.event.EventDispatch[Action]):
    """Base dispatcher: turns tcod events into engine actions."""

    def __init__(self, engine: Engine):
        self.engine = engine

    def handle_events(self, context: tcod.context.Context) -> None:
        """Wait for events, convert their coordinates, and dispatch them."""
        for event in tcod.event.wait():
            context.convert_event(event)
            self.dispatch(event)

    def ev_mousemotion(self, event: tcod.event.MouseMotion) -> None:
        # Track the mouse only while it is over the map.
        x, y = event.tile.x, event.tile.y
        if self.engine.game_map.in_bounds(x, y):
            self.engine.mouse_location = x, y

    def ev_quit(self, event: tcod.event.Quit) -> Optional[Action]:
        raise SystemExit()

    def on_render(self, console: tcod.Console) -> None:
        self.engine.render(console)
class MainGameEventHandler(EventHandler):
    """Handler used while the player is alive and acting."""

    def handle_events(self, context: tcod.context.Context) -> None:
        for event in tcod.event.wait():
            context.convert_event(event)
            action = self.dispatch(event)
            if action is None:
                continue
            action.perform()
            self.engine.handle_enemy_turns()
            # Update the FOV before the player's next action.
            self.engine.update_fov()

    def ev_keydown(self, event: tcod.event.KeyDown) -> Optional[Action]:
        key = event.sym
        player = self.engine.player
        if key in MOVE_KEYS:
            dx, dy = MOVE_KEYS[key]
            return BumpAction(player, dx, dy)
        if key in WAIT_KEYS:
            return WaitAction(player)
        if key == tcod.event.K_ESCAPE:
            return EscapeAction(player)
        if key == tcod.event.K_v:
            # Switch to the message-history viewer; no action this turn.
            self.engine.event_handler = HistoryViewer(self.engine)
        # No valid key was pressed.
        return None
class GameOverEventHandler(EventHandler):
    """Handler after the player dies: only ESC produces an action."""

    def handle_events(self, context: tcod.context.Context) -> None:
        for event in tcod.event.wait():
            action = self.dispatch(event)
            if action is not None:
                action.perform()

    def ev_keydown(self, event: tcod.event.KeyDown) -> Optional[Action]:
        if event.sym == tcod.event.K_ESCAPE:
            return EscapeAction(self.engine.player)
        # No valid key was pressed.
        return None
# How far each scroll key moves the history cursor.
CURSOR_Y_KEYS = {
    tcod.event.K_UP: -1,
    tcod.event.K_DOWN: 1,
    tcod.event.K_PAGEUP: -10,
    tcod.event.K_PAGEDOWN: 10,
}
class HistoryViewer(EventHandler):
    """Print the history on a larger window which can be navigated."""

    def __init__(self, engine: Engine):
        super().__init__(engine)
        self.log_length = len(engine.message_log.messages)
        self.cursor = self.log_length - 1  # start at the newest message

    def on_render(self, console: tcod.Console) -> None:
        super().on_render(console)  # Draw the main state as the background.
        log_console = tcod.Console(console.width - 6, console.height - 6)
        # Draw a frame with a custom banner title.
        log_console.draw_frame(0, 0, log_console.width, log_console.height)
        log_console.print_box(
            0, 0, log_console.width, 1, "┤Message history├", alignment=tcod.CENTER
        )
        # Render the messages up to (and including) the cursor position.
        self.engine.message_log.render_messages(
            log_console,
            1,
            1,
            log_console.width - 2,
            log_console.height - 2,
            self.engine.message_log.messages[: self.cursor + 1],
        )
        log_console.blit(console, 3, 3)

    def ev_keydown(self, event: tcod.event.KeyDown) -> None:
        key = event.sym
        if key in CURSOR_Y_KEYS:
            adjust = CURSOR_Y_KEYS[key]
            last = self.log_length - 1
            if adjust < 0 and self.cursor == 0:
                # Moving up from the top edge wraps to the newest message.
                self.cursor = last
            elif adjust > 0 and self.cursor == last:
                # Moving down from the bottom edge wraps to the oldest.
                self.cursor = 0
            else:
                # Otherwise move while staying clamped to the valid range.
                self.cursor = max(0, min(self.cursor + adjust, last))
        elif key == tcod.event.K_HOME:
            self.cursor = 0  # Jump directly to the top message.
        elif key == tcod.event.K_END:
            self.cursor = self.log_length - 1  # Jump to the last message.
        else:
            # Any other key returns to the main game state.
            self.engine.event_handler = MainGameEventHandler(self.engine)
|
<reponame>AlexJew/CityEnergyAnalyst
"""
MIT License
Copyright (c) 2019 TUMCREATE <https://tum-create.edu.sg/>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
import datetime
import pandas as pd
from legacy.flexibility_model.model_building import building_extract_cea_data
from legacy.flexibility_model.model_building import building_process_hvac_efficiencies
from legacy.flexibility_model.model_building import building_setup_district
from legacy.flexibility_model.model_building import building_write_definitions
from legacy.flexibility_model.model_building import DELTA_P_DIM, DENSITY_AIR, HEAT_CAPACITY_AIR, HE_E, H_I, \
PHI_5_MAX, FB, HP_ETA_EX_COOL, HP_AUXRATIO
from cea.constants import HOURS_IN_YEAR
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def main(locator,
         weather_path,
         scenario,
         parameter_set,
         time_start,
         time_end,
         time_step_ts,
         set_temperature_goal,
         constant_temperature
         ):
    """Assemble all inputs for the building flexibility model from a CEA scenario.

    Orchestrates four model-building steps: reads scenario/weather data via
    ``building_extract_cea_data``, writes the model definition files via
    ``building_write_definitions``, derives set-point schedules via
    ``building_setup_district``, and computes HVAC efficiencies via
    ``building_process_hvac_efficiencies``; finally loads hourly electricity
    prices from the supply-systems database.

    Parameters
    ----------
    locator : CEA input locator used to resolve database/scenario paths.
    weather_path : path to the weather file.
    scenario : scenario identifier forwarded to building_write_definitions.
    parameter_set : parameter-set identifier forwarded to the writers.
    time_start, time_end : prediction window bounds as '%Y-%m-%d %H:%M:%S' strings.
    time_step_ts : time-step string parsable by pandas.to_timedelta (e.g. '1h').
    set_temperature_goal, constant_temperature : set-point strategy inputs
        forwarded to building_setup_district.

    Returns
    -------
    tuple
        Prediction horizon, time indexes, building metadata, set-point
        dictionaries, HVAC efficiencies and hourly electricity prices
        (see the return statement for the exact ordering).
    """
    # Preliminary step - time
    date_and_time_prediction = pd.date_range(start=time_start, end=time_end, freq=pd.to_timedelta(time_step_ts))
    time_step = date_and_time_prediction[1] - date_and_time_prediction[0]
    # Extend the horizon by one step so downstream code can reference t+1.
    time_end_object = datetime.datetime.strptime(time_end, '%Y-%m-%d %H:%M:%S')
    last_step_plus_1 = time_end_object + time_step
    last_step_plus_1_str = datetime.datetime.strftime(last_step_plus_1, '%Y-%m-%d %H:%M:%S')
    date_and_time_prediction_plus_1 = pd.date_range(
        start=time_start, end=last_step_plus_1_str, freq=pd.to_timedelta(time_step_ts))
    # Get and write general data extracted from the CEA scenario databases.
    (
        internal_loads_df,
        indoor_comfort_df,
        construction_envelope_systems_df,
        leakage_envelope_systems_df,
        window_envelope_systems_df,
        roofs_envelope_systems_df,
        wall_envelope_systems_df,
        shading_envelope_systems_df,
        emission_systems_heating_df,
        emission_systems_cooling_df,
        emission_systems_controller_df,
        system_controls_ini_df,
        cooling_generation_df,
        zone_occupancy_df,
        zone_df,
        architecture_df,
        technical_systems_df,
        supply_systems_df,
        weather_general_info,
        weather_timeseries_initial_df,
        occupancy_types_full,
        occupancy_types,
        buildings_names,
        building_geometry_all,
        occupancy_types_full_cardinal,
        buildings_cardinal,
        occupancy_types_cardinal,
        occupants_probability_dic,
        lighting_appliances_probability_dic,
        processes_probability_dic,
        monthly_use_probability_df,
        occupancy_density_m2_p,
        footprint,
        gross_floor_area_m2,
        floors_cardinal_df,
        total_gross_floor_area_m2,
        mean_floor_height_m,
        system_controls_df,
        supply_temperature_df,
        emissions_cooling_type_dic,
        emissions_controller_type_dic,
        generation_cooling_code_dic,
        occupancy_per_building_cardinal,
        occupancy_per_building_list,
        T_int_cea_dic,
        T_ext_cea_df
    ) = building_extract_cea_data.main(locator, weather_path,
                                       time_start,
                                       time_end
                                       )
    # Write the model definition files and derive time/occupancy series.
    (
        date_and_time,
        year,
        wet_bulb_temperature_df,
        occupancy_probability_df
    ) = building_write_definitions.main(locator, scenario,
                                        date_and_time_prediction,
                                        time_start,
                                        time_end,
                                        time_step,
                                        parameter_set,
                                        internal_loads_df,
                                        construction_envelope_systems_df,
                                        leakage_envelope_systems_df,
                                        window_envelope_systems_df,
                                        roofs_envelope_systems_df,
                                        wall_envelope_systems_df,
                                        shading_envelope_systems_df,
                                        zone_occupancy_df,
                                        architecture_df,
                                        weather_general_info,
                                        weather_timeseries_initial_df,
                                        occupancy_types,
                                        occupancy_types_cardinal,
                                        buildings_names,
                                        building_geometry_all,
                                        occupants_probability_dic,
                                        lighting_appliances_probability_dic,
                                        processes_probability_dic,
                                        monthly_use_probability_df,
                                        occupancy_density_m2_p,
                                        gross_floor_area_m2,
                                        mean_floor_height_m,
                                        DELTA_P_DIM,
                                        HE_E,
                                        H_I,
                                        DENSITY_AIR,
                                        HEAT_CAPACITY_AIR,
                                        supply_temperature_df,
                                        emissions_cooling_type_dic
                                        )
    # Derive the district-level set-point schedules and prediction horizon.
    (
        prediction_horizon,
        center_interval_temperatures_dic,
        set_setback_temperatures_dic,
        setback_boolean_dic,
        heating_boolean,
        cooling_boolean,
        set_temperatures_dic
    ) = building_setup_district.main(
        date_and_time_prediction,
        time_step,
        set_temperature_goal,
        constant_temperature,
        buildings_names,
        system_controls_df,
        occupancy_per_building_cardinal,
        occupancy_per_building_list,
        occupancy_probability_df,
        indoor_comfort_df,
        T_int_cea_dic
    )
    # Hourly electricity buy prices, converted from USD/kWh to $/MWh.
    electricity_prices_MWh = pd.read_excel(locator.get_database_supply_systems(), "DETAILED_ELEC_COSTS")
    electricity_prices_MWh["PRICE ($/MWh)"] = electricity_prices_MWh["Opex_var_buy_USD2015perkWh"]*1000
    # NOTE(review): pd.date_range defaults to daily frequency when only
    # `periods` is given; periods=HOURS_IN_YEAR therefore produces ~24 years of
    # daily stamps rather than one year of hourly stamps — confirm whether
    # freq='H' was intended here.
    electricity_prices_MWh["our_datetime"] = pd.date_range(start='1/1/2005', periods=HOURS_IN_YEAR)
    electricity_prices_MWh.set_index('our_datetime', inplace=True)
    # Compute per-building HVAC efficiencies and maximum cooling loads.
    (
        Qcsmax_Wm2_dic,
        em_efficiency_mean_dic,
    ) = building_process_hvac_efficiencies.main(locator,
                                                buildings_names,
                                                footprint,
                                                buildings_cardinal,
                                                cooling_generation_df,
                                                emission_systems_cooling_df,
                                                emission_systems_controller_df,
                                                generation_cooling_code_dic,
                                                emissions_cooling_type_dic,
                                                emissions_controller_type_dic,
                                                set_temperatures_dic,
                                                T_ext_cea_df,
                                                wet_bulb_temperature_df,
                                                prediction_horizon,
                                                date_and_time_prediction,
                                                occupancy_per_building_cardinal,
                                                occupancy_per_building_list,
                                                supply_temperature_df,
                                                PHI_5_MAX,
                                                FB,
                                                HP_ETA_EX_COOL,
                                                HP_AUXRATIO
                                                )
    return (
        prediction_horizon,
        date_and_time_prediction,
        date_and_time_prediction_plus_1,
        time_step,
        year,
        buildings_names,
        buildings_cardinal,
        center_interval_temperatures_dic,
        set_setback_temperatures_dic,
        setback_boolean_dic,
        heating_boolean,
        cooling_boolean,
        set_temperatures_dic,
        occupancy_per_building_cardinal,
        occupancy_per_building_list,
        gross_floor_area_m2,
        total_gross_floor_area_m2,
        indoor_comfort_df,
        occupancy_density_m2_p,
        occupancy_probability_df,
        em_efficiency_mean_dic,
        Qcsmax_Wm2_dic,
        electricity_prices_MWh
    )
|
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Business Logic ####
################################################################################
def genDmaHeaderFile(symbol, event):
    """File-symbol callback: generate sys_dma.h only while ENABLE_SYS_DMA is set."""
    generate = event["value"]
    symbol.setEnabled(generate)
def genDmaHeaderMappingFile(symbol, event):
    """File-symbol callback: generate sys_dma_mapping.h only while ENABLE_SYS_DMA is set."""
    enabled = event["value"]
    symbol.setEnabled(enabled)
def genDmaSystemDefFile(symbol, event):
    """File-symbol callback: emit the system_definitions.h include only while ENABLE_SYS_DMA is set."""
    is_on = event["value"]
    symbol.setEnabled(is_on)
def enableDependencySymbols(symbol, event):
    """Propagate the System DMA toggle to the shared SYS_COMMON dependency."""
    common_state = Database.getSymbolValue("HarmonyCore", "ENABLE_SYS_COMMON")
    # Intentional '== False' comparison: only flip SYS_COMMON when it is
    # explicitly disabled (a None/unset value is left untouched).
    if (common_state == False):
        Database.setSymbolValue("HarmonyCore", "ENABLE_SYS_COMMON", event["value"])
############################################################################
#### Code Generation ####
############################################################################
# Per-device template suffix: PIC32M parts use dedicated "_pic32m" templates.
deviceFile = ""
if("PIC32M" in Variables.get("__PROCESSOR")):
    deviceFile = "_pic32m"
# Master user-visible switch for System DMA service generation.
genSysDMACommonFiles = harmonyCoreComponent.createBooleanSymbol("ENABLE_SYS_DMA", None)
genSysDMACommonFiles.setLabel("Enable System DMA")
# Hidden symbol that forwards ENABLE_SYS_DMA changes to shared dependencies
# (see enableDependencySymbols).
enableDependency = harmonyCoreComponent.createBooleanSymbol("ENABLE_SYS_DMA_DEPENDENCY", None)
enableDependency.setLabel("Enable System DMA Dependencies")
enableDependency.setVisible(False)
enableDependency.setDependencies(enableDependencySymbols, ["ENABLE_SYS_DMA"])
# sys_dma.h: generated from the (possibly device-specific) header template.
dmaHeaderFile = harmonyCoreComponent.createFileSymbol("DMA_HEADER", None)
dmaHeaderFile.setSourcePath("system/dma/templates/sys_dma" + deviceFile + ".h.ftl")
dmaHeaderFile.setOutputName("sys_dma.h")
dmaHeaderFile.setDestPath("system/dma/")
dmaHeaderFile.setProjectPath("config/" + configName + "/system/dma/")
dmaHeaderFile.setType("HEADER")
dmaHeaderFile.setMarkup(True)
dmaHeaderFile.setOverwrite(True)
dmaHeaderFile.setEnabled(False)
dmaHeaderFile.setDependencies(genDmaHeaderFile, ["ENABLE_SYS_DMA"])
# sys_dma.c: generated from the matching source template.
dmaSourceFile = harmonyCoreComponent.createFileSymbol("DMA_SOURCE", None)
dmaSourceFile.setSourcePath("system/dma/templates/sys_dma" + deviceFile + ".c.ftl")
dmaSourceFile.setOutputName("sys_dma.c")
dmaSourceFile.setDestPath("system/dma/")
dmaSourceFile.setProjectPath("config/" + configName + "/system/dma/")
dmaSourceFile.setType("SOURCE")
dmaSourceFile.setMarkup(True)
dmaSourceFile.setOverwrite(True)
dmaSourceFile.setEnabled(False)
# NOTE(review): reuses genDmaHeaderFile as the callback; behavior is identical
# (both simply toggle the symbol), but a dedicated genDmaSourceFile name would
# be clearer — confirm intent.
dmaSourceFile.setDependencies(genDmaHeaderFile, ["ENABLE_SYS_DMA"])
# sys_dma_mapping.h: device-specific register/channel mapping header.
dmaHeaderMappingFile = harmonyCoreComponent.createFileSymbol("DMA_MAPPING", None)
dmaHeaderMappingFile.setSourcePath("system/dma/templates/sys_dma_mapping" + deviceFile + ".h.ftl")
dmaHeaderMappingFile.setOutputName("sys_dma_mapping.h")
dmaHeaderMappingFile.setDestPath("system/dma/")
dmaHeaderMappingFile.setProjectPath("config/" + configName + "/system/dma/")
dmaHeaderMappingFile.setType("HEADER")
dmaHeaderMappingFile.setMarkup(True)
dmaHeaderMappingFile.setOverwrite(True)
dmaHeaderMappingFile.setEnabled(False)
dmaHeaderMappingFile.setDependencies(genDmaHeaderMappingFile, ["ENABLE_SYS_DMA"])
# STRING symbol that injects the DMA include into system_definitions.h.
dmaSystemDefFile = harmonyCoreComponent.createFileSymbol("DMA_DEF", None)
dmaSystemDefFile.setType("STRING")
dmaSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
dmaSystemDefFile.setSourcePath("system/dma/templates/system/system_definitions.h.ftl")
dmaSystemDefFile.setMarkup(True)
dmaSystemDefFile.setOverwrite(True)
dmaSystemDefFile.setEnabled(False)
dmaSystemDefFile.setDependencies(genDmaSystemDefFile, ["ENABLE_SYS_DMA"])
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 10:44:23 2016
@author: wexiao
"""
from rpca.pcp import pcp
from rpca.utility import solve_proj2
import numpy as np
from sklearn.utils.extmath import randomized_svd
from collections import deque
import itertools
def omwrpca_cp(M, burnin, win_size, track_cp_burnin, n_check_cp, alpha, proportion, n_positive, min_test_size,
               tolerance_num=0, lambda1=np.nan, lambda2=np.nan, factor=1):
    """
    Online moving-window robust PCA with change-point detection (OMWRPCA-CP).

    The loss function is
    min_{L,S} { 1/2||M-L-S||_F^2 + lambda1||L||_* + lambda2*||S(:)||_1}
    based on a moving window.

    Parameters
    ----------
    M : array-like, shape (n_features, n_samples), which will be decomposed into a sparse matrix S
        and a low-rank matrix L.
    burnin : burn-in sample size. We require burnin >= win_size.
    win_size : length of moving window. We require win_size <= burnin.
    track_cp_burnin : the first track_cp_burnin samples generated from the omwrpca algorithm are
        excluded from change-point tracking, because the early result may be unstable.
    n_check_cp : buffer size used to track a change point.
    alpha : threshold value used in the hypothesis test applied to track subspace changes.
        We suggest the value 0.01.
    proportion : minimum fraction of flagged columns in the buffer before a change point is
        considered.
    n_positive : number of consecutive flagged columns required to confirm a change point.
    min_test_size : number of samples accumulated before hypothesis testing starts.
    tolerance_num : offset of numbers used in the hypothesis test to track a change point. A larger
        tolerance_num gives a more robust result. We restrict tolerance_num to be a non-negative
        integer. The default value of tolerance_num is 0.
    lambda1, lambda2 : tuning parameters; default to 1/sqrt(n_features).
    factor : parameter factor for PCP.

    Returns
    -------
    Lhat : array-like, low-rank matrix.
    Shat : array-like, sparse matrix.
    rvec : list of the rank used on each detected segment.
    cp : list of detected change points (column indices of M).
    num_sparses : list of the number of nonzero elements of each column of Shat.
    (Docstring fix: the function has always returned these five values, not three.)

    References
    ----------
    Rule of thumb for tuning parameters:
    lambda1 = 1.0/np.sqrt(m);
    lambda2 = 1.0/np.sqrt(m);
    """
    m, n = M.shape
    # parameter setting
    assert burnin >= win_size, "Parameter burnin should be larger than or equal to parameter win_size."
    if n < burnin:
        # Fix: Python-2 print statement replaced with the print() call (valid on
        # both Python 2 and 3), and the 'burin' typo corrected to 'burnin'.
        print("Parameter burnin should be less than or equal to the number of columns of input matrix. Program stops.")
        return np.empty((m,0)), np.empty((m,0)), [], [], []
    if np.isnan(lambda1):
        lambda1 = 1.0/np.sqrt(m)
    if np.isnan(lambda2):
        lambda2 = 1.0/np.sqrt(m)
    # calculate pcp on burnin samples and find rank r
    Lhat, Shat, niter, r = pcp(M[:, :burnin], factor=factor)
    # initialization for omwrpca
    Uhat, sigmas_hat, Vhat = randomized_svd(Lhat, n_components=r, n_iter=5, random_state=0)
    U = Uhat.dot(np.sqrt(np.diag(sigmas_hat)))
    Vhat_win = Vhat[:, -win_size:]
    A = np.zeros((r, r))
    B = np.zeros((m, r))
    for i in range(Vhat_win.shape[1]):
        A = A + np.outer(Vhat_win[:, i], Vhat_win[:, i])
        B = B + np.outer(M[:, burnin - win_size + i] - Shat[:, burnin - win_size + i], Vhat_win[:, i])
    # initialization for change points tracking
    # dist_num_sparses: distribution of the number of nonzero elements of columns of sparse matrix
    # used for tracking change point
    dist_num_sparses = np.zeros(m+1)
    # buffer_num: number of nonzero elements of columns of sparse matrix in the buffer used for
    # tracking change point (buffer size = n_check_cp, queue structure)
    buffer_num = deque([])
    # buffer_flag: flags of columns of sparse matrix in the buffer used for tracking change point
    # (buffer size = n_check_cp, queue structure); flag=1 - potential change point; flag=0 - normal point.
    buffer_flag = deque([])
    # num_sparses, cp, rvec are returned by the function
    # initialize num_sparses to track the number of nonzero elements of columns of sparse matrix
    num_sparses = list((Shat != 0).sum(axis=0))
    # initialize change points to an empty list
    cp = []
    # initialize list of rank to [r]
    rvec = [r]
    # main loop
    i = burnin
    while i < n:
        mi = M[:, i]
        vi, si = solve_proj2(mi, U, lambda1, lambda2)
        Shat = np.hstack((Shat, si.reshape(m,1)))
        vi_delete = Vhat_win[:,0]
        Vhat_win = np.hstack((Vhat_win[:,1:], vi.reshape(r,1)))
        # Sliding-window update of the sufficient statistics A and B.
        A = A + np.outer(vi, vi) - np.outer(vi_delete, vi_delete)
        B = B + np.outer(mi - si, vi) - np.outer(M[:, i - win_size] - Shat[:, i - win_size], vi_delete)
        U = update_col(U, A, B, lambda1)
        Lhat = np.hstack((Lhat, U.dot(vi).reshape(m,1)))
        num_sparses.append((si.reshape(m,1) != 0).sum())
        if i >= burnin + track_cp_burnin and i < burnin + track_cp_burnin + min_test_size:
            # Accumulate the reference distribution of per-column sparsity.
            num = (si != 0).sum()
            dist_num_sparses[num] += 1
        elif i >= burnin + track_cp_burnin + min_test_size: # do hypothesis testing to find change point
            num = (si != 0).sum()
            buffer_num.append(num)
            # One-sided p-value: probability of observing this many (or more) nonzeros.
            pvalue = dist_num_sparses[max(num - tolerance_num, 0):].sum() / dist_num_sparses.sum()
            if pvalue <= alpha:
                buffer_flag.append(1)
            else:
                buffer_flag.append(0)
            if len(buffer_flag) >= n_check_cp: # check change point
                if len(buffer_flag) == n_check_cp + 1:
                    # Keep the buffer at n_check_cp entries; retire the oldest
                    # observation into the reference distribution.
                    dist_num_sparses[buffer_num[0]] += 1
                    buffer_num.popleft()
                    buffer_flag.popleft()
                nabnormal = sum(buffer_flag)
                # potential change identified
                if nabnormal >= n_check_cp * float(proportion):
                    for k in range(n_check_cp - n_positive +1):
                        # use the earliest change point if change point exists
                        if sum(itertools.islice(buffer_flag, k, k+n_positive)) == n_positive:
                            changepoint = i - n_check_cp + 1 + k
                            cp.append(changepoint)
                            # Truncate the current solution at the change point and
                            # restart the algorithm on the remaining columns.
                            Lhat = Lhat[:, :changepoint]
                            Shat = Shat[:, :changepoint]
                            M_update = M[:, changepoint:]
                            num_sparses = num_sparses[:changepoint]
                            # recursively call omwrpca_cp
                            Lhat_update, Shat_update, rvec_update, cp_update, num_sparses_update = \
                            omwrpca_cp(M_update, burnin, win_size, track_cp_burnin, n_check_cp, alpha,
                                       proportion, n_positive, min_test_size, tolerance_num, lambda1, lambda2, factor)
                            # update Lhat, Shat, rvec, num_sparses, cp
                            Lhat = np.hstack((Lhat, Lhat_update))
                            Shat = np.hstack((Shat, Shat_update))
                            rvec.extend(rvec_update)
                            num_sparses.extend(num_sparses_update)
                            cp.extend([changepoint + j for j in cp_update])
                            return Lhat, Shat, rvec, cp, num_sparses
        i += 1
    return Lhat, Shat, rvec, cp, num_sparses
def update_col(U, A, B, lambda1):
    """Block-coordinate update of the basis matrix U.

    Updates U column by column against the ridge-regularized statistics
    (A + lambda1*I, B), projecting each column onto the unit ball.
    Mutates U in place and returns it.
    """
    m, r = U.shape
    A_reg = A + lambda1 * np.identity(r)
    for col in range(r):
        # One coordinate-descent step for column `col`, then project to norm <= 1.
        step = (B[:, col] - U.dot(A_reg[:, col])) / A_reg[col, col] + U[:, col]
        U[:, col] = step / max(np.linalg.norm(step), 1)
    return U
<reponame>anastasia-spb/CarND-Advanced-Lane-Lines
import numpy as np
import cv2
import os
import glob
import ntpath
import matplotlib.pyplot as plt
class ParamsStruct():
    """Mutable bag of state shared with the OpenCV mouse callback.

    Holds the click counter, the window name, the working image, the list of
    clicked [x, y] points and whether guide lines should be drawn.
    """
    def __init__(self, count=0, window_name="", img=None, points=None, draw_help_lines=True):
        self.count = count
        self.window_name = window_name
        self.img = img
        # Fix: `points=[]` was a mutable default argument, so every instance
        # created without an explicit list shared the SAME list (clicks from a
        # previous window leaked into the next). Each instance now gets its own.
        self.points = [] if points is None else points
        self.draw_help_lines = draw_help_lines
def mouseCB(event, x, y, flags, param):
    '''
    Handle clicks inside the point-selection window: mark the clicked pixel,
    optionally draw horizontal guide lines, and record the point on `param`.
    '''
    clicked = event == cv2.EVENT_LBUTTONDBLCLK or event == cv2.EVENT_LBUTTONDOWN
    if clicked:
        red = (0, 0, 255)  # BGR
        cv2.circle(param.img, (x, y), 3, red, 3)
        if param.count == 0 and param.draw_help_lines == True:
            # Guide line to the right edge helps align the upper-right click.
            cv2.line(param.img, (x, y), (param.img.shape[1], y), red, 3)
        if param.count == 2 and param.draw_help_lines == True:
            # Guide line from the left edge helps align the bottom-left click.
            cv2.line(param.img, (0, y), (x, y), red, 3)
        param.count += 1
        param.points.append([x, y])
        cv2.imshow(param.window_name, param.img)
def choose_points(img_input, draw_help_lines=True):
    """
    Interactively pick four source points for the perspective transform.

    Click, in order: 1) upper-left, 2) upper-right, 3) bottom-right,
    4) bottom-left — then press 'q' to quit. Important observation: choose
    the bottom points as close as possible to the image bottom.

    Returns the list of clicked [x, y] points.
    """
    canvas = img_input.copy()
    window_name = "Choose source points"
    state = ParamsStruct(0, window_name, canvas, [], draw_help_lines)
    cv2.namedWindow(window_name)
    cv2.imshow(window_name, canvas)
    cv2.setMouseCallback(window_name, mouseCB, state)
    # Block until the user presses 'q'.
    while (cv2.waitKey(0) & 0xFF) != ord('q'):
        pass
    cv2.destroyAllWindows()
    return state.points
def align_manually_chosen_points(points, offset_top=0, offset_bottom=0):
    '''
    Snap four hand-picked points into a trapezium (we suppose the lane lines
    are parallel): both top corners share the upper-left y, both bottom
    corners share the bottom-right y, and the bottom-right x is shifted by the
    same horizontal skew observed on the left side.

    Returns (src, points_list) where src is the np.float32 array of the
    aligned corners and points_list the same corners as plain lists.
    '''
    upper_left, upper_right, bottom_right, bottom_left = points[0], points[1], points[2], points[3]
    skew = upper_left[0] - bottom_left[0]
    aligned = [
        # upper left / upper right share the upper-left y
        [upper_left[0] - offset_top, upper_left[1]],
        [upper_right[0] + offset_top, upper_left[1]],
        # bottom right / bottom left share the bottom-right y
        [upper_right[0] + skew + offset_bottom, bottom_right[1]],
        [bottom_left[0] - offset_bottom, bottom_right[1]],
    ]
    return np.float32(aligned), aligned
def roi(img, vertices):
    '''
    White out the polygon given by `vertices`: OR the image with a mask that
    is 255 inside the polygon, so the polygon interior becomes white while
    pixels outside it are left untouched. (Despite the previous docstring,
    nothing is set to zero here — used to highlight the selected region.)
    '''
    # blank mask, same shape/dtype as the input
    mask = np.zeros_like(img)
    # filling pixels inside the polygon defined by vertices with the fill color
    cv2.fillPoly(mask, vertices, 255)
    # OR-ing with the 255-filled polygon whites out the region; the rest of
    # the image passes through unchanged.
    masked_background = cv2.bitwise_or(img, mask)
    return masked_background
def plot_mask_on_image(src_img, points_list):
    '''
    Highlight the selected polygon on the image by whiting out its interior
    (delegates to roi(), which ORs a filled mask onto the image).
    '''
    polygon = np.array([points_list], dtype=np.int32)
    return roi(src_img, polygon)
def set_destination_points(input_img, offset_x=300, offset_y=0):
    '''
    Build the destination rectangle corners for the perspective transform:
    the image rectangle inset by offset_x horizontally and offset_y
    vertically, ordered upper-left, upper-right, bottom-right, bottom-left.
    '''
    height, width, _ = input_img.shape
    left = offset_x
    right = width - offset_x
    top = offset_y
    bottom = height - offset_y
    return np.float32([[left, top], [right, top], [right, bottom], [left, bottom]])
def region_of_interest(img, vertices, inside=False):
    """
    Apply a polygon image mask.

    With inside=False, keep only the pixels inside the polygon formed from
    `vertices` (everything else becomes black); with inside=True, keep only
    the pixels outside it. `vertices` should be a numpy array of integer
    points.
    """
    mask = np.zeros_like(img)
    # Fill color must match the image depth (3/4-channel color vs. grayscale).
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    cv2.fillPoly(mask, vertices, fill_color)
    # AND against the mask (or its inverse) to black out the unwanted region.
    if inside == False:
        return cv2.bitwise_and(img, mask)
    return cv2.bitwise_and(img, ~mask)
def mask_image(input_img, inside):
    """Interactively pick a polygon and mask the image against it.

    inside=False keeps only the polygon interior; inside=True blacks it out.
    Returns the masked image.

    Fix: removed a dead `masked_img = np.copy(input_img)` whose result was
    immediately overwritten — region_of_interest already returns a new image.
    """
    mask_points = choose_points(input_img, draw_help_lines=False)
    vertices_mask = np.array([mask_points], dtype=np.int32)
    return region_of_interest(input_img, vertices_mask, inside)
def calculate_perpective_matrix(input_img):
    '''
    Full pipeline for the perspective-transformation step: pick the source
    points interactively, align them into a trapezium, preview the selected
    region, and compute the forward and inverse transform matrices.

    Returns (M, Minv, preview_image).
    '''
    chosen = choose_points(input_img)
    src, aligned_points = align_manually_chosen_points(chosen)
    preview = plot_mask_on_image(input_img, aligned_points)
    dst = set_destination_points(input_img)
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    return forward, inverse, preview
def visualize(img_ref, warped):
    """Show the original and warped images side by side; a vertical green
    guide line is drawn at x=50 on a copy of the warped image."""
    fig, (ax_orig, ax_warp) = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))
    ax_orig.set_title('Original image')
    ax_orig.imshow(img_ref)
    overlay = np.copy(warped)
    thickness = 2
    guide_x = 50
    cv2.line(overlay, (guide_x, 0), (guide_x, img_ref.shape[0]), (0, 255, 0), thickness)
    ax_warp.set_title('Transformed image')
    ax_warp.imshow(overlay)
    plt.show()
def save_binary_image(img, M, Minv, name_dir, img_name):
    """Persist a warped binary image and its perspective matrices as .npy
    files under ../test_images/<name_dir>/, creating the directory if needed.
    The matrices share the image's base name, prefixed with M_ / Minv_."""
    out_dir = '../test_images/' + name_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    stem = os.path.splitext(img_name)[0]
    np.save(out_dir + '/' + stem + '.npy', img)
    np.save(out_dir + '/M_' + stem + '.npy', M)
    np.save(out_dir + '/Minv_' + stem + '.npy', Minv)
def load_and_visu_stored_arrays():
    """Load every previously saved warped array and display it one at a time."""
    for fname in glob.glob('../test_images/warped/test*.npy'):
        plt.imshow(np.load(fname))
        plt.show()
def main():
    """Interactively calibrate perspective matrices for each binary test
    image, warp it to a bird's-eye view and save the results — or, when
    show_results is True, just display arrays saved by a previous run."""
    # Toggle by hand: False = run the interactive calibration pipeline,
    # True = only view previously stored arrays.
    show_results = False
    if show_results == False:
        images_names = glob.glob('../test_images/combined_binary/test*.jpg')
        destination_dir_name = "warped"
        for fname in images_names:
            img = cv2.imread(fname)
            # Pick source points and compute forward/inverse matrices (interactive).
            M, Minv, img_with_marked_area = calculate_perpective_matrix(img)
            masked_img = mask_image(img, inside=False) # mask outside regions
            masked_img_inside = mask_image(masked_img, inside=True) # mask inner region
            # cv2.warpPerspective expects (width, height).
            img_size = (masked_img_inside.shape[1], masked_img_inside.shape[0])
            warped = cv2.warpPerspective(masked_img_inside, M, img_size, flags=cv2.INTER_LINEAR)
            save_binary_image(warped, M, Minv, destination_dir_name, ntpath.basename(fname))
            visualize(img_with_marked_area, warped)
    else:
        load_and_visu_stored_arrays()
# Script entry point: run the interactive perspective-calibration pipeline.
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
import unittest
import os
from requests import get
import json
class ParticipantQuerySessionsTest(unittest.TestCase):
    """Integration tests for the participant '/api/participant/sessions' endpoint.

    Requires a running OpenTera test server on 127.0.0.1:40075 with the
    default 'participant1'/'opentera' account. The server uses a self-signed
    certificate, hence verify=False on every request.

    Improvements: dunder ``__contains__`` calls replaced with ``assertIn``
    (idiomatic, better failure messages); the per-item field checks that were
    duplicated — and slightly inconsistent — between the HTTP-auth and
    token-auth tests are consolidated into _assert_session_item_fields (the
    HTTP-auth variant previously indexed 'id_creator_device' without first
    asserting its presence).
    """
    host = '127.0.0.1'
    port = 40075
    login_endpoint = '/api/participant/login'
    sessions_endpoint = '/api/participant/sessions'

    # Keys expected on every session dict returned by the endpoint.
    _SESSION_FIELDS = ('id_session', 'id_session_type', 'session_comments',
                       'session_duration', 'session_name',
                       'session_start_datetime', 'session_status',
                       'session_uuid', 'session_participants', 'session_users')

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _make_url(self, hostname, port, endpoint):
        """Build the https URL for an endpoint on the test server."""
        return 'https://' + hostname + ':' + str(port) + endpoint

    def _get_token_with_login_http_auth(self, username, password):
        """Log in with HTTP basic auth and return the participant (dynamic) token."""
        url = self._make_url(self.host, self.port, self.login_endpoint)
        auth_response = get(url=url, verify=False, auth=(username, password))
        # HTTP AUTH REQUIRED TO GET TOKEN
        self.assertEqual(auth_response.status_code, 200)
        self.assertEqual(auth_response.headers['Content-Type'], 'application/json')
        json_auth = auth_response.json()
        self.assertIn('participant_token', json_auth)
        return json_auth['participant_token']

    def _get_base_token_with_login_http_auth(self, username, password):
        """Log in with HTTP basic auth and return the participant *base* token."""
        url = self._make_url(self.host, self.port, self.login_endpoint)
        auth_response = get(url=url, verify=False, auth=(username, password))
        # HTTP AUTH REQUIRED TO GET TOKEN
        self.assertEqual(auth_response.status_code, 200)
        self.assertEqual(auth_response.headers['Content-Type'], 'application/json')
        json_auth = auth_response.json()
        self.assertIn('base_token', json_auth)
        return json_auth['base_token']

    def _request_with_http_auth(self, username, password, payload=None):
        """GET the sessions endpoint using HTTP basic auth."""
        if payload is None:
            payload = {}
        url = self._make_url(self.host, self.port, self.sessions_endpoint)
        return get(url=url, verify=False, auth=(username, password), params=payload)

    def _request_with_token_auth(self, token, payload=None):
        """GET the sessions endpoint using an 'OpenTera <token>' Authorization header."""
        if payload is None:
            payload = {}
        url = self._make_url(self.host, self.port, self.sessions_endpoint)
        request_headers = {'Authorization': 'OpenTera ' + token}
        return get(url=url, verify=False, headers=request_headers, params=payload)

    def _request_with_no_auth(self, payload=None):
        """GET the sessions endpoint with no credentials at all."""
        if payload is None:
            payload = {}
        url = self._make_url(self.host, self.port, self.sessions_endpoint)
        return get(url=url, verify=False, params=payload)

    def _assert_session_item_fields(self, data_item):
        """Shared structural checks for a single session dict."""
        self.assertGreater(len(data_item), 0)
        self.assertIn('id_creator_device', data_item)
        if data_item['id_creator_device']:
            self.assertIn('session_creator_device', data_item)
        self.assertIn('id_creator_participant', data_item)
        if data_item['id_creator_participant']:
            self.assertIn('session_creator_participant', data_item)
        self.assertIn('id_creator_user', data_item)
        if data_item['id_creator_user']:
            self.assertIn('session_creator_user', data_item)
        for field in self._SESSION_FIELDS:
            self.assertIn(field, data_item)

    def test_query_invalid_http_auth(self):
        response = self._request_with_http_auth('invalid', 'invalid')
        self.assertEqual(response.status_code, 401)

    def test_query_invalid_token_auth(self):
        response = self._request_with_token_auth('invalid')
        self.assertEqual(response.status_code, 401)

    def test_query_http_auth_no_params(self):
        response = self._request_with_http_auth('participant1', 'opentera')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        json_data = response.json()
        self.assertGreater(len(json_data), 0)
        for data_item in json_data:
            self._assert_session_item_fields(data_item)

    def test_query_token_auth_no_params(self):
        # HTTP AUTH REQUIRED TO GET TOKEN
        token = self._get_token_with_login_http_auth('participant1', 'opentera')
        response = self._request_with_token_auth(token)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers['Content-Type'], 'application/json')
        json_data = response.json()
        self.assertGreater(len(json_data), 0)
        for data_item in json_data:
            self._assert_session_item_fields(data_item)

    def test_query_base_token(self):
        """A base (login-only) token must not grant access to sessions."""
        token = self._get_base_token_with_login_http_auth('participant1', 'opentera')
        response = self._request_with_token_auth(token)
        # Should not be allowed
        self.assertEqual(response.status_code, 403)
|
#!/usr/bin/env python3
'''VisibleV8 Builder Tool
Actually a generic chromium-building tool that facilitates checking out
specific [release] revisions of chromium, building selected targets in
a checked-out source tree, and installing artifacts from a build into
a fresh Docker container image.
'''
import argparse
import glob
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
# Directory holding the provisioning/checkout/build scripts shipped with this tool.
RESOURCE_DIR = os.path.join(os.path.dirname(__file__), "resources")
# Loose "looks like a git commit hash" check (hex digits only).
RE_COMMIT_HASHISH = re.compile(r'^[0-9a-fA-F]+$')
# Minimum docker client major version this tool supports.
DOCKER_MAJOR_VERSION = 18
# Extracts the major version from `docker --version` output.
RE_DOCKER_MAJOR_VERSION = re.compile(r'^Docker version (\d+)\.')
# Base image used when provisioning a per-commit 'chrome-builder' image.
BUILDER_BASE_IMAGE = "ubuntu:xenial"
# GN args for debug builds (ASan, V8 debugging aids enabled).
DEBUG_OPTIONS = [
    "enable_nacl=false",
    "is_debug=true",
    "is_asan=true",
    "v8_enable_debugging_features=true",
    "v8_enable_object_print=true",
    "v8_optimized_debug=false",
    "v8_enable_backtrace=true",
    "v8_postmortem_support=true",
]
# GN args for official release builds.
RELEASE_OPTIONS = [
    "enable_nacl=false",
    "is_debug=false",
    "is_official_build=true",
    "use_thin_lto=false",  # crazy disk space usage
    "is_cfi=false",  # required when thin_lto is off
    "enable_linux_installer=true",
]
# Shorthand build-target aliases accepted on the command line.
MAGIC_TARGETS = {
    '@std': ['chrome', 'chrome/installer/linux:stable_deb', 'v8_shell', 'v8/test/unittests'],
    '@chrome': ['chrome', 'chrome/installer/linux:stable_deb'],
    '@v8': ['v8_shell', 'v8/test/unittests'],
}
# Build outputs always copied out of the build tree.
BASELINE_ARTIFACTS = [
    'v8_shell', 'unittests', 'icudtl.dat', 'natives_blob.bin', 'snapshot_blob.bin',
]
def docker_test():
    '''Check to see if Docker is available/compatible with our needs.

    Raises ValueError when the client version cannot be parsed or is too old;
    raises from subprocess when the docker client is missing entirely.
    '''
    version_bytes = subprocess.check_output(['docker', '--version'])
    version_text = version_bytes.decode('utf8').strip()
    logging.debug(version_text)
    match = RE_DOCKER_MAJOR_VERSION.match(version_text)
    if match is None:
        raise ValueError("Docker version output ('{0}') unparsable".format(version_text))
    major = int(match.group(1))
    if major < DOCKER_MAJOR_VERSION:
        raise ValueError("Docker major version ({0}) must be >= {1}".format(major, DOCKER_MAJOR_VERSION))
def docker_check_image(image_name: str) -> bool:
    '''Return True if <image_name> exists as a local Docker container image.
    '''
    # `docker image ls -q` prints matching image IDs; empty output means no
    # such image. Should always succeed once docker_test() has passed.
    listing = subprocess.check_output(['docker', 'image', 'ls', '-q', image_name])
    logging.debug("docker image ls '{0}' -> '{1}'".format(image_name, listing.decode('utf8').strip()))
    return bool(listing)
def docker_run_builder(cname: str, iname: str, entry: str, *args, setup_mount: str = None, work_mount: str = None, setup_var="SETUP", work_var="WORKSPACE"):
    '''Utility helper to run our builder containers without redundancy.

    Runs the container in the foreground and without automatic cleanup
    (i.e., no '-d' or '--rm' flags) and raises if the docker client exits
    non-0. Names the instance <cname>, runs the <iname> image, sets CWD to
    `dirname(<entry>) or '/'` with `./basename(<entry>)` as the entry point,
    and passes on any/all <args>. When <work_mount> is given, binds it to
    "/work" and exports "<work_var>=/work"; when <setup_mount> is given,
    binds it read-only to "/setup" and exports "<setup_var>=/setup".
    '''
    workdir = os.path.dirname(entry) or '/'
    entrypoint = './' + os.path.basename(entry)
    cmd = ['docker', 'run', '--name', cname]
    if work_mount:
        cmd.extend(['-v', '%s:/work' % os.path.realpath(work_mount),
                    '-e', '%s=/work' % work_var])
    if setup_mount:
        cmd.extend(['-v', '%s:/setup:ro' % os.path.realpath(setup_mount),
                    '-e', '%s=/setup' % setup_var])
    cmd.extend(['-w', workdir, iname, entrypoint])
    cmd.extend(args)
    logging.debug(cmd)
    subprocess.check_call(cmd)
def get_builder_image(args) -> str:
    '''Return the full name/tag of the 'chrome-builder' container image to use for checkout/build activity.

    If no such image exists locally, provision and commit one.
    <args.commit> (a Chromium project commit hash) becomes the per-version tag.
    '''
    commit = args.commit
    if RE_COMMIT_HASHISH.match(commit):
        tagged_name = 'chrome-builder:{0}'.format(commit)
        # Easy win: a per-commit image already exists locally.
        if docker_check_image(tagged_name):
            return tagged_name
    else:
        # Not a hash: fall back to a 'latest' image provisioned from master.
        tagged_name = 'chrome-builder:latest'
        commit = "origin/master"
        logging.warning("Invalid commit hash '{0}'; provisioning {1} from '{2}'".format(args.commit, tagged_name, commit))
    # Provision a builder image: run the setup scripts inside the base image,
    # guided by <commit>, then commit the result as <tagged_name>.
    provision_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "_provision"))
    container = "builder-tool-provision-{0}".format(os.getpid())
    logging.info("Running _provision/entry.sh inside image '{0}' (name {1})".format(BUILDER_BASE_IMAGE, container))
    docker_run_builder(container, BUILDER_BASE_IMAGE, "/setup/entry.sh", commit, setup_mount=provision_dir, work_mount=args.root)
    # Snapshot the provisioned container as the reusable builder image.
    logging.info("Committing state of {0} as '{1}'".format(container, tagged_name))
    subprocess.check_call([
        'docker', 'commit',
        '-m', "Auto-provision of chrome-builder container image for commit '{0}'".format(commit),
        '-c', "WORKDIR /work",
        '-c', "CMD bash",
        container,
        tagged_name])
    # Drop the now-redundant container.
    logging.info("Deleting container {0}".format(container))
    subprocess.check_call(['docker', 'rm', container])
    return tagged_name
def do_checkout(args):
    '''Sync (or perform an initial checkout of) a Chromium source tree at a given commit/version.
    '''
    # Docker must be present even for a plain checkout (all real work runs in a container)
    docker_test()
    # Resolve the builder image to use (yes, we need that just to checkout)
    image = get_builder_image(args)
    logging.info("Using build image '{0}' to checkout '{1}' to {2}".format(image, args.commit, args.root))
    # The checkout scripts get mounted at /setup inside the container
    scripts_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "checkout"))
    # Run the builder container with the checkout entry script as the entry point
    # (all the real work happens in there)
    container = "builder-tool-checkout-{0}".format(os.getpid())
    docker_run_builder(container, image,
                       "/setup/entry.sh", args.commit,
                       work_mount=args.root,
                       setup_mount=scripts_dir)
    # Nothing worth snapshotting--just discard the stopped container
    logging.info("Deleting container {0}".format(container))
    subprocess.check_call(['docker', 'rm', container])
def do_shell(args):
    '''Launch the builder container, with the root workspace mounted, and spawn a shell (REPLACES current process).
    Requires at least a `tool.py checkout` first.

    Reads <args.root>/src/.git/HEAD to select the per-commit builder image, then
    exec's `docker run -it ... /bin/bash`; on success this function never returns.
    '''
    # We need Docker available
    docker_test()
    # Look up the current HEAD of the root git checkout (<args.root>/src/.git/HEAD)
    # (this lets us look up a per-commit builder container image)
    head_path = os.path.join(args.root, "src", ".git", "HEAD")
    if os.path.isfile(head_path):
        with open(head_path, "r", encoding="utf8") as fd:
            args.commit = fd.read().strip()
    else:
        logging.error("Not a valid Chromium source tree (no such file '{0}')! Do you need to `checkout` first?".format(head_path))
        sys.exit(1)
    # Get the name/tag of the appropriate builder container (which actually makes sense, for building)
    builder_image = get_builder_image(args)
    logging.info("Using build image '{0}' to launch shell inside {1}".format(builder_image, args.root))
    # Find our setup scripts here (so we can test them manually on target)
    setup_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "build"))
    # Run the builder container with bash as the entry point
    cname = "builder-tool-shell-{0}".format(os.getpid())
    docker_args = [
        'docker', 'run', '--rm', '-it',
        '--name', cname,
        '-v', '%s:/work' % os.path.realpath(args.root),
        '-v', '%s:/setup:ro' % setup_dir,
        '-e', 'WORKSPACE=/work',
        '-e', 'SETUP=/setup',
        # BUG FIX: a missing comma made these two strings concatenate into the
        # single argv entry '-ubuilder' (which only worked by accident of the
        # docker CLI's short-flag parsing); pass flag and value separately
        '-u', 'builder',
        builder_image, '/bin/bash']
    # Replace the current process with the docker client (no return on success)
    os.execvp(docker_args[0], docker_args)
def do_build(args):
    '''Configure and build a set of Chromium build targets within the context of a source tree.
    Requires at least a `tool.py checkout` first.

    Reads <args.root>/src/.git/HEAD to pick the per-commit builder image, writes an
    args.gn from <args.options>, expands magic targets, and runs the build entry
    script inside the builder container.
    '''
    # We need Docker available
    docker_test()
    # Look up the current HEAD of the root git checkout (<args.root>/src/.git/HEAD)
    # (this lets us look up a per-commit builder container image)
    head_path = os.path.join(args.root, "src", ".git", "HEAD")
    if os.path.isfile(head_path):
        with open(head_path, "r", encoding="utf8") as fd:
            args.commit = fd.read().strip()
    else:
        logging.error("Not a valid Chromium source tree (no such file '{0}')! Do you need to `checkout` first?".format(head_path))
        sys.exit(1)
    # Get the name/tag of the appropriate builder container (which actually makes sense, for building)
    builder_image = get_builder_image(args)
    logging.info("Using build image '{0}' to build '{1}' inside {2}".format(builder_image, args.commit, args.root))
    # Set up the build/output directory (it must stay inside the source tree)
    build_dir = os.path.realpath(os.path.join(args.root, 'src', args.subdir))
    root = os.path.realpath(args.root)
    # BUG FIX: a plain startswith() prefix test wrongly accepts sibling paths
    # like "<root>2/..."; anchor the comparison at a path-component boundary
    if not (build_dir == root or build_dir.startswith(root + os.sep)):
        logging.error("Build/output directory ({0}) must be somewhere inside Chromium source tree!".format(args.subdir))
        sys.exit(1)
    os.makedirs(build_dir, exist_ok=True)
    logging.info("Using '{0}' as build/output directory".format(build_dir))
    # Generate the options file (args.gn)
    # BUG FIX: split on the *first* '=' only, so option values containing '='
    # (e.g. extra_cflags=-DFOO=1) survive instead of raising ValueError
    options = dict(o.split('=', 1) for o in args.options)
    args_gn = os.path.join(build_dir, "args.gn")
    with open(args_gn, "w", encoding="utf8") as fd:
        for key, value in options.items():
            if value:  # options with an empty value act as "unset" and are skipped
                print("{0}={1}".format(key, value), file=fd)
    logging.info("Configuration placed in {0}".format(args_gn))
    # Expand magic targets (if any)
    targets = []
    for t in args.targets:
        if t in MAGIC_TARGETS:
            targets += MAGIC_TARGETS[t]
        else:
            if t.startswith('@'):
                # '@'-names are reserved for magic targets; pass through with a warning
                logging.warning("Unknown magic target '{0}'; ninja will probably not like this!".format(t))
            targets.append(t)
    logging.info("Build targets: {}".format(targets))
    # Optionally halt here...
    if args.dryrun:
        logging.info("Dry run--stopping here...")
        sys.exit(0)
    # Find our setup scripts here
    setup_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "build"))
    # Run the builder container entry script with the clean-flag/directory/targets
    cname = "builder-tool-build-{0}".format(os.getpid())
    cbuild_dir = os.path.join("/work", os.path.relpath(build_dir, args.root))
    cargs = []
    if args.clean:
        cargs.append("--clean")
    if args.idl:
        cargs.append("--idl")
    cargs.append(cbuild_dir)
    cargs += targets
    docker_run_builder(cname, builder_image,
                       "/setup/entry.sh", *cargs,
                       work_mount=args.root,
                       setup_mount=setup_dir)
    # Clean up the dangling container itself (no need to snapshot first here)
    logging.info("Deleting container {0}".format(cname))
    subprocess.check_call(['docker', 'rm', cname])
def do_install(args):
    '''Install build artifacts from a Chromium source tree output directory into a Docker container image.
    '''
    # Docker must be available
    docker_test()
    # Locate the build/output directory inside the source tree
    build_dir = os.path.join(args.root, "src", args.subdir)
    if not os.path.isdir(build_dir):
        logging.error("'{0}' is not a build directory!".format(build_dir))
        sys.exit(1)
    artifacts = [os.path.join(build_dir, f) for f in BASELINE_ARTIFACTS]
    # The presence of a .deb selects the deb-based install flavor
    debs = glob.glob(os.path.join(build_dir, "*.deb"))
    if not debs:
        logging.warning("No .deb files found in build directory; proceeding with no-deb install...")
        package_name = "test_vv8"
        setup_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "install_nodeb"))
    else:
        # The most recently modified package wins
        newest_deb = max(debs, key=os.path.getmtime)
        artifacts.append(newest_deb)
        package_name = os.path.splitext(os.path.basename(newest_deb))[0]
        setup_dir = os.path.realpath(os.path.join(RESOURCE_DIR, "install_deb"))
    target_image = args.tag.replace("{package}", package_name)
    artifacts.append(os.path.join(setup_dir, "Dockerfile"))
    # Pull in any user-requested extras from the build directory
    for pattern in args.artifacts:
        artifacts += glob.glob(os.path.join(build_dir, pattern))
    artifacts = list(set(artifacts))  # de-duplicate
    logging.debug(artifacts)
    # Stage the Dockerfile and all artifacts in a scratch directory, then build from it
    with tempfile.TemporaryDirectory() as scratch_dir:
        for f in artifacts:
            shutil.copy(f, scratch_dir)
        build_cmd = [
            'docker', 'build',
            '-t', target_image,
            '--build-arg', "BASE_IMAGE={0}".format(args.base),
            '--build-arg', "ARTIFACT_DIR={0}".format(args.artifact_dest),
            '--build-arg', "PACKAGE_NAME={0}".format(package_name),
            '--build-arg', "RUN_USER={0}".format(args.run_user),
            scratch_dir]
        logging.debug(build_cmd)
        subprocess.check_call(build_cmd)
def main(argv):
    '''Parse the command line and dispatch to the selected sub-command handler.

    argv: the full process argument vector (argv[0] is the program name and skipped).
    '''
    # Root/global command options
    ap = argparse.ArgumentParser(description="VisibleV8 build tool")
    ap.add_argument('-l', '--log-level', dest="log_level", metavar="LEVEL", choices=[
        "DEBUG", "INFO", "WARNING", "ERROR"
    ], default="INFO", help="Set logging level to LEVEL")
    ap.add_argument('-d', '--directory', dest="root", metavar="PATH",
                    default=os.getcwd(),
                    help="Work on (or create) a Chromium source tree rooted inside PATH")
    # With no sub-command given, handler stays None and we print help below
    ap.set_defaults(handler=None)
    subs = ap.add_subparsers()

    # "checkout" command
    p_checkout = subs.add_parser('checkout', aliases=['co'],
                                 help="Check out/sync up a Chromium source tree to a particular commit/version")
    p_checkout.add_argument('commit',
                            metavar="HASH",
                            help="Chromium project commit hash")
    p_checkout.set_defaults(handler=do_checkout)

    # "shell" command
    p_shell = subs.add_parser('shell', aliases=['sh'],
                              help="Launch /bin/bash inside the builder container with the workspace mounted")
    p_shell.set_defaults(handler=do_shell)

    # "build" command
    p_build = subs.add_parser('build', aliases=['b'],
                              help="Configure and build selected targets in a Chromium source tree")
    p_build.set_defaults(options=RELEASE_OPTIONS)
    p_build.add_argument('-s', '--sub-directory', metavar="PATH", dest="subdir", default='out/Builder',
                         help="Perform build inside PATH (relative to Chromium project root)")
    # NOTE(review): argparse's action='append' appends to the *existing default
    # object*, so the first -o mutates the shared RELEASE_OPTIONS/DEBUG_OPTIONS
    # list in place. Appears harmless for a one-shot CLI run, but confirm before
    # reusing those constants elsewhere.
    p_build.add_argument('-o', '--option', metavar='NAME=VALUE', dest='options', action='append',
                         help="Add NAME=VALUE to the list of build options")
    p_build.add_argument('--debug', dest='options', action='store_const', const=DEBUG_OPTIONS,
                         help="Use standard debug-build options")
    p_build.add_argument('--release', dest='options', action='store_const', const=RELEASE_OPTIONS,
                         help="Use standard release-build options [the default]")
    p_build.add_argument('--dry-run', dest='dryrun', action='store_true', default=False,
                         help="Stop before launching build (leaving configuration/etc in place)")
    p_build.add_argument('-c', '--clean', dest='clean', action='store_true', default=False,
                         help="Clean before building (default: False)")
    p_build.add_argument('-i', '--idl', dest='idl', action='store_true', default=False,
                         help="Parse Chromium's WebIDL data to produce an 'idldata.json' dump")
    p_build.add_argument('targets', metavar="TARGET", nargs="+",
                         help="One or more TARGETs to build in the Chromium project")
    p_build.set_defaults(handler=do_build)

    # "install" command
    p_install = subs.add_parser('install', aliases=['in'],
                                help="Install Chromium build artifacts into a Docker container image")
    p_install.add_argument('-s', '--sub-directory', metavar="PATH", dest="subdir", default='out/Builder',
                           help="Pull artifacts from build directory PATH inside Chrome source tree")
    p_install.add_argument('-b', '--base', metavar="DOCKER_IMAGE", default="node:lts-jessie",
                           help="Use DOCKER_IMAGE as the base of the new image")
    p_install.add_argument('--artifact-dest', metavar="PATH", dest="artifact_dest", default="/artifacts",
                           help="Place all copied artifacts in PATH inside the new image (default: /artifacts)")
    p_install.add_argument('-t', '--tag', metavar="IMAGE:TAG", default="{package}:latest",
                           help='''\
Name the output image IMAGE:TAG
(default "{package}:latest"; the string "{package}" is
replaced with the base name of the installed DEB package)
''')
    p_install.add_argument('-u', '--run-user', metavar="USER", default="node",
                           help="Run container entry point as USER")
    p_install.add_argument('artifacts', metavar="GLOB", nargs='*',
                           help="Copy additional artifacts matched by GLOB from build directory to installed image")
    p_install.set_defaults(handler=do_install)

    # MAIN ENTRY LOGIC:
    ###################
    args = ap.parse_args(argv[1:])
    # Every handler assumes an absolute, existing root directory
    args.root = os.path.realpath(args.root)
    assert os.path.isdir(args.root)
    logging.basicConfig(level=getattr(logging, args.log_level))
    if args.handler:
        args.handler(args)
    else:
        ap.print_help()
|
<filename>plga_evaluate_bleu_score.py
import os
import common as cm
import sacrebleu
import nmt_data_prep as nmtdp
import plga_transformer_run_model as plga_run
def evaluate_bleu(model_dict,
                  model_name,
                  model_type,
                  src_lang='pt',
                  tgt_lang='en',
                  dataset_file='ted_hrlr_translate/pt_to_en',
                  revert_order='False',
                  inp_obj=None,
                  chkpt_path="./model_saves/",
                  data_path="./model_data/",
                  load_ckpt='train',
                  tok_model_name="./ted_hrlr_translate_pt_en_tokenizer",
                  max_length=50,
                  ds_max_length=None,
                  verbose=False):
    '''
    Compute a corpus BLEU score for a transformer model on the dataset's test split,
    decoding with greedy search. Predictions and reference sentences are cached on
    disk and reused on subsequent runs with the same parameters.

    Args:
        model_dict: model parameters for the transformer.
        model_name: model name (used in the cache file names).
        model_type: "train" or "val" (used in the cache file names).
        src_lang: source language abbreviation as string.
        tgt_lang: target language abbreviation as string.
        dataset_file: path to the tensorflow dataset.
        revert_order: reverts the order of language pairs in dataset_file; the
            reverted order should match the src_lang/tgt_lang assignment.
            NOTE(review): the default 'False' is a *string*, which is truthy --
            confirm how nmt_data_prep interprets this value.
        inp_obj: dataset object if it was already created.
        chkpt_path: path where model checkpoints can be loaded from.
        data_path: path to save model data to or load it from.
        load_ckpt: 'train' or 'val' checkpoint to load from.
        tok_model_name: file path for the tokenizer model.
        max_length: offset for the maximum iterations when predicting tokens.
        ds_max_length: maximum token length for filtering dataset sentences
            (None disables filtering).
        verbose: if True, print out more details.

    Returns:
        The sacrebleu corpus BLEU score object.
    '''
    # Build the dataset object on demand if the caller did not supply one
    if inp_obj is None:
        print("Getting Inputs")
        inp_obj = nmtdp.src_tgt_data_prep(src_lang=src_lang,
                                          tgt_lang=tgt_lang,
                                          BUFFER_SIZE=20000,
                                          BATCH_SIZE=64,
                                          dataset_file=dataset_file,
                                          load_dataset=True,
                                          train_percent=None,
                                          model_name=tok_model_name,
                                          revert_order=revert_order,
                                          shuffle_set=True,
                                          shuffle_files=True,
                                          MAX_LENGTH=ds_max_length,
                                          verbose=verbose)
    print("Initializing model e2e object")
    runner = plga_run.plga_transformer_e2e(inp_obj.tokenizers_src, inp_obj.tokenizers_tgt,
                                           checkpoint_path=chkpt_path,
                                           hpdict=model_dict,
                                           load_ckpt=load_ckpt)
    # Decode the raw test pairs into plain python strings
    src_sentences, ref_sentences = [], []
    for src, tgt in inp_obj.test_examples:
        src_sentences.append(src.numpy().decode('utf-8'))
        ref_sentences.append(tgt.numpy().decode('utf-8'))
    # Cache key encodes the model/run parameters
    len_tag = ds_max_length if ds_max_length is not None else "_unpadded"
    run_tag = f"{model_type}_{model_name}_greedy_evallen{max_length}_dslen{len_tag}"
    pred_file = os.path.join(data_path, f"predictions_{run_tag}.pkl")
    if os.path.exists(pred_file):
        print("Loading predictions from file", pred_file)
        predictions, pred_tokens = cm.pklload(pred_file)
    else:
        print("Predicting test sentences")
        predictions, pred_tokens = runner.evaluate_test(src_sentences, ref_sentences,
                                                        max_length=max_length,
                                                        filename=pred_file, verbose=verbose)
    # References are cached likewise; sacrebleu expects a list of reference streams
    refs_file = os.path.join(data_path, f"ref_bleu_test_{run_tag}.pkl")
    if os.path.exists(refs_file):
        ref_streams = cm.pklload(refs_file)
    else:
        print("saving reference test sentences for target")
        ref_streams = [ref_sentences]
        cm.pklsave(refs_file, ref_streams)
    score = sacrebleu.corpus_bleu(predictions, ref_streams)
    print("corpus bleu score:", score)
    return score
def beam_evaluate_bleu(model_dict,
                       beam_size,
                       model_name,
                       model_type,
                       src_lang='pt',
                       tgt_lang='en',
                       dataset_file='ted_hrlr_translate/pt_to_en',
                       revert_order='False',
                       inp_obj=None,
                       chkpt_path="./model_saves/",
                       data_path="./model_data/",
                       load_ckpt='train',
                       tok_model_name="./ted_hrlr_translate_pt_en_tokenizer",
                       max_length=50,
                       ds_max_length=None,
                       verbose=False):
    '''
    Compute a corpus BLEU score for a transformer model on the dataset's test split,
    decoding with beam search. Predictions and reference sentences are cached on
    disk and reused on subsequent runs with the same parameters.

    Args:
        model_dict: model parameters for the transformer.
        beam_size: beam length for beam search.
        model_name: model name (used in the cache file names).
        model_type: "train" or "val" (used in the cache file names).
        src_lang: source language abbreviation as string.
        tgt_lang: target language abbreviation as string.
        dataset_file: path to the tensorflow dataset.
        revert_order: reverts the order of language pairs in dataset_file; the
            reverted order should match the src_lang/tgt_lang assignment.
            NOTE(review): the default 'False' is a *string*, which is truthy --
            confirm how nmt_data_prep interprets this value.
        inp_obj: dataset object if it was already created.
        chkpt_path: path where model checkpoints can be loaded from.
        data_path: path to save model data to or load it from.
        load_ckpt: 'train' or 'val' checkpoint to load from.
        tok_model_name: file path for the tokenizer model.
        max_length: offset for the maximum iterations when predicting tokens.
        ds_max_length: maximum token length for filtering dataset sentences
            (None disables filtering).
        verbose: if True, print out more details.

    Returns:
        (model_bleu_score, unpreds): the sacrebleu corpus score and the list of
        sentences not predicted, if any.
    '''
    # Build the dataset object on demand if the caller did not supply one
    if inp_obj is None:
        print("Getting Inputs")
        inp_obj = nmtdp.src_tgt_data_prep(src_lang=src_lang,
                                          tgt_lang=tgt_lang,
                                          BUFFER_SIZE=20000,
                                          BATCH_SIZE=64,
                                          dataset_file=dataset_file,
                                          load_dataset=True,
                                          train_percent=None,
                                          model_name=tok_model_name,
                                          revert_order=revert_order,
                                          shuffle_set=True,
                                          shuffle_files=False,
                                          MAX_LENGTH=ds_max_length,
                                          verbose=verbose)
    print("Initializing model e2e object")
    e2e_obje = plga_run.plga_transformer_e2e(inp_obj.tokenizers_src, inp_obj.tokenizers_tgt,
                                             checkpoint_path=chkpt_path,
                                             hpdict=model_dict,
                                             load_ckpt=load_ckpt)
    # Decode the raw test pairs into plain python strings
    test_inputs, test_refs = [], []
    for src, tgt in inp_obj.test_examples:
        test_inputs.append(src.numpy().decode('utf-8'))
        test_refs.append(tgt.numpy().decode('utf-8'))
    # Cache key encodes the model/run parameters (including the beam size)
    dsmaxlen = ds_max_length if ds_max_length is not None else "_unpadded"
    model_name1 = f"{model_type}_{model_name}_beamsize{beam_size}_evallen{max_length}_dslen{dsmaxlen}"
    model_pred_path = os.path.join(data_path, f"predictions_{model_name1}.pkl")
    if not os.path.exists(model_pred_path):
        print("Predicting test sentences")
        pred_sent_lst_fullbeam, pred_tok_lst, final_beam_seq, unpreds = e2e_obje.beam_evaluate_test(
            test_inputs, test_refs, beam_size=beam_size,
            max_length=max_length, filename=model_pred_path, verbose=verbose)
    else:
        print("Loading predictions from file", model_pred_path)
        pred_sent_lst_fullbeam, pred_tok_lst, final_beam_seq, unpreds = cm.pklload(model_pred_path)
    # Keep only the highest-probability beam entry per sentence for BLEU scoring
    pred_sent_lst = []
    k = 0
    for i, pred_sent_beam in enumerate(pred_sent_lst_fullbeam):
        if len(pred_sent_beam) > 0:
            pred_sent_lst.append(pred_sent_beam[0])
        else:
            # BUG FIX: keep the hypothesis list aligned with the reference list --
            # sacrebleu.corpus_bleu() requires equally long hypothesis/reference
            # streams, so an empty beam contributes an empty hypothesis instead
            # of silently being dropped (which desynchronized the two lists).
            pred_sent_lst.append("")
            print(f"{i+1}th test sentence was empty: {unpreds[k]}")
            print("")
            k += 1
    # References are cached likewise; sacrebleu expects a list of reference streams
    bleu_ref_filepath = os.path.join(data_path, f"ref_bleu_test_{model_name1}.pkl")
    if not os.path.exists(bleu_ref_filepath):
        print("saving reference test sentences for target")
        corpus_test_refs = [test_refs]
        cm.pklsave(bleu_ref_filepath, corpus_test_refs)
    else:
        corpus_test_refs = cm.pklload(bleu_ref_filepath)
    model_bleu_score = sacrebleu.corpus_bleu(pred_sent_lst, corpus_test_refs)
    print("corpus bleu score:", model_bleu_score)
    return model_bleu_score, unpreds
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script builds the domain specific language model for the Public Transport Info domain (Czech)
The training procedure is as follows:
#. Append bootstrap text, possibly handwritten, to the text extracted from the indomain data.
#. Build a class based language model using the data generated in the previous step.
#. Score the general (domain independent) data.
#. Select 1M sentences with lowest perplexity given the class based language model.
#. Append the selected sentences to the training data generated in the 1. step.
#. Re-build the class based language model.
"""
if __name__ == '__main__':
import autopath
import os
import xml.dom.minidom
import glob
import codecs
import random
import alex.corpustools.lm as lm
import alex.utils.various as various
from alex.corpustools.text_norm_cs import normalise_text, exclude_lm
from alex.corpustools.wavaskey import save_wavaskey
def is_srilm_available():
    """Return True iff the SRILM 'ngram-count' binary can be found on PATH."""
    status = os.system("which ngram-count")
    return status == 0
def require_srilm():
    """Test whether SRILM is available in PATH, try to import it from env
    variable and exit the program in case there are problems with it."""
    if not is_srilm_available():
        # Not on PATH yet--give the SRILM_PATH environment variable a chance
        # before giving up
        if 'SRILM_PATH' in os.environ:
            srilm_path = os.environ['SRILM_PATH']
            # Append the user-supplied location to PATH and re-test
            os.environ['PATH'] += ':%s' % srilm_path
            if not is_srilm_available():
                # SRILM_PATH was set but the tools still are not found
                print 'SRILM_PATH you specified does not contain the ' \
                      'utilities needed. Please make sure you point to the ' \
                      'directory with the SRILM binaries.'
                exit(1)
        else:
            # No fallback available--tell the user how to fix it and bail out
            print 'SRILM not found. Set SRILM_PATH environment variable to ' \
                  'the path with SRILM binaries.'
            exit(1)
def exit_on_system_fail(cmd, msg=None):
    """Run *cmd* via os.system() and raise an Exception on a non-zero exit status.

    If *msg* is given, it is appended to the exception message as a hint."""
    status = os.system(cmd)
    if status != 0:
        err_msg = "Command failed, exitting."
        if msg:
            err_msg = "%s %s" % (err_msg, msg, )
        raise Exception(err_msg)
if __name__ == '__main__':
    # Fail fast unless the SRILM binaries (ngram-count, ngram,
    # replace-words-with-classes) are reachable on PATH.
    require_srilm()

    ###############################################################################################
    # Pipeline configuration
    ###############################################################################################
    # Fraction of the in-domain transcriptions used for training; the rest is dev.
    train_data_size = 0.90
    # Hand-written bootstrap sentences appended to the in-domain training text.
    bootstrap_text = "bootstrap.txt"
    # SRILM word-class definitions (built by the domain database).
    classes = "../data/database_SRILM_classes.txt"
    # Root directory searched recursively for 'asr_transcribed.xml' files.
    indomain_data_dir = "indomain_data"
    # General (domain-independent) Czech text data for the background model.
    gen_data = lm.download_general_LM_data('cs')

    # Output maps of wav file -> reference transcription for trn/dev.
    fn_pt_trn = "reference_transcription_trn.txt"
    fn_pt_dev = "reference_transcription_dev.txt"

    # Intermediate/output files; the numeric prefixes reflect pipeline order.
    gen_data_norm = '01_gen_data_norm.txt.gz'

    indomain_data_text_trn = "04_indomain_data_trn.txt"
    indomain_data_text_trn_norm = "04_indomain_data_trn_norm.txt"

    indomain_data_text_dev = "05_indomain_data_dev.txt"
    indomain_data_text_dev_norm = "05_indomain_data_dev_norm.txt"

    indomain_data_text_trn_norm_vocab = "06_indomain_data_trn_norm.txt.vocab"
    indomain_data_text_trn_norm_count1 = "06_indomain_data_trn_norm.txt.count1"
    indomain_data_text_trn_norm_pg_arpa = "06_indomain_data_trn_norm.txt.pg.arpa"

    indomain_data_text_trn_norm_cls = "07_indomain_data_trn_norm_cls.txt"
    indomain_data_text_trn_norm_cls_classes = "07_indomain_data_trn_norm_cls.classes"
    indomain_data_text_trn_norm_cls_vocab = "07_indomain_data_trn_norm_cls.vocab"
    indomain_data_text_trn_norm_cls_count1 = "07_indomain_data_trn_norm_cls.count1"
    indomain_data_text_trn_norm_cls_pg_arpa = "07_indomain_data_trn_norm_cls.pg.arpa"
    indomain_data_text_trn_norm_cls_pg_arpa_scoring = "10_indomain_data_trn_norm_cls.pg.arpa.gen_scoring.gz"

    gen_data_norm_selected = '11_gen_data_norm.selected.txt'

    extended_data_text_trn_norm = "20_extended_data_trn_norm.txt"
    extended_data_text_trn_norm_cls = "20_extended_data_trn_norm_cls.txt"
    extended_data_text_trn_norm_cls_classes = "20_extended_data_trn_norm_cls.classes"
    extended_data_text_trn_norm_cls_vocab = "20_extended_data_trn_norm_cls.vocab"
    extended_data_text_trn_norm_cls_count1 = "20_extended_data_trn_norm_cls.count1"
    extended_data_text_trn_norm_cls_pg_arpa = "20_extended_data_trn_norm_cls.pg.arpa"
    extended_data_text_trn_norm_cls_pg_arpa_filtered = "25_extended_data_trn_norm_cls.filtered.pg.arpa"

    expanded_lm_vocab = "26_expanded.vocab"
    expanded_lm_pg = "26_expanded.pg.arpa"

    # Interpolation weight handed to 'ngram -mix-lm ... -lambda'
    # (per SRILM, the weight of the main -lm model).
    mixing_weight = "0.8"
    mixed_lm_vocab = "27_mixed.vocab"
    mixed_lm_pg = "27_mixed.pg.arpa"

    final_lm_vocab = "final.vocab"
    final_lm_pg = "final.pg.arpa"
    final_lm_qg = "final.qg.arpa"
    final_lm_tg = "final.tg.arpa"
    final_lm_bg = "final.bg.arpa"
    final_lm_dict = "final.dict"
    final_lm_dict_sp_sil = "final.dict.sp_sil"

    print
    print "Data for the general language model:", gen_data
    print "-"*120
    ###############################################################################################

    if not os.path.exists(gen_data_norm):
        print "Normalizing general data"
        print "-"*120
        ###############################################################################################
        # Normalisation: split sentences on '. ', strip digits and punctuation,
        # collapse whitespace, uppercase everything (GNU sed \U&), and delete a
        # few stray characters. The lone "'" is spliced into the sed bracket
        # expression via %s to avoid quoting problems in the shell string.
        cmd = r"zcat %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g | gzip > %s" % \
            (gen_data,
             "'",
             gen_data_norm)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(indomain_data_text_trn_norm):
        print "Generating train and dev data"
        print "-"*120
        ###############################################################################################
        # Collect transcription files from up to five directory levels deep.
        files = []
        files.append(glob.glob(os.path.join(indomain_data_dir, 'asr_transcribed.xml')))
        files.append(glob.glob(os.path.join(indomain_data_dir, '*', 'asr_transcribed.xml')))
        files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', 'asr_transcribed.xml')))
        files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', 'asr_transcribed.xml')))
        files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', 'asr_transcribed.xml')))
        files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', '*', 'asr_transcribed.xml')))
        files = various.flatten(files)

        tt = []  # normalised transcription texts
        pt = []  # (wav path, transcription) pairs, parallel to tt
        for fn in files:
            # print "Processing:", fn
            doc = xml.dom.minidom.parse(fn)
            turns = doc.getElementsByTagName("turn")

            for turn in turns:
                recs_list = turn.getElementsByTagName("rec")
                trans_list = turn.getElementsByTagName("asr_transcription")

                if trans_list:
                    # Use the last (most recent) transcription of the turn.
                    trans = trans_list[-1]

                    t = various.get_text_from_xml_node(trans)
                    t = normalise_text(t)

                    # Skip utterances flagged as unusable for LM training.
                    if exclude_lm(t):
                        continue

                    # The silence does not have a label in the language model.
                    t = t.replace('_SIL_', '')
                    tt.append(t)

                    wav_file = recs_list[0].getAttribute('fname')
                    wav_path = os.path.realpath(os.path.join(os.path.dirname(fn), wav_file))
                    pt.append((wav_path, t))

        # Fixed seed => reproducible train/dev split across runs.
        random.seed(10)
        sf = [(a, b) for a, b in zip(tt, pt)]
        random.shuffle(sf)

        # Split by fraction, then sort each part by wav path for stable output order.
        sf_train = sorted(sf[:int(train_data_size*len(sf))], key=lambda k: k[1][0])
        sf_dev = sorted(sf[int(train_data_size*len(sf)):], key=lambda k: k[1][0])

        t_train = [a for a, b in sf_train]
        pt_train = [b for a, b in sf_train]

        t_dev = [a for a, b in sf_dev]
        pt_dev = [b for a, b in sf_dev]

        with codecs.open(indomain_data_text_trn,"w", "UTF-8") as w:
            w.write('\n'.join(t_train))
        with codecs.open(indomain_data_text_dev,"w", "UTF-8") as w:
            w.write('\n'.join(t_dev))

        save_wavaskey(fn_pt_trn, dict(pt_train))
        save_wavaskey(fn_pt_dev, dict(pt_dev))

        # train data
        # Same normalisation as for the general data, but underscores are kept
        # ([^[:alnum:]_]) so special tokens like _NOISE_ survive; the bootstrap
        # text is prepended to the in-domain training text.
        cmd = r"cat %s %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]_]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g > %s" % \
            (bootstrap_text,
             indomain_data_text_trn,
             "'",
             indomain_data_text_trn_norm)
        print cmd
        exit_on_system_fail(cmd)

        # dev data
        cmd = r"cat %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]_]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g > %s" % \
            (indomain_data_text_dev,
             "'",
             indomain_data_text_dev_norm)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(indomain_data_text_trn_norm_cls_pg_arpa):
        print "Generating class-based 5-gram language model from trn in-domain data"
        print "-"*120
        ###############################################################################################

        # convert surface forms to classes
        cmd = r"[ -e %s ] && replace-words-with-classes addone=10 normalize=1 outfile=%s classes=%s %s > %s || exit 1" % \
            (classes,
             indomain_data_text_trn_norm_cls_classes,
             classes,
             indomain_data_text_trn_norm,
             indomain_data_text_trn_norm_cls)
        print cmd
        exit_on_system_fail(cmd, "Maybe you forgot to run "
                                 "'../data/database.py build'?")

        # Count 5-grams with Witten-Bell discounting; also emit vocab and 1-gram counts.
        cmd = "ngram-count -text %s -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
            (indomain_data_text_trn_norm_cls,
             indomain_data_text_trn_norm_cls_vocab,
             indomain_data_text_trn_norm_cls_count1,
             indomain_data_text_trn_norm_cls_pg_arpa)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(indomain_data_text_trn_norm_pg_arpa):
        print
        print "Generating full 5-gram in-domain language model from in-domain data"
        print "-"*120

        cmd = "ngram-count -text %s -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
            (indomain_data_text_trn_norm,
             indomain_data_text_trn_norm_vocab,
             indomain_data_text_trn_norm_count1,
             indomain_data_text_trn_norm_pg_arpa)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(indomain_data_text_trn_norm_cls_pg_arpa_scoring):
        print
        print "Scoring general text data using the in-domain language model"
        print "-"*120
        ###############################################################################################
        # '-debug 1' makes ngram emit per-sentence scores, which the selection
        # step below consumes.
        exit_on_system_fail("ngram -lm %s -classes %s -order 5 -debug 1 -ppl %s | gzip > %s" % \
            (indomain_data_text_trn_norm_cls_pg_arpa,
             indomain_data_text_trn_norm_cls_classes,
             gen_data_norm,
             indomain_data_text_trn_norm_cls_pg_arpa_scoring))

    if not os.path.exists(gen_data_norm_selected):
        print
        print "Selecting similar sentences to in-domain data from general text data"
        print "-"*120
        ###############################################################################################
        exit_on_system_fail("zcat %s | ../../../corpustools/srilm_ppl_filter.py > %s " % (indomain_data_text_trn_norm_cls_pg_arpa_scoring, gen_data_norm_selected))

    if not os.path.exists(extended_data_text_trn_norm_cls_pg_arpa):
        print
        print "Training the in-domain model on the extended data"
        print "-"*120
        ###############################################################################################
        # Extended data = in-domain training text + selected general sentences.
        cmd = r"cat %s %s > %s" % (indomain_data_text_trn_norm, gen_data_norm_selected, extended_data_text_trn_norm)
        # cmd = r"cat %s > %s" % (indomain_data_text_trn_norm, extended_data_text_trn_norm)
        print cmd
        exit_on_system_fail(cmd)

        # convert surface forms to classes
        cmd = r"[ -e %s ] && replace-words-with-classes addone=10 normalize=1 outfile=%s classes=%s %s > %s || exit 1" % \
            (classes,
             extended_data_text_trn_norm_cls_classes,
             classes,
             extended_data_text_trn_norm,
             extended_data_text_trn_norm_cls)
        print cmd
        exit_on_system_fail(cmd, "Maybe you forgot to run "
                                 "'../data/database.py build'?")

        # '-vocab ... -limit-vocab' restricts counting to the in-domain class vocabulary.
        cmd = "ngram-count -text %s -vocab %s -limit-vocab -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
            (extended_data_text_trn_norm_cls,
             indomain_data_text_trn_norm_cls_vocab,
             extended_data_text_trn_norm_cls_vocab,
             extended_data_text_trn_norm_cls_count1,
             extended_data_text_trn_norm_cls_pg_arpa)
        print cmd
        exit_on_system_fail(cmd)

        # Drop ARPA lines whose n-gram contains two class tokens (CL_...CL_)...
        cmd = "cat %s | grep -v 'CL_[[:alnum:]_]\+[[:alnum:] _]\+CL_'> %s" % \
            (extended_data_text_trn_norm_cls_pg_arpa,
             extended_data_text_trn_norm_cls_pg_arpa_filtered)
        print cmd
        exit_on_system_fail(cmd)

        # ...then renormalise the pruned model (-renorm recomputes backoff weights).
        cmd = "ngram -lm %s -order 5 -write-lm %s -renorm" % \
            (extended_data_text_trn_norm_cls_pg_arpa_filtered,
             extended_data_text_trn_norm_cls_pg_arpa_filtered)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(expanded_lm_pg):
        print
        print "Expanding the language model"
        print "-"*120
        ###############################################################################################
        # Expand class tokens back into their member words, prune tiny-probability
        # n-grams, and renormalise.
        cmd = "ngram -lm %s -classes %s -order 5 -expand-classes 5 -write-vocab %s -write-lm %s -prune 0.0000001 -renorm" \
            % (extended_data_text_trn_norm_cls_pg_arpa_filtered,
               extended_data_text_trn_norm_cls_classes,
               expanded_lm_vocab,
               expanded_lm_pg)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(mixed_lm_pg):
        print
        print "Mixing the expanded class-based model and the full model"
        print "-"*120
        ###############################################################################################
        cmd = "ngram -lm %s -mix-lm %s -lambda %s -order 5 -write-vocab %s -write-lm %s -prune 0.00000001 -renorm" \
            % (expanded_lm_pg,
               indomain_data_text_trn_norm_pg_arpa,
               mixing_weight,
               mixed_lm_vocab,
               mixed_lm_pg)
        print cmd
        exit_on_system_fail(cmd)

    if not os.path.exists(final_lm_pg):
        print
        print "Building the final language models"
        print "-"*120
        ###############################################################################################
        # Derive pruned 5/4/3/2-gram versions of the mixed model.
        cmd = "ngram -lm %s -order 5 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
            % (mixed_lm_pg,
               final_lm_pg)
        print cmd
        exit_on_system_fail(cmd)

        cmd = "ngram -lm %s -order 4 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
            % (mixed_lm_pg,
               final_lm_qg)
        print cmd
        exit_on_system_fail(cmd)

        cmd = "ngram -lm %s -order 3 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
            % (mixed_lm_pg,
               final_lm_tg)
        print cmd
        exit_on_system_fail(cmd)

        cmd = "ngram -lm %s -order 2 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
            % (mixed_lm_pg,
               final_lm_bg)
        print cmd
        exit_on_system_fail(cmd)

        # Build the final vocabulary: keep ordinary words only (drop sentence
        # markers, -pau-, <unk>, class tokens, and anything with '{' or '_').
        cmd = "cat %s | grep -v '\-pau\-' | grep -v '<s>' | grep -v '</s>' | grep -v '<unk>' | grep -v 'CL_' | grep -v '{' | grep -v '_' > %s" % \
            (mixed_lm_vocab,
             final_lm_vocab)
        print cmd
        exit_on_system_fail(cmd)

        # Start with an (almost) empty dictionary file...
        cmd = "echo '' > {dict}".format(dict=final_lm_dict)
        print cmd
        exit_on_system_fail(cmd)

        # ...then fill it with automatic Czech phonetic transcriptions.
        cmd = "perl ../../../tools/htk/bin/PhoneticTranscriptionCS.pl %s %s" % \
            (final_lm_vocab,
             final_lm_dict)
        print cmd
        exit_on_system_fail(cmd)

    ###############################################################################################
    # Report perplexities of every intermediate and final model on trn/dev data.
    print
    print "Test language models"
    print "-"*120
    print "Class-based trn 5-gram LM on trn data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -classes %s -order 5 -ppl %s" % (indomain_data_text_trn_norm_cls_pg_arpa,
                                                                      indomain_data_text_trn_norm_cls_classes,
                                                                      indomain_data_text_trn_norm))
    print
    print "-"*120
    print "Full trn 5-gram LM on trn data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 5 -ppl %s" % (indomain_data_text_trn_norm_pg_arpa, indomain_data_text_trn_norm))
    print
    print
    print "-"*120
    print "Class-based trn 5-gram LM on dev data."
    print "-"*120
    # '-zeroprob-word _NOISE_' maps zero-probability words onto _NOISE_ so the
    # perplexity stays finite on unseen dev tokens.
    exit_on_system_fail("ngram -lm %s -classes %s -order 5 -ppl %s -zeroprob-word _NOISE_" % (indomain_data_text_trn_norm_cls_pg_arpa,
                                                                                             indomain_data_text_trn_norm_cls_classes,
                                                                                             indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Extended class-based trn 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -classes %s -order 5 -ppl %s -zeroprob-word _NOISE_" % (extended_data_text_trn_norm_cls_pg_arpa,
                                                                                             extended_data_text_trn_norm_cls_classes,
                                                                                             indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Extended filtered class-based trn 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -classes %s -order 5 -ppl %s -zeroprob-word _NOISE_" % (extended_data_text_trn_norm_cls_pg_arpa_filtered,
                                                                                             extended_data_text_trn_norm_cls_classes,
                                                                                             indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Expanded class-based trn 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 5 -ppl %s -zeroprob-word _NOISE_" % (expanded_lm_pg, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Full trn 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 5 -ppl %s" % (indomain_data_text_trn_norm_pg_arpa, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Mixed trn 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 5 -ppl %s" % (mixed_lm_pg, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Final 5-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 5 -ppl %s" % (final_lm_pg, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Final 4-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -order 4 -ppl %s" % (final_lm_qg, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Final 3-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -ppl %s" % (final_lm_tg, indomain_data_text_dev_norm))
    print
    print "-"*120
    print "Final 2-gram LM on dev data."
    print "-"*120
    exit_on_system_fail("ngram -lm %s -ppl %s" % (final_lm_bg, indomain_data_text_dev_norm))
    print
|
# coding: utf-8
# Python libs
from __future__ import absolute_import
import datetime
# Salt libs
from salt.beacons import telegram_bot_msg
# Salt testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Third-party libs
try:
import telegram
HAS_TELEGRAM = True
except ImportError:
HAS_TELEGRAM = False
import logging
log = logging.getLogger(__name__)
@skipIf(not HAS_TELEGRAM, 'telegram is not available')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test case for the salt.beacons.telegram_bot_msg beacon.

    The ``validate`` tests exercise configuration checking; the ``beacon``
    tests mock the ``telegram`` API object and verify which incoming updates
    are reported for the configured ``accept_from`` usernames.
    '''
    def setup_loader_modules(self):
        # The beacon module needs no injected loader globals.
        return {telegram_bot_msg: {}}

    def test_validate_empty_config(self, *args, **kwargs):
        # A non-list configuration must be rejected.
        ret = telegram_bot_msg.validate(None)
        self.assertEqual(ret, (False, ('Configuration for telegram_bot_msg '
                                       'beacon must be a list.')))

    def test_validate_missing_accept_from_config(self, *args, **kwargs):
        # 'accept_from' is required alongside 'token'.
        ret = telegram_bot_msg.validate([{
            'token': 'bcd'
        }])
        self.assertEqual(ret, (False, ('Not all required configuration for '
                                       'telegram_bot_msg are set.')))

    def test_validate_missing_token_config(self, *args, **kwargs):
        # 'token' is required alongside 'accept_from'.
        ret = telegram_bot_msg.validate([{
            'accept_from': []
        }])
        self.assertEqual(ret, (False, ('Not all required configuration for '
                                       'telegram_bot_msg are set.')))

    def test_validate_config_not_list_in_accept_from(self, *args, **kwargs):
        # 'accept_from' must be a list of usernames, not a mapping.
        ret = telegram_bot_msg.validate([{
            'token': 'bcd',
            'accept_from': {'nodict': "1"}
        }])
        self.assertEqual(ret, (False, ('Configuration for telegram_bot_msg, '
                                       'accept_from must be a list of '
                                       'usernames.')))

    def test_validate_valid_config(self, *args, **kwargs):
        ret = telegram_bot_msg.validate([{
            'token': 'bcd',
            'accept_from': [
                'username'
            ]
        }])
        self.assertEqual(ret, (True, 'Valid beacon configuration.'))

    def test_call_no_updates(self):
        with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
            token = 'abc'
            config = [{
                'token': token,
                'accept_from': ['tester']
            }]
            inst = MagicMock(name='telegram.Bot()')
            telegram_api.Bot = MagicMock(name='telegram', return_value=inst)
            inst.get_updates.return_value = []

            # BUG FIX: the original compared the *beacon* return value against
            # the validate() tuple and then against [] -- two contradictory
            # assertions on the same value.  Validate the configuration first,
            # then run the beacon.  (Expected message matches
            # test_validate_valid_config above.)
            ret = telegram_bot_msg.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration.'))

            ret = telegram_bot_msg.beacon(config)
            telegram_api.Bot.assert_called_once_with(token)
            # No updates from the API -> beacon reports nothing.
            self.assertEqual(ret, [])

    def test_call_telegram_return_no_updates_for_user(self):
        with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
            token = 'abc'
            username = 'tester'
            config = [{
                'token': token,
                'accept_from': [username]
            }]
            inst = MagicMock(name='telegram.Bot()')
            telegram_api.Bot = MagicMock(name='telegram', return_value=inst)

            # An update from a user NOT in accept_from must be filtered out.
            username = 'different_user'
            user = telegram.user.User(id=1, first_name='', username=username)
            chat = telegram.chat.Chat(1, 'private', username=username)
            date = datetime.datetime(2016, 12, 18, 0, 0)
            message = telegram.message.Message(1, user, date=date, chat=chat)
            update = telegram.update.Update(update_id=1, message=message)
            inst.get_updates.return_value = [update]

            # BUG FIX: validate explicitly before running the beacon (the
            # original asserted the beacon result against the validate tuple).
            ret = telegram_bot_msg.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration.'))

            ret = telegram_bot_msg.beacon(config)
            telegram_api.Bot.assert_called_once_with(token)
            self.assertEqual(ret, [])

    def test_call_telegram_returning_updates(self):
        with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
            token = 'abc'
            username = 'tester'
            config = [{
                'token': token,
                'accept_from': [username]
            }]
            inst = MagicMock(name='telegram.Bot()')
            telegram_api.Bot = MagicMock(name='telegram', return_value=inst)

            # An update from an accepted user must be reported by the beacon.
            user = telegram.User(id=1, first_name='', username=username)
            chat = telegram.Chat(1, 'private', username=username)
            date = datetime.datetime(2016, 12, 18, 0, 0)
            message = telegram.Message(1, user, date=date, chat=chat)
            update = telegram.update.Update(update_id=1, message=message)
            inst.get_updates.return_value = [update]

            # BUG FIX: validate explicitly before running the beacon (the
            # original asserted the beacon result against the validate tuple).
            ret = telegram_bot_msg.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration.'))

            ret = telegram_bot_msg.beacon(config)
            telegram_api.Bot.assert_called_once_with(token)
            self.assertTrue(ret)
            self.assertEqual(ret[0]['msgs'][0], message.to_dict())
|
import sys
import nltk
import sklearn
import pandas as pd
import numpy as np
# Load the SMS Spam Collection corpus: tab-separated, no header row;
# column 0 holds the label ('ham'/'spam'), column 1 the raw message text.
df= pd.read_table('SMSSpamCollection',header= None, encoding='utf-8')
classes = df[0]
# Show the class balance (ham vs spam counts).
print(classes.value_counts())
#Preprocess the data
"""
0= ham
1=spam
for this we use label encoder
"""
# Encode the string labels as integers for the classifiers below.
from sklearn.preprocessing import LabelEncoder
encoder=LabelEncoder()
Y=encoder.fit_transform(classes)
# Store the raw SMS text (column 1 of the corpus).
text_messages = df[1]

# Normalise the text by collapsing whole token classes into placeholder words.
# BUG FIX: `regex=True` is now passed explicitly.  pandas >= 2.0 defaults
# Series.str.replace to literal (regex=False) matching, which would silently
# stop every pattern below from matching.
# Replace email addresses with 'emailaddr'.
processed = text_messages.str.replace(r'^.+@[^\.].*\.[a-z]{2,}$', 'emailaddr', regex=True)
# Replace URLs with 'webaddress'.
processed = processed.str.replace(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$', 'webaddress', regex=True)
# Replace money symbols with 'moneysymb'.
processed = processed.str.replace(r'£|\$', 'moneysymb', regex=True)
# Replace 10-digit phone numbers with 'phonenumber'.
processed = processed.str.replace(r'^\(?[\d]{3}\)?[\s-]?[\d]{3}[\s-]?[\d]{4}$', 'phonenumber', regex=True)
# Replace remaining numbers with 'numbr'.
processed = processed.str.replace(r'\d+(\.\d+)?', 'numbr', regex=True)
# Remove punctuation, collapse runs of whitespace, and lowercase everything.
processed = processed.str.replace(r'[^\w\d\s]', '', regex=True)
processed = processed.str.replace(r'\s+', ' ', regex=True)
processed = processed.str.lower()
# remove stop words
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# Drop common English function words that carry little classification signal.
stop_words=set(stopwords.words('english'))
processed=processed.apply(lambda x : ' '.join(term for term in x.split() if term not in stop_words ))
# Stemming - like,likes,liked ~like
ps=nltk.PorterStemmer()
processed=processed.apply(lambda x : ' '.join(ps.stem(term) for term in x.split()))
#Tokenizing
nltk.download('punkt')
from nltk.tokenize import word_tokenize
# Build a corpus-wide token frequency distribution.
all_words=[]
for message in processed:
    words=word_tokenize(message)
    for w in words:
        all_words.append(w)
all_words= nltk.FreqDist(all_words)
#print the total number of words and 15 most common words
'''
print('Number of words:{}'.format(len(all_words)))
print('Most Common Words:{}'.format(all_words.most_common(15)))
'''
#using the 1500 most common word as features
# NOTE(review): FreqDist keys iterate in insertion order, not frequency
# order, so this actually takes the first 1500 words *encountered*, not the
# 1500 most common -- confirm intent (all_words.most_common(1500) would give
# the most common ones).
word_features=list(all_words.keys())[:1500]
#defining find a feature function
def find_features(message):
    """Return a {feature_word: present} mapping for *message*.

    A feature is True when the corresponding entry of the module-level
    ``word_features`` list occurs among the message's tokens.
    """
    tokens = word_tokenize(message)
    return {feature: (feature in tokens) for feature in word_features}
#example
# Sanity check: print every feature word present in the first message.
features = find_features(processed[0])
for key,value in features.items():
    if value == True:
        print(key)
# zipper method for appending i/p - o/p
def zipper(x, y):
    """Pair *x* and *y* element-wise, truncating to the shorter input.

    Equivalent to ``list(zip(x, y))``; kept as a named helper because the
    rest of the script calls it by this name.  The original hand-rolled the
    index loop that the builtin already provides.
    """
    return list(zip(x, y))
#find features for all this messages
messages = zipper(processed, Y)

#define a seed for reproducibility
seed = 1
# BUG FIX: the original did `np.random.seed = seed`, which *replaced* the
# seeding function with the integer 1 instead of seeding the RNG -- the
# shuffle below was therefore not reproducible, and any later call to
# np.random.seed(...) would crash.  Call the function instead.
np.random.seed(seed)
np.random.shuffle(messages)

# Convert each (text, label) pair into (feature_dict, label).
featuresets = [(find_features(text), label) for (text, label) in messages]

#split training and testing data using sklearn
from sklearn import model_selection
training, testing = model_selection.train_test_split(featuresets, test_size=0.25, random_state=seed)
'''
print('Training: {}'.format(len(training)))
print('Testing: {}'.format(len(testing)))
'''
#Scikitlearn classifiers with nltk
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report,accuracy_score,confusion_matrix
#Define models to train and comparing best model on its accuracy
names=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']
classifiers=[
    KNeighborsClassifier(),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    LogisticRegression(),
    SGDClassifier(max_iter=100),
    MultinomialNB(),
    SVC(kernel='linear')
]
models = zipper(names,classifiers)
#Wrap models in nltk and find their accuracy then select best method
# SklearnClassifier adapts a scikit-learn estimator to nltk's
# (feature_dict, label) training format.
from nltk.classify.scikitlearn import SklearnClassifier
for name,model in models:
    nltk_model=SklearnClassifier(model)
    nltk_model.train(training)
    accuracy=nltk.classify.accuracy(nltk_model,testing)*100
    print('{}: Accuracy: {}'.format(name,accuracy))
#ensemble method -- Voting Classifier for better accuracy
from sklearn.ensemble import VotingClassifier
# NOTE(review): names/classifiers are rebuilt with identical contents so the
# ensemble receives fresh (unfitted) estimator instances; the duplication is
# otherwise redundant.
names=['K Nearest Neighbors','Decision Tree','Random Forest','Logistic Regression','SGD Classifier','Naive Bayes','SVM Linear']
classifiers=[
    KNeighborsClassifier(),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    LogisticRegression(),
    SGDClassifier(max_iter=100),
    MultinomialNB(),
    SVC(kernel='linear')
]
models = zipper(names,classifiers)
# n_jobs=-1 means all algo can run in parallel
nltk_ensemble= SklearnClassifier(VotingClassifier(estimators=models,voting='hard',n_jobs= -1))
nltk_ensemble.train(training)
accuracy=nltk.classify.accuracy(nltk_ensemble,testing)*100
print('Ensemble Method Accuracy: {}'.format(accuracy))
#make class label predictions
txt_features,labels=zip(*testing)
prediction = nltk_ensemble.classify_many(txt_features)
#print a confusion matrix and a classification report
print(classification_report(labels,prediction))
# NOTE(review): this DataFrame is built but never printed or assigned -- it
# only renders in an interactive (notebook) session.
pd.DataFrame(
    confusion_matrix(labels,prediction),
    index=[['actual','actual'],['ham','spam']],
    columns=[['predicted','predicted'],['ham','spam']]
)
|
<filename>SiteAlert_bot.py
__author__ = 'iLTeoooD'
from contextlib import redirect_stdout
from io import StringIO

from telebot import types

from SiteAlert import *
# The bot token comes from the environment so it never lives in source control.
TOKEN = os.environ['SITE_ALERT_TOKEN']
# Shared backend wrapping the sites/users database and the site checks.
site_alert = SiteAlert()
leng = ""
# Per-chat scratch storage for multi-step dialogues (/check: name -> link).
Array = {}
# Markup that removes any custom reply keyboard from the chat.
gen_markup = types.ReplyKeyboardRemove(selective=False)
# Welcome/help text appended after the personalised greeting in help().
wlcm_msg = "!\nWelcome to @SiteAlert_bot.\nCommands available:\n/ping - Pong\n/show - Print the list of saved sites\n/check - Check new website\n/addme - Notify me on an already registered site\n/removeme - Reverse action\n/register - Register your email\n/registered - Check if you are alredy registered, and show your subscribed sites\n/unregister - Delete your registration\n/link - Print the link associated to a website\n/mailoff - Disable mail notification\n/mailon - Reverse action\n/telegramoff - Disable telegram notification\n/telegramon - Reverse action\n/help - Print help message"
tb = telebot.TeleBot(TOKEN)
def overrideStdout(funcName, msg, credentials, nameSite="", link=""):
    """Run a SiteAlert call that prints to stdout and return what it printed.

    funcName selects the operation: "show" lists the saved sites, "check"
    registers a new site.  credentials[0] is the caller's e-mail and
    msg.chat.id the Telegram chat id (both only used by "check").
    Any other funcName is a no-op and returns "".
    """
    result = StringIO()
    # redirect_stdout restores sys.stdout even when the wrapped call raises;
    # the original manual swap left stdout redirected on error.
    with redirect_stdout(result):
        if funcName == "show":
            site_alert.display_sites()
        elif funcName == "check":
            site_alert.add_site(nameSite, link, credentials[0], msg.chat.id)
    return result.getvalue()
@tb.message_handler(commands=['ping'])
def ping(m):
    # Liveness probe: always answer with a constant pong.
    chat_id = m.chat.id
    tb.send_message(chat_id, "Pong")
@tb.message_handler(commands=['show'])
def show(m):
    # Reply with the captured output of SiteAlert.display_sites().
    listing = overrideStdout("show", m, "")
    tb.send_message(m.chat.id, listing)
@tb.message_handler(commands=['check'])
def check(m):
    """Start the two-step dialogue that registers a new site to watch."""
    rows = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))
    # Only registered users may add sites.
    if not rows:
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
        return
    prompt = tb.send_message(m.chat.id, "Ok, how we should call it?")
    tb.register_next_step_handler(prompt, ck1)
def ck1(m):
    """Step 1 of /check: remember the chosen display name for this chat."""
    # Reject commands (leading "/") as site names.
    if m.text.startswith("/"):
        tb.send_message(m.chat.id, "Invalid name.")
        return
    Array[m.chat.id] = m.text
    prompt = tb.send_message(m.chat.id, "Ok, got it.\nNow send the link of the website:")
    tb.register_next_step_handler(prompt, ck2)
def ck2(m):
    # Step 2 of /check: m.text is the site URL; the display name was stored
    # in Array by ck1.  Commands (leading "/") are rejected.
    if not m.text.startswith("/"):
        # Caller was verified as registered by check(); row[0] is the e-mail.
        credentials = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))[0]
        tb.send_message(m.chat.id, overrideStdout("check", m, credentials, Array[m.chat.id], m.text))
        # Clean up the per-chat dialogue state.
        del Array[m.chat.id]
    else:
        tb.send_message(m.chat.id, "Invalid name.")
@tb.message_handler(commands=['addme'])
def addme(m):
    # Subscribe the caller to an existing site.  Only registered users may
    # subscribe; the keyboard offers every site they are NOT yet subscribed to.
    credentials = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))
    if len(credentials) > 0:
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        # All known sites minus the ones this user is already registered to.
        sites = site_alert.execute_fetch_all(
            "SELECT name FROM SiteAlert EXCEPT SELECT name FROM Registered, Users WHERE Registered.mail = Users.mail AND telegram = ? ORDER BY name COLLATE NOCASE",
            (m.chat.id,))
        for site in sites:
            markup.add(site[0])
        msg = tb.send_message(m.chat.id, "Ok, to...?", reply_markup=markup)
        tb.register_next_step_handler(msg, am)
    else:
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
def am(m):
    # Second step of /addme: m.text is the chosen site name.
    sites = site_alert.execute_fetch_all("SELECT * FROM SiteAlert WHERE name=?", (m.text,))
    if len(sites) > 0:
        # Caller was verified as registered by addme(); row[0] is the e-mail.
        credentials = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))[0]
        try:
            site_alert.execute_query("INSERT INTO Registered VALUES(?, ?)", (m.text, credentials[0]))
            tb.send_message(m.chat.id, "Action completed successfully!", reply_markup=gen_markup)
        except sqlite3.IntegrityError:
            # Duplicate insert -> presumably (name, mail) is a unique key in
            # Registered, so the user is already subscribed.
            tb.send_message(m.chat.id, "You are already registered to this site!", reply_markup=gen_markup)
    elif not m.text.startswith("/"):
        tb.send_message(m.chat.id, "Invalid input.")
@tb.message_handler(commands=['removeme'])
def removeme(m):
    # Unsubscribe the caller from a site; the keyboard offers only sites the
    # user is currently subscribed to.
    credentials = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))
    if len(credentials) > 0:
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        sites = site_alert.execute_fetch_all(
            "SELECT name FROM Registered, Users WHERE Registered.mail = Users.mail AND telegram = ? ORDER BY name COLLATE NOCASE",
            (m.chat.id,))
        for site in sites:
            markup.add(site[0])
        msg = tb.send_message(m.chat.id, "Ok, from...?", reply_markup=markup)
        tb.register_next_step_handler(msg, rm)
    else:
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
def rm(m):
    # Second step of /removeme: m.text is the site name to unsubscribe from.
    sites = site_alert.execute_fetch_all("SELECT * FROM SiteAlert WHERE name=?", (m.text,))
    if len(sites) > 0:
        # Caller was verified as registered by removeme(); row[0] is the e-mail.
        credentials = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))[0]
        site_alert.execute_query("DELETE FROM Registered WHERE mail=? AND name=?", (credentials[0], m.text))
        tb.send_message(m.chat.id, "Action completed successfully!", reply_markup=gen_markup)
    elif not m.text.startswith("/"):
        tb.send_message(m.chat.id, "Invalid input.")
@tb.message_handler(commands=['register'])
def register(m):
    """Begin e-mail registration unless the chat is already registered."""
    existing = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))
    if existing:
        tb.send_message(m.chat.id, "User already registered.\nUse /registered")
        return
    prompt = tb.send_message(m.chat.id, "Tell me your e-mail: ")
    tb.register_next_step_handler(prompt, reg)
def reg(m):
    """Validate the e-mail sent in reply to /register and create the account."""
    # Raw string for the regex: the original plain string contained '\.',
    # an unrecognised escape that modern Pythons flag (SyntaxWarning) even
    # though it happened to pass through unchanged.
    if re.match(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", m.text) is not None:
        # Columns: mail, telegram id, mailnotification, telegramnotification
        # (both notification channels enabled by default).
        site_alert.execute_query("INSERT INTO Users VALUES(?,?,'True','True')", (m.text, m.chat.id))
        tb.send_message(m.chat.id, "Action completed successfully!")
    else:
        tb.send_message(m.chat.id, "Invalid e-mail.")
@tb.message_handler(commands=['registered'])
def registered(m):
    """Report the caller's e-mail, notification flags and subscribed sites."""
    rows = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram =?", (m.chat.id,))
    if rows:
        # BUG FIX: rows[0] is a one-column row (tuple); the original
        # concatenated the tuple itself into the reply string, raising
        # TypeError.  Extract the mail value and pass an explicit 1-tuple
        # as the query parameters.
        mail = rows[0][0]
        mymsg = "You have registered this e-mail: " + mail + "\nYour notification status is:\nE-mail: "
        status = site_alert.execute_fetch_all("SELECT mailnotification FROM Users WHERE mail = ?", (mail,))[0]
        mymsg += status[0] + "\nTelegram: "
        status = \
            site_alert.execute_fetch_all("SELECT telegramnotification FROM Users WHERE mail = ?", (mail,))[0]
        mymsg += status[0] + "\nYou are registered to:"
        # Enumerate the subscribed sites as a numbered list.
        i = 1
        for site in site_alert.execute_fetch_all("SELECT name FROM Registered WHERE mail = ?", (mail,)):
            mymsg = mymsg + "\n" + str(i) + ") " + site[0]
            i += 1
        tb.send_message(m.chat.id, mymsg)
    else:
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['link'])
def link(m):
    """Ask which site's URL to show, offering every known site as a button."""
    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    for row in site_alert.execute_fetch_all("SELECT name FROM SiteAlert ORDER BY name COLLATE NOCASE", ()):
        keyboard.add(row[0])
    prompt = tb.send_message(m.chat.id, "Of which site?", reply_markup=keyboard)
    tb.register_next_step_handler(prompt, lk)
def lk(m):
    """Reply with the link stored for the site named in m.text."""
    try:
        # [0] raises IndexError when the SELECT returns no rows.
        row = site_alert.execute_fetch_all("SELECT link FROM SiteAlert WHERE name = ?", (m.text,))[0]
    except IndexError:
        # Unknown site name.  (The original caught bare Exception, which
        # also swallowed unrelated send/database errors.)
        tb.send_message(m.chat.id, "Invalid link.", reply_markup=gen_markup)
    else:
        tb.send_message(m.chat.id, "To " + m.text + " corresponds: " + row[0], reply_markup=gen_markup)
@tb.message_handler(commands=['unregister'])
def unregister(m):
    """Delete the caller's account and all of their site subscriptions."""
    rows = site_alert.execute_fetch_all("SELECT mail FROM Users WHERE telegram = ?", (m.chat.id,))
    # BUG FIX: the original indexed [0] *before* checking, so an
    # unregistered user raised IndexError and the "You must be registered"
    # branch was unreachable (fetch results are lists, never None).
    if rows:
        mail = rows[0]
        site_alert.execute_query("DELETE FROM Users WHERE mail = ?", (mail[0],))
        site_alert.execute_query("DELETE FROM Registered WHERE mail = ?", (mail[0],))
        tb.send_message(m.chat.id, "Action completed successfully!", reply_markup=gen_markup)
    else:
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['mailoff'])
def mailoff(m):
    # Disable e-mail notifications for the caller's account.
    try:
        site_alert.execute_query("UPDATE Users SET mailnotification = 'False' WHERE telegram = ?", (m.chat.id,))
        tb.send_message(m.chat.id, "Action completed successfully!")
    except sqlite3.IntegrityError:
        # NOTE(review): an UPDATE matching no rows does not normally raise
        # IntegrityError, so unregistered users likely get the success
        # message -- confirm against SiteAlert.execute_query's behaviour.
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['mailon'])
def mailon(m):
    # Re-enable e-mail notifications (inverse of /mailoff).
    try:
        site_alert.execute_query("UPDATE Users SET mailnotification = 'True' WHERE telegram = ?", (m.chat.id,))
        tb.send_message(m.chat.id, "Action completed successfully!")
    except sqlite3.IntegrityError:
        # NOTE(review): see mailoff() -- IntegrityError is unlikely for an
        # UPDATE; the registration check may never trigger.
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['telegramoff'])
def telegramoff(m):
    # Disable Telegram notifications for the caller's account.
    try:
        site_alert.execute_query("UPDATE Users SET telegramnotification = 'False' WHERE telegram = ?", (m.chat.id,))
        tb.send_message(m.chat.id, "Action completed successfully!")
    except sqlite3.IntegrityError:
        # NOTE(review): see mailoff() -- IntegrityError is unlikely for an
        # UPDATE; the registration check may never trigger.
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['telegramon'])
def telegramon(m):
    # Re-enable Telegram notifications (inverse of /telegramoff).
    try:
        site_alert.execute_query("UPDATE Users SET telegramnotification = 'True' WHERE telegram = ?", (m.chat.id,))
        tb.send_message(m.chat.id, "Action completed successfully!")
    except sqlite3.IntegrityError:
        # NOTE(review): see mailoff() -- IntegrityError is unlikely for an
        # UPDATE; the registration check may never trigger.
        tb.send_message(m.chat.id, "You must be registered.\nUse /register")
@tb.message_handler(commands=['cancel'])
def cancel(m):
    # Abort any pending dialogue step and clear the custom keyboard.
    farewell = "Ok, I forgot everything!"
    tb.send_message(m.chat.id, farewell, reply_markup=gen_markup)
@tb.message_handler(commands=['help', 'start'])
def help(m):
    # Greet by personal name when available, otherwise by group/channel title,
    # then append the command overview (wlcm_msg).
    # NOTE: shadows the builtin help(); harmless here since it is only
    # invoked as a Telegram handler.
    if m.chat.first_name is not None:
        if m.chat.last_name is not None:
            tb.send_message(m.chat.id, "Hello, " + m.chat.first_name + " " + m.chat.last_name + wlcm_msg)
        else:
            tb.send_message(m.chat.id, "Hello, " + m.chat.first_name + wlcm_msg)
    else:
        tb.send_message(m.chat.id, "Hello, " + m.chat.title + wlcm_msg)
# Blocking main loop: keep long-polling Telegram; none_stop keeps polling
# alive across API errors instead of exiting.
tb.polling(none_stop=True)
|
<reponame>d4nnyk/DarunGrim
import sys
sys.path.append(r'.')
sys.path.append(r'T:\mat\Projects\ResearchTools\Binary\StaticAnalysis\DarunGrim2\Bin\DarunGrim2')
sys.path.append(r'..')
sys.path.append(r'..\Diff Inspector')
import os
import PatchTimeline
import DarunGrimEngine
import PatchDatabaseWrapper
import DarunGrimAnalyzers
import DarunGrimDatabaseWrapper
# Global registry of live Differ objects, keyed by "<source_id>_<target_id>".
Differs = {}
class Manager:
DebugLevel = 1
SourceFileName = ''
TargetFileName = ''
LogFilename = None
LogFilenameForSource = None
LogFilenameForTarget = None
def __init__( self, databasename = 'test.db', binary_store_directory = r'c:\mat\Projects\Binaries', output_directory = r'C:\mat\Projects\DGFs',ida_path = None ):
self.DatabaseFilename = databasename
self.BinariesStorageDirectory = binary_store_directory
self.OutputDirectory = output_directory
self.IDAPath = None
if ida_path:
if os.path.isfile( ida_path ):
self.IDAPath = ida_path
if not self.IDAPath:
for filename in ( r'C:\Program Files\IDA\idag.exe', r'C:\Program Files (x86)\IDA\idag.exe' ):
if os.path.isfile( filename ):
self.IDAPath = filename
break
self.InstallPlugin()
if not os.path.isdir( self.OutputDirectory ):
os.makedirs( self.OutputDirectory )
if self.DebugLevel > 2:
print 'DatabaseFilename=', self.DatabaseFilename
print 'BinariesStorageDirectory=', self.BinariesStorageDirectory
print 'OutputDirectory=', self.OutputDirectory
print 'IDAPath=', self.IDAPath
self.PatchTimelineAnalyzer = PatchTimeline.Analyzer( self.DatabaseFilename )
def InstallPlugin( self ):
plugins_dst_dir = os.path.join( os.path.dirname( self.IDAPath ), "plugins" )
if not os.path.isdir( plugins_dst_dir ):
plugins_dst_dir = None
for one_plugins_dst_dir in ( r'C:\Program Files\IDA\plugins', r'C:\Program Files (x86)\IDA\plugins' ):
if os.path.isdir( one_plugins_dst_dir ):
plugins_dst_dir = one_plugins_dst_dir
if self.DebugLevel > 2:
print 'plugins_dst_dir=',plugins_dst_dir
if plugins_dst_dir:
#copy r'Plugin\*.plw -> plugins_dst_dir
plugins_src_dir = 'Plugin'
plugin_file = 'DarunGrim2.plw'
src_file = os.path.join( plugins_src_dir, plugin_file )
dst_file = os.path.join( plugins_dst_dir, plugin_file )
if self.DebugLevel > 2:
print 'Checking', src_file, '->', dst_file
if os.path.isfile( src_file ) and not os.path.isfile( dst_file ):
if self.DebugLevel > 0:
print 'Copying', src_file, '->', dst_file
import shutil
shutil.copyfile( src_file, dst_file )
def InitMSFileDiff( self, patch_name, filename ):
if self.DebugLevel > 0:
print 'Analyzing', patch_name, filename
for ( target_patch_name, target_file_entry, source_patch_name, source_file_entry ) in self.PatchTimelineAnalyzer.GetPatchPairsForAnalysis( filename, patch_name ):
if self.DebugLevel > 0:
print '='*80
print target_patch_name,source_patch_name
source_filename = source_file_entry['full_path']
target_filename = target_file_entry['full_path']
if self.DebugLevel > 0:
print source_patch_name, source_filename, target_patch_name, target_filename
differ = self.InitFileDiff( source_patch_name, source_filename, target_patch_name, target_filename )
def InitFileDiffByID( self, source_id, target_id, databasename = None, reset_database = False ):
database = PatchDatabaseWrapper.Database( self.DatabaseFilename )
source_file_entries = database.GetFileByID( source_id )
if self.DebugLevel > 0:
print 'source', source_id, source_file_entries
if source_file_entries and len(source_file_entries) > 0:
source_patch_name = 'None'
if source_file_entries[0].downloads and source_file_entries[0].downloads.patches.name:
source_patch_name = source_file_entries[0].downloads.patches.name
source_filename = os.path.join( self.BinariesStorageDirectory, source_file_entries[0].full_path )
target_file_entries = database.GetFileByID( target_id )
if self.DebugLevel > 0:
print target_id, target_file_entries
target_patch_name = 'None'
if target_file_entries and len(target_file_entries) > 0:
if target_file_entries[0].downloads and target_file_entries[0].downloads.patches.name:
target_patch_name = target_file_entries[0].downloads.patches.name
target_filename = os.path.join( self.BinariesStorageDirectory, target_file_entries[0].full_path )
if not databasename:
databasename = self.GetDefaultDatabasename( source_id, target_id )
if reset_database:
self.RemoveDiffer( source_id, target_id )
diff = None
if source_patch_name and source_filename and target_patch_name and target_filename and databasename:
self.SourceFileName = source_filename
self.TargetFileName = target_filename
differ = self.InitFileDiff( source_patch_name, source_filename, target_patch_name, target_filename, databasename, reset_database )
self.SetDiffer( source_id, target_id, differ )
return differ
def GetDefaultDatabasename( self, source_id, target_id ):
databasename = str( source_id ) + '_' + str( target_id ) + ".dgf"
return databasename
def SetDiffer( self, source_id, target_id, differ ):
global Differs
Differs[ str( source_id ) + '_' + str( target_id ) ] = differ
def RemoveDiffer( self, source_id, target_id ):
key = str( source_id ) + '_' + str( target_id )
global Differs
if Differs.has_key( key ):
print 'Removing', key
differ = Differs[ key ]
del differ
del Differs[ key ]
def GetDiffer( self, source_id, target_id ):
key = str( source_id ) + '_' + str( target_id )
global Differs
if Differs.has_key( key ):
return Differs[ key ]
return None
def InitFileDiff( self, source_patch_name = '', source_filename = '', target_patch_name = '', target_filename = '', databasename = '', reset_database = False ):
if self.DebugLevel > 10:
print '='*80
print 'source_patch_name=',source_patch_name
print 'source_filename=',source_filename
print 'target_patch_name=',target_patch_name
print 'target_filename=',target_filename
print 'databasename=',databasename
base_filename = os.path.basename( source_filename )
dot_pos = base_filename.find('.')
if dot_pos >= 0:
base_filename = base_filename[:dot_pos]
if not databasename:
prefix = target_patch_name + '-' + source_patch_name + '-' + base_filename
databasename = prefix + ".dgf"
full_databasename = os.path.join( self.OutputDirectory , databasename )
log_filename = os.path.join( self.OutputDirectory , prefix + ".log" )
ida_log_filename_for_source = os.path.join( self.OutputDirectory , prefix + "-source.log" )
ida_logfilename_for_target = os.path.join( self.OutputDirectory , prefix + "-target.log" )
else:
full_databasename = databasename
log_filename = full_databasename + ".log"
ida_log_filename_for_source = full_databasename + "-source.log"
ida_logfilename_for_target = full_databasename + "-target.log"
if reset_database:
if self.DebugLevel > 0:
print 'Removing', full_databasename
os.remove( full_databasename )
differ = self.LoadDiffer( full_databasename, source_filename, target_filename )
self.DatabaseName = full_databasename
if not differ:
differ = DarunGrimEngine.Differ( source_filename, target_filename )
differ.SetIDAPath( self.IDAPath )
if self.DebugLevel > 2:
print 'source_filename',source_filename
print 'target_filename',target_filename
print 'databasename',databasename
print 'log_filename', log_filename
print 'ida_log_filename_for_source', ida_log_filename_for_source
print 'ida_logfilename_for_target', ida_logfilename_for_target
differ.DiffFile( full_databasename, log_filename, ida_log_filename_for_source, ida_logfilename_for_target )
self.LogFilename = log_filename
self.LogFilenameForSource = ida_log_filename_for_source
self.LogFilenameForTarget = ida_logfilename_for_target
self.UpdateSecurityImplicationsScore( full_databasename )
return differ
def LoadDiffer( self, databasename, source_filename = None, target_filename = None ):
if os.path.isfile( databasename ) and os.path.getsize( databasename ) > 0:
database = DarunGrimDatabaseWrapper.Database( databasename )
function_match_info_count = database.GetFunctionMatchInfoCount()
del database
if function_match_info_count > 0:
differ = DarunGrimEngine.Differ( source_filename, target_filename )
differ.SetIDAPath( self.IDAPath )
if self.DebugLevel > 0:
print 'Already analyzed',databasename
differ.LoadDiffResults( databasename )
return differ
return None
def SyncIDA( self, source_id, target_id):
differ = self.GetDiffer( source_id, target_id )
if not differ:
differ = self.InitFileDiffByID( source_id, target_id )
if differ:
differ.SyncIDA();
def ShowAddresses( self, source_id, target_id, source_address, target_address ):
differ = self.GetDiffer( source_id, target_id )
if differ:
differ.ShowAddresses( source_address, target_address )
def ColorAddresses( self, source_id, target_id, source_address_infos, target_address_infos ):
differ = self.GetDiffer( source_id, target_id )
if differ:
for (source_address_start, source_address_end, match_rate) in source_address_infos:
color = self.GetColorForMatchRate( match_rate )
differ.ColorAddress( 0, source_address_start, source_address_end, color )
for (target_address_start, target_address_end, match_rate) in target_address_infos:
color = self.GetColorForMatchRate( match_rate )
differ.ColorAddress( 1, target_address_start, target_address_end, color )
def GetColorForMatchRate( self, match_rate ):
if match_rate == 0:
return 0x0000ff
elif match_rate == 100:
return 0xffffff
return 0x00ffff
def UpdateSecurityImplicationsScore( self, databasename ):
database = DarunGrimDatabaseWrapper.Database( databasename )
pattern_analyzer = DarunGrimAnalyzers.PatternAnalyzer()
for function_match_info in database.GetFunctionMatchInfo():
if function_match_info.non_match_count_for_the_source > 0 or \
function_match_info.non_match_count_for_the_target > 0 or \
function_match_info.match_count_with_modificationfor_the_source > 0:
function_match_info.security_implications_score = pattern_analyzer.GetSecurityImplicationsScore(
databasename,
function_match_info.source_address,
function_match_info.target_address )
database.Commit()
def InitMSFileDiffAll( self ):
for ( patch_name, filename ) in self.PatchTimelineAnalyzer.GetPatchFileNamePairs():
self.InitMSFileDiff( patch_name, filename )
if __name__ == '__main__':
    # Batch mode: diff every (patch, file) pair in the timeline database
    # using the default Manager paths.
    file_differ = Manager()
    file_differ.InitMSFileDiffAll()
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dlp_v2.types import dlp
from google.protobuf import empty_pb2 # type: ignore
from .base import DlpServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import DlpServiceGrpcTransport
class DlpServiceGrpcAsyncIOTransport(DlpServiceTransport):
"""gRPC AsyncIO backend transport for DlpService.
The Cloud Data Loss Prevention (DLP) API is a service that
allows clients to detect the presence of Personally Identifiable
Information (PII) and other privacy-sensitive data in user-
supplied, unstructured data streams, like text blocks or images.
The service also includes methods for sensitive data redaction
and scheduling of data scans on Google Cloud Platform based data
sets.
To learn more about concepts and find how-to guides see
https://cloud.google.com/dlp/docs/.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "dlp.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate to the shared helper: the class-level AUTH_SCOPES and
        # DEFAULT_HOST act as service baselines, while the explicit `scopes`
        # and any **kwargs refine them per call.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "dlp.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        # Start from "no channel"; self._stubs is re-created per instance so
        # the class-level dict is never mutated.
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        # A caller-supplied channel takes precedence over all credential and
        # mTLS arguments below.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # Deprecated mTLS path: api_mtls_endpoint replaces the host and
            # SSL credentials come from client_cert_source (or defaults).
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Current mTLS path: explicit ssl_channel_credentials wins over
                # the client_cert_source_for_mtls callback.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # Only create a channel if the caller did not provide one; uses the
        # host/credentials/scopes resolved by the base class above.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # -1 lifts gRPC's default message size limits in both
                    # directions.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def inspect_content(
self,
) -> Callable[[dlp.InspectContentRequest], Awaitable[dlp.InspectContentResponse]]:
r"""Return a callable for the inspect content method over gRPC.
Finds potentially sensitive info in content.
This method has limits on input size, processing time,
and output size.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
For how to guides, see
https://cloud.google.com/dlp/docs/inspecting-images and
https://cloud.google.com/dlp/docs/inspecting-text,
Returns:
Callable[[~.InspectContentRequest],
Awaitable[~.InspectContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "inspect_content" not in self._stubs:
self._stubs["inspect_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/InspectContent",
request_serializer=dlp.InspectContentRequest.serialize,
response_deserializer=dlp.InspectContentResponse.deserialize,
)
return self._stubs["inspect_content"]
@property
def redact_image(
self,
) -> Callable[[dlp.RedactImageRequest], Awaitable[dlp.RedactImageResponse]]:
r"""Return a callable for the redact image method over gRPC.
Redacts potentially sensitive info from an image.
This method has limits on input size, processing time,
and output size. See
https://cloud.google.com/dlp/docs/redacting-sensitive-
data-images to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.RedactImageRequest],
Awaitable[~.RedactImageResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "redact_image" not in self._stubs:
self._stubs["redact_image"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/RedactImage",
request_serializer=dlp.RedactImageRequest.serialize,
response_deserializer=dlp.RedactImageResponse.deserialize,
)
return self._stubs["redact_image"]
@property
def deidentify_content(
self,
) -> Callable[
[dlp.DeidentifyContentRequest], Awaitable[dlp.DeidentifyContentResponse]
]:
r"""Return a callable for the deidentify content method over gRPC.
De-identifies potentially sensitive info from a
ContentItem. This method has limits on input size and
output size. See
https://cloud.google.com/dlp/docs/deidentify-sensitive-
data to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.DeidentifyContentRequest],
Awaitable[~.DeidentifyContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deidentify_content" not in self._stubs:
self._stubs["deidentify_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeidentifyContent",
request_serializer=dlp.DeidentifyContentRequest.serialize,
response_deserializer=dlp.DeidentifyContentResponse.deserialize,
)
return self._stubs["deidentify_content"]
@property
def reidentify_content(
self,
) -> Callable[
[dlp.ReidentifyContentRequest], Awaitable[dlp.ReidentifyContentResponse]
]:
r"""Return a callable for the reidentify content method over gRPC.
Re-identifies content that has been de-identified. See
https://cloud.google.com/dlp/docs/pseudonymization#re-identification_in_free_text_code_example
to learn more.
Returns:
Callable[[~.ReidentifyContentRequest],
Awaitable[~.ReidentifyContentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reidentify_content" not in self._stubs:
self._stubs["reidentify_content"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ReidentifyContent",
request_serializer=dlp.ReidentifyContentRequest.serialize,
response_deserializer=dlp.ReidentifyContentResponse.deserialize,
)
return self._stubs["reidentify_content"]
@property
def list_info_types(
self,
) -> Callable[[dlp.ListInfoTypesRequest], Awaitable[dlp.ListInfoTypesResponse]]:
r"""Return a callable for the list info types method over gRPC.
Returns a list of the sensitive information types
that the DLP API supports. See
https://cloud.google.com/dlp/docs/infotypes-reference to
learn more.
Returns:
Callable[[~.ListInfoTypesRequest],
Awaitable[~.ListInfoTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_info_types" not in self._stubs:
self._stubs["list_info_types"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListInfoTypes",
request_serializer=dlp.ListInfoTypesRequest.serialize,
response_deserializer=dlp.ListInfoTypesResponse.deserialize,
)
return self._stubs["list_info_types"]
@property
def create_inspect_template(
self,
) -> Callable[[dlp.CreateInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the create inspect template method over gRPC.
Creates an InspectTemplate for re-using frequently
used configuration for inspecting content, images, and
storage. See https://cloud.google.com/dlp/docs/creating-
templates to learn more.
Returns:
Callable[[~.CreateInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_inspect_template" not in self._stubs:
self._stubs["create_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateInspectTemplate",
request_serializer=dlp.CreateInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["create_inspect_template"]
@property
def update_inspect_template(
self,
) -> Callable[[dlp.UpdateInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the update inspect template method over gRPC.
Updates the InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.UpdateInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_inspect_template" not in self._stubs:
self._stubs["update_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateInspectTemplate",
request_serializer=dlp.UpdateInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["update_inspect_template"]
@property
def get_inspect_template(
self,
) -> Callable[[dlp.GetInspectTemplateRequest], Awaitable[dlp.InspectTemplate]]:
r"""Return a callable for the get inspect template method over gRPC.
Gets an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.GetInspectTemplateRequest],
Awaitable[~.InspectTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_inspect_template" not in self._stubs:
self._stubs["get_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetInspectTemplate",
request_serializer=dlp.GetInspectTemplateRequest.serialize,
response_deserializer=dlp.InspectTemplate.deserialize,
)
return self._stubs["get_inspect_template"]
@property
def list_inspect_templates(
self,
) -> Callable[
[dlp.ListInspectTemplatesRequest], Awaitable[dlp.ListInspectTemplatesResponse]
]:
r"""Return a callable for the list inspect templates method over gRPC.
Lists InspectTemplates.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.ListInspectTemplatesRequest],
Awaitable[~.ListInspectTemplatesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_inspect_templates" not in self._stubs:
self._stubs["list_inspect_templates"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListInspectTemplates",
request_serializer=dlp.ListInspectTemplatesRequest.serialize,
response_deserializer=dlp.ListInspectTemplatesResponse.deserialize,
)
return self._stubs["list_inspect_templates"]
@property
def delete_inspect_template(
self,
) -> Callable[[dlp.DeleteInspectTemplateRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete inspect template method over gRPC.
Deletes an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Returns:
Callable[[~.DeleteInspectTemplateRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_inspect_template" not in self._stubs:
self._stubs["delete_inspect_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteInspectTemplate",
request_serializer=dlp.DeleteInspectTemplateRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_inspect_template"]
@property
def create_deidentify_template(
self,
) -> Callable[
[dlp.CreateDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the create deidentify template method over gRPC.
Creates a DeidentifyTemplate for re-using frequently
used configuration for de-identifying content, images,
and storage. See
https://cloud.google.com/dlp/docs/creating-templates-
deid to learn more.
Returns:
Callable[[~.CreateDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_deidentify_template" not in self._stubs:
self._stubs["create_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateDeidentifyTemplate",
request_serializer=dlp.CreateDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["create_deidentify_template"]
@property
def update_deidentify_template(
self,
) -> Callable[
[dlp.UpdateDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the update deidentify template method over gRPC.
Updates the DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.UpdateDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_deidentify_template" not in self._stubs:
self._stubs["update_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateDeidentifyTemplate",
request_serializer=dlp.UpdateDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["update_deidentify_template"]
@property
def get_deidentify_template(
self,
) -> Callable[
[dlp.GetDeidentifyTemplateRequest], Awaitable[dlp.DeidentifyTemplate]
]:
r"""Return a callable for the get deidentify template method over gRPC.
Gets a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.GetDeidentifyTemplateRequest],
Awaitable[~.DeidentifyTemplate]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_deidentify_template" not in self._stubs:
self._stubs["get_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetDeidentifyTemplate",
request_serializer=dlp.GetDeidentifyTemplateRequest.serialize,
response_deserializer=dlp.DeidentifyTemplate.deserialize,
)
return self._stubs["get_deidentify_template"]
@property
def list_deidentify_templates(
self,
) -> Callable[
[dlp.ListDeidentifyTemplatesRequest],
Awaitable[dlp.ListDeidentifyTemplatesResponse],
]:
r"""Return a callable for the list deidentify templates method over gRPC.
Lists DeidentifyTemplates.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.ListDeidentifyTemplatesRequest],
Awaitable[~.ListDeidentifyTemplatesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_deidentify_templates" not in self._stubs:
self._stubs["list_deidentify_templates"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListDeidentifyTemplates",
request_serializer=dlp.ListDeidentifyTemplatesRequest.serialize,
response_deserializer=dlp.ListDeidentifyTemplatesResponse.deserialize,
)
return self._stubs["list_deidentify_templates"]
@property
def delete_deidentify_template(
self,
) -> Callable[[dlp.DeleteDeidentifyTemplateRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete deidentify template method over gRPC.
Deletes a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Returns:
Callable[[~.DeleteDeidentifyTemplateRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_deidentify_template" not in self._stubs:
self._stubs["delete_deidentify_template"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteDeidentifyTemplate",
request_serializer=dlp.DeleteDeidentifyTemplateRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_deidentify_template"]
@property
def create_job_trigger(
self,
) -> Callable[[dlp.CreateJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the create job trigger method over gRPC.
Creates a job trigger to run DLP actions such as
scanning storage for sensitive information on a set
schedule. See
https://cloud.google.com/dlp/docs/creating-job-triggers
to learn more.
Returns:
Callable[[~.CreateJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_job_trigger" not in self._stubs:
self._stubs["create_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateJobTrigger",
request_serializer=dlp.CreateJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["create_job_trigger"]
@property
def update_job_trigger(
self,
) -> Callable[[dlp.UpdateJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the update job trigger method over gRPC.
Updates a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.UpdateJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_job_trigger" not in self._stubs:
self._stubs["update_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateJobTrigger",
request_serializer=dlp.UpdateJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["update_job_trigger"]
@property
def hybrid_inspect_job_trigger(
self,
) -> Callable[
[dlp.HybridInspectJobTriggerRequest], Awaitable[dlp.HybridInspectResponse]
]:
r"""Return a callable for the hybrid inspect job trigger method over gRPC.
Inspect hybrid content and store findings to a
trigger. The inspection will be processed
asynchronously. To review the findings monitor the jobs
within the trigger.
Returns:
Callable[[~.HybridInspectJobTriggerRequest],
Awaitable[~.HybridInspectResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "hybrid_inspect_job_trigger" not in self._stubs:
self._stubs["hybrid_inspect_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/HybridInspectJobTrigger",
request_serializer=dlp.HybridInspectJobTriggerRequest.serialize,
response_deserializer=dlp.HybridInspectResponse.deserialize,
)
return self._stubs["hybrid_inspect_job_trigger"]
@property
def get_job_trigger(
self,
) -> Callable[[dlp.GetJobTriggerRequest], Awaitable[dlp.JobTrigger]]:
r"""Return a callable for the get job trigger method over gRPC.
Gets a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.GetJobTriggerRequest],
Awaitable[~.JobTrigger]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_job_trigger" not in self._stubs:
self._stubs["get_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetJobTrigger",
request_serializer=dlp.GetJobTriggerRequest.serialize,
response_deserializer=dlp.JobTrigger.deserialize,
)
return self._stubs["get_job_trigger"]
@property
def list_job_triggers(
self,
) -> Callable[[dlp.ListJobTriggersRequest], Awaitable[dlp.ListJobTriggersResponse]]:
r"""Return a callable for the list job triggers method over gRPC.
Lists job triggers.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.ListJobTriggersRequest],
Awaitable[~.ListJobTriggersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_job_triggers" not in self._stubs:
self._stubs["list_job_triggers"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListJobTriggers",
request_serializer=dlp.ListJobTriggersRequest.serialize,
response_deserializer=dlp.ListJobTriggersResponse.deserialize,
)
return self._stubs["list_job_triggers"]
@property
def delete_job_trigger(
self,
) -> Callable[[dlp.DeleteJobTriggerRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete job trigger method over gRPC.
Deletes a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Returns:
Callable[[~.DeleteJobTriggerRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_job_trigger" not in self._stubs:
self._stubs["delete_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteJobTrigger",
request_serializer=dlp.DeleteJobTriggerRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_job_trigger"]
@property
def activate_job_trigger(
self,
) -> Callable[[dlp.ActivateJobTriggerRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the activate job trigger method over gRPC.
Activate a job trigger. Causes the immediate execute
of a trigger instead of waiting on the trigger event to
occur.
Returns:
Callable[[~.ActivateJobTriggerRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "activate_job_trigger" not in self._stubs:
self._stubs["activate_job_trigger"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ActivateJobTrigger",
request_serializer=dlp.ActivateJobTriggerRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["activate_job_trigger"]
@property
def create_dlp_job(
self,
) -> Callable[[dlp.CreateDlpJobRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the create dlp job method over gRPC.
Creates a new job to inspect storage or calculate
risk metrics. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
When no InfoTypes or CustomInfoTypes are specified in
inspect jobs, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Returns:
Callable[[~.CreateDlpJobRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_dlp_job" not in self._stubs:
self._stubs["create_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateDlpJob",
request_serializer=dlp.CreateDlpJobRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["create_dlp_job"]
@property
def list_dlp_jobs(
self,
) -> Callable[[dlp.ListDlpJobsRequest], Awaitable[dlp.ListDlpJobsResponse]]:
r"""Return a callable for the list dlp jobs method over gRPC.
Lists DlpJobs that match the specified filter in the
request. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
Returns:
Callable[[~.ListDlpJobsRequest],
Awaitable[~.ListDlpJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_dlp_jobs" not in self._stubs:
self._stubs["list_dlp_jobs"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListDlpJobs",
request_serializer=dlp.ListDlpJobsRequest.serialize,
response_deserializer=dlp.ListDlpJobsResponse.deserialize,
)
return self._stubs["list_dlp_jobs"]
@property
def get_dlp_job(self) -> Callable[[dlp.GetDlpJobRequest], Awaitable[dlp.DlpJob]]:
r"""Return a callable for the get dlp job method over gRPC.
Gets the latest state of a long-running DlpJob.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.GetDlpJobRequest],
Awaitable[~.DlpJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_dlp_job" not in self._stubs:
self._stubs["get_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetDlpJob",
request_serializer=dlp.GetDlpJobRequest.serialize,
response_deserializer=dlp.DlpJob.deserialize,
)
return self._stubs["get_dlp_job"]
@property
def delete_dlp_job(
self,
) -> Callable[[dlp.DeleteDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete dlp job method over gRPC.
Deletes a long-running DlpJob. This method indicates
that the client is no longer interested in the DlpJob
result. The job will be cancelled if possible.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.DeleteDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_dlp_job" not in self._stubs:
self._stubs["delete_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteDlpJob",
request_serializer=dlp.DeleteDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_dlp_job"]
@property
def cancel_dlp_job(
self,
) -> Callable[[dlp.CancelDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the cancel dlp job method over gRPC.
Starts asynchronous cancellation on a long-running
DlpJob. The server makes a best effort to cancel the
DlpJob, but success is not guaranteed.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Returns:
Callable[[~.CancelDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_dlp_job" not in self._stubs:
self._stubs["cancel_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CancelDlpJob",
request_serializer=dlp.CancelDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_dlp_job"]
@property
def create_stored_info_type(
self,
) -> Callable[[dlp.CreateStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the create stored info type method over gRPC.
Creates a pre-built stored infoType to be used for
inspection. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.CreateStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_stored_info_type" not in self._stubs:
self._stubs["create_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/CreateStoredInfoType",
request_serializer=dlp.CreateStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["create_stored_info_type"]
@property
def update_stored_info_type(
self,
) -> Callable[[dlp.UpdateStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the update stored info type method over gRPC.
Updates the stored infoType by creating a new
version. The existing version will continue to be used
until the new version is ready. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.UpdateStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_stored_info_type" not in self._stubs:
self._stubs["update_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/UpdateStoredInfoType",
request_serializer=dlp.UpdateStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["update_stored_info_type"]
@property
def get_stored_info_type(
self,
) -> Callable[[dlp.GetStoredInfoTypeRequest], Awaitable[dlp.StoredInfoType]]:
r"""Return a callable for the get stored info type method over gRPC.
Gets a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.GetStoredInfoTypeRequest],
Awaitable[~.StoredInfoType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_stored_info_type" not in self._stubs:
self._stubs["get_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/GetStoredInfoType",
request_serializer=dlp.GetStoredInfoTypeRequest.serialize,
response_deserializer=dlp.StoredInfoType.deserialize,
)
return self._stubs["get_stored_info_type"]
@property
def list_stored_info_types(
self,
) -> Callable[
[dlp.ListStoredInfoTypesRequest], Awaitable[dlp.ListStoredInfoTypesResponse]
]:
r"""Return a callable for the list stored info types method over gRPC.
Lists stored infoTypes.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.ListStoredInfoTypesRequest],
Awaitable[~.ListStoredInfoTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_stored_info_types" not in self._stubs:
self._stubs["list_stored_info_types"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/ListStoredInfoTypes",
request_serializer=dlp.ListStoredInfoTypesRequest.serialize,
response_deserializer=dlp.ListStoredInfoTypesResponse.deserialize,
)
return self._stubs["list_stored_info_types"]
@property
def delete_stored_info_type(
self,
) -> Callable[[dlp.DeleteStoredInfoTypeRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete stored info type method over gRPC.
Deletes a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Returns:
Callable[[~.DeleteStoredInfoTypeRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_stored_info_type" not in self._stubs:
self._stubs["delete_stored_info_type"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/DeleteStoredInfoType",
request_serializer=dlp.DeleteStoredInfoTypeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_stored_info_type"]
@property
def hybrid_inspect_dlp_job(
self,
) -> Callable[
[dlp.HybridInspectDlpJobRequest], Awaitable[dlp.HybridInspectResponse]
]:
r"""Return a callable for the hybrid inspect dlp job method over gRPC.
Inspect hybrid content and store findings to a job.
To review the findings, inspect the job. Inspection will
occur asynchronously.
Returns:
Callable[[~.HybridInspectDlpJobRequest],
Awaitable[~.HybridInspectResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "hybrid_inspect_dlp_job" not in self._stubs:
self._stubs["hybrid_inspect_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/HybridInspectDlpJob",
request_serializer=dlp.HybridInspectDlpJobRequest.serialize,
response_deserializer=dlp.HybridInspectResponse.deserialize,
)
return self._stubs["hybrid_inspect_dlp_job"]
@property
def finish_dlp_job(
self,
) -> Callable[[dlp.FinishDlpJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the finish dlp job method over gRPC.
Finish a running hybrid DlpJob. Triggers the
finalization steps and running of any enabled actions
that have not yet run.
Returns:
Callable[[~.FinishDlpJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "finish_dlp_job" not in self._stubs:
self._stubs["finish_dlp_job"] = self.grpc_channel.unary_unary(
"/google.privacy.dlp.v2.DlpService/FinishDlpJob",
request_serializer=dlp.FinishDlpJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["finish_dlp_job"]
def close(self):
return self.grpc_channel.close()
# Public API of this module: only the async transport class is exported.
__all__ = ("DlpServiceGrpcAsyncIOTransport",)
|
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.port import *
from simulation.aivika.modeler.stream import *
from simulation.aivika.modeler.data_type import *
from simulation.aivika.modeler.pdf import *
def uniform_random_server(transact_type, min_delay, max_delay, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay is uniformly distributed on [min_delay, max_delay]."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomUniformServer',
                     str(preemptible), str(min_delay), str(max_delay)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def uniform_int_random_server(transact_type, min_delay, max_delay, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay is an integer drawn uniformly from [min_delay, max_delay]."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomUniformIntServer',
                     str(preemptible), str(min_delay), str(max_delay)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def triangular_random_server(transact_type, min_delay, median_delay, max_delay, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the triangular distribution (min, median, max)."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomTriangularServer',
                     str(preemptible), str(min_delay), str(median_delay), str(max_delay)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def normal_random_server(transact_type, mean_delay, delay_deviation, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay is normally distributed with the given mean and deviation."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomNormalServer',
                     str(preemptible), str(mean_delay), str(delay_deviation)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def lognormal_random_server(transact_type, normal_mean_delay, normal_delay_deviation, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay is lognormally distributed.

    The numerical parameters describe the underlying normal distribution
    from which the lognormal distribution is derived.
    """
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomLogNormalServer',
                     str(preemptible), str(normal_mean_delay), str(normal_delay_deviation)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def exponential_random_server(transact_type, mean_delay, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay is exponentially distributed with the given mean (reciprocal of the rate)."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomExponentialServer',
                     str(preemptible), str(mean_delay)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def erlang_random_server(transact_type, scale, shape, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the Erlang distribution with the given scale (reciprocal of the rate) and shape."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomErlangServer',
                     str(preemptible), str(scale), str(shape)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def poisson_random_server(transact_type, mean_delay, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the Poisson distribution with the given mean."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomPoissonServer',
                     str(preemptible), str(mean_delay)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def binomial_random_server(transact_type, probability, trials, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the binomial distribution with the given probability and trials."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomBinomialServer',
                     str(preemptible), str(probability), str(trials)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def gamma_random_server(transact_type, shape, scale, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the Gamma distribution with the given shape and scale."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomGammaServer',
                     str(preemptible), str(shape), str(scale)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def beta_random_server(transact_type, alpha, beta, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the Beta distribution with shape parameters alpha and beta."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomBetaServer',
                     str(preemptible), str(alpha), str(beta)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def weibull_random_server(transact_type, shape, scale, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the Weibull distribution with the given shape and scale."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call as model source code.
    code = ' '.join(['newPreemptibleRandomWeibullServer',
                     str(preemptible), str(shape), str(scale)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
def discrete_random_server(transact_type, pdf, preemptible = False, name = None, descr = None):
    """Return a new server whose processing delay follows the discrete distribution given by probability density function `pdf`."""
    expect_transact_type(transact_type)
    data_type = transact_type.get_data_type()
    model = transact_type.get_model()
    # Emit the Aivika constructor call; the pdf is serialized by encode_pdf.
    code = ' '.join(['newPreemptibleRandomDiscreteServer',
                     str(preemptible), encode_pdf(pdf)])
    port = ServerPort(model, UNIT_TYPE, data_type, data_type, name = name, descr = descr)
    port.write(code)
    return port
|
<filename>pointnet2/train/train_cls.py
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
import etw_pytorch_utils as pt_utils
import os.path as osp
import os
import argparse
from pointnet2.models import Pointnet2FaceClsSSG as Pointnet
from pointnet2.data import GPMMNormalCurvDataset
import pointnet2.data.data_utils as d_utils
import time
import shutil
from pointnet2.train import layer
import numpy as np
# Enable cuDNN and let it benchmark/auto-select the fastest convolution
# algorithms for the (fixed-size) inputs used during training.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args(argv=None):
    """Parse command-line arguments for cls training.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible — existing ``parse_args()`` calls work).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        description="Arguments for cls training",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-batch_size", type=int, default=32, help="Batch size")
    parser.add_argument(
        "-weight_decay", type=float, default=1e-5, help="L2 regularization coeff"
    )
    parser.add_argument("-lr", type=float, default=1e-3, help="Initial learning rate")
    parser.add_argument(
        "-model_checkpoint", type=str, default=None, help="Checkpoint to start from"
    )
    parser.add_argument(
        "-cls_checkpoint", type=str, default=None, help="Checkpoint to start from"
    )
    parser.add_argument(
        "-epochs", type=int, default=10, help="Number of epochs to train for"
    )
    # BUG FIX: train() reads args.print_freq, which was never defined and
    # raised AttributeError at the first logging step.
    parser.add_argument(
        "-print_freq", type=int, default=10, help="Batches between log prints"
    )
    # loss Classifier
    parser.add_argument('--num_class', type=int, default=500,
                        help='number of people(class)')
    parser.add_argument('--classifier_type', type=str, default='AL',
                        help='Which classifier for train. (MCP, AL, L)')
    return parser.parse_args(argv)
# Module-level learning rate actually used by the Adam optimizer below.
# NOTE(review): the -lr CLI flag exists but is not wired to this value — confirm.
lr = 1e-3
# Destination of the text training log; './log' must already exist.
log_file = './log/train_log.txt'
class AverageMeter(object):
    """Tracks the most recent value and a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate by 10x at epochs 5, 8 and 9."""
    if epoch not in (5, 8, 9):
        return
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * 0.1
def train(train_loader, model, classifier, criterion, optimizer, epoch):
    """Run one training epoch of `model` + `classifier` head over `train_loader`.

    NOTE(review): relies on module-level `args` (for `args.print_freq`) and
    the open log file `f`, both created in the __main__ block — confirm
    `print_freq` is defined on `args` before calling.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        # compute output
        input = input.cuda()
        target = target.cuda()
        # Legacy pre-0.4 PyTorch autograd wrappers.
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        target_var = target_var.view(-1)
        output = model(input_var)
        # Margin-based heads (MCP/AL) take the labels at forward time;
        # a plain Linear head does not.
        if isinstance(classifier, torch.nn.Linear):
            output = classifier(output)
        else:
            output = classifier(output, target)
        loss = criterion(output, target_var)
        optimizer.zero_grad()
        # Batch accuracy: fraction of argmax predictions matching the labels.
        _, classes = torch.max(output, -1)
        acc = (classes == target_var).float().sum() / target_var.numel()
        losses.update(loss.item(), input.size(0))
        top1.update(acc, input.size(0))
        # compute gradient and do SGD step
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1))
            # Mirror the same line into the training log file.
            f.writelines('Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\n'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1))
def validate(val_loader, model, classifier, criterion):
    """Run one evaluation pass and return the average top-1 accuracy.

    NOTE(review): returns a single scalar, but the caller in the __main__
    block unpacks two values (``top1, tpr``) — confirm the intended return.
    Relies on the module-level open log file `f`.
    """
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    for i, (input, target) in enumerate(val_loader):
        input = input.cuda()
        target = target.cuda()
        # `volatile` is the legacy pre-0.4 PyTorch way to disable autograd.
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        target_var = target_var.view(-1)
        # compute output
        output = model(input_var)
        # Margin-based heads (MCP/AL) take the labels at forward time.
        if isinstance(classifier, torch.nn.Linear):
            output = classifier(output)
        else:
            output = classifier(output, target)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        _, classes = torch.max(output, -1)
        acc = (classes == target_var).float().sum() / target_var.numel()
        losses.update(loss.item(), input.size(0))
        top1.update(acc, input.size(0))
    print('\nTest set: Average loss: {}, Accuracy: ({})\n'.format(losses.avg, top1.avg))
    f.writelines('\nTest set: Average loss: {}, Accuracy: ({})\n'.format(losses.avg, top1.avg))
    return top1.avg
def checkpoint_state(model=None,
                     optimizer=None,
                     best_prec=None,
                     epoch=None,
                     it=None):
    """Bundle model/optimizer state and bookkeeping into a serializable dict."""
    if model is None:
        model_state = None
    elif isinstance(model, torch.nn.DataParallel):
        # Unwrap DataParallel so the checkpoint loads without the wrapper.
        model_state = model.module.state_dict()
    else:
        model_state = model.state_dict()
    optim_state = None if optimizer is None else optimizer.state_dict()
    return {
        'epoch': epoch,
        'it': it,
        'best_prec': best_prec,
        'model_state': model_state,
        'optimizer_state': optim_state
    }
def save_checkpoint(state,
                    is_best,
                    filename='checkpoint',
                    bestname='model_best'):
    """Serialize `state` to '<filename>.pth.tar'; copy to bestname when is_best."""
    path = '{}.pth.tar'.format(filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, '{}.pth.tar'.format(bestname))
def get_list(folder):
    """Collect (file path, class name) lists for every file under folder/<class>/."""
    pt_list = []
    class_list = []
    # Class subdirectories are visited in sorted order, files as listed.
    for cname in sorted(os.listdir(folder)):
        cpath = os.path.join(folder, cname)
        for gname in os.listdir(cpath):
            pt_list.append(os.path.join(cpath, gname))
            class_list.append(cname)
    return pt_list, class_list
if __name__ == "__main__":
    args = parse_args()
    f = open(log_file, 'w')
    # Training-time augmentation.  Bound to a fresh name so the imported
    # torchvision `transforms` module is no longer shadowed.
    train_transforms = transforms.Compose(
        [
            d_utils.PointcloudRotate(axis=np.array([1, 0, 0])),
            d_utils.PointcloudRotate(axis=np.array([0, 1, 0])),
            d_utils.PointcloudRotate(axis=np.array([0, 0, 1])),
            d_utils.PointcloudJitter(std=0.002),
        ]
    )
    train_dataset = GPMMNormalCurvDataset(root = '',
                                          class_nums = args.num_class,
                                          transforms=train_transforms,
                                          train=True,
                                          extensions='bcnc')
    print('Train dataset Length: {}'.format(train_dataset.data_length))
    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    test_dataset = GPMMNormalCurvDataset(root = '',
                                         class_nums = args.num_class,
                                         transforms=None,
                                         train=False,
                                         extensions='bcnc')
    print('Eval dataset Length: {}'.format(test_dataset.data_length))
    test_loader = DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )
    model = Pointnet(input_channels=3, use_xyz=True)
    model.cuda()
    # 512 is the dimension of the embedding the backbone produces.
    classifier = {
        'MCP': layer.MarginCosineProduct(512, args.num_class).cuda(),
        'AL' : layer.AngleLinear(512, args.num_class).cuda(),
        'L'  : torch.nn.Linear(512, args.num_class, bias=False).cuda()
    }[args.classifier_type]
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(
        [{'params': model.parameters()}, {'params': classifier.parameters()}],
        lr=lr, weight_decay=args.weight_decay
    )
    # default values before (optionally) restoring from a checkpoint
    it = -1
    best_prec1 = 0
    best_top1 = 0
    start_epoch = 1
    # load status from checkpoint
    if args.model_checkpoint is not None:
        checkpoint_status = pt_utils.load_checkpoint(
            model, optimizer, filename=args.model_checkpoint.split(".")[0]
        )
        if checkpoint_status is not None:
            it, start_epoch, best_loss = checkpoint_status
    if args.cls_checkpoint is not None:
        checkpoint_status = pt_utils.load_checkpoint(
            classifier, optimizer, filename=args.cls_checkpoint.split(".")[0]
        )
        if checkpoint_status is not None:
            it, start_epoch, best_loss = checkpoint_status
    it = max(it, 0)  # for the initialize value of `trainer.train`
    if not osp.isdir("checkpoints"):
        os.makedirs("checkpoints")
    checkpoint_name_ori = "checkpoints/pointnet2_model"
    best_name = "checkpoints/pointnet2_model_best"
    cls_checkpoint_name = "checkpoints/pointnet2_cls"
    cls_best_name = "checkpoints/pointnet2_cls_best"
    eval_frequency = len(train_loader)
    for epoch in range(args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, classifier, criterion, optimizer, epoch)
        # BUG FIX: validate() returns a single scalar (top-1 accuracy); the
        # previous `top1, tpr = validate(...)` unpacking raised TypeError at
        # the end of the first epoch.
        top1 = validate(test_loader, model, classifier, criterion)
        # save the learned parameters
        is_best = top1 > best_top1
        best_top1 = max(best_top1, top1)
        checkpoint_name = checkpoint_name_ori + str(epoch)
        save_checkpoint(
            checkpoint_state(model, optimizer,
                             top1, args.epochs, epoch),
            is_best,
            filename=checkpoint_name,
            bestname=best_name)
    f.close()
|
<filename>cinder/volume/drivers/dothill/dothill_iscsi.py<gh_stars>0
# Copyright 2014 <NAME>
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder import interface
import cinder.volume.driver
from cinder.volume.drivers.dothill import dothill_common as dothillcommon
from cinder.volume.drivers.san import san
# Default iSCSI portal port, kept as a string so it can be joined into
# 'ip:port' portal addresses without conversion.
DEFAULT_ISCSI_PORT = "3260"
LOG = logging.getLogger(__name__)
@interface.volumedriver
class DotHillISCSIDriver(cinder.volume.driver.ISCSIDriver):
    """OpenStack iSCSI cinder drivers for DotHill Arrays.
    Version history:
        0.1 - Base structure for DotHill iSCSI drivers based on HPMSA FC
              drivers:
              "https://github.com/openstack/cinder/tree/stable/juno/
              cinder/volume/drivers/san/hp"
        1.0 - Version developed for DotHill arrays with the following
              modifications:
              - added iSCSI support
              - added CHAP support in iSCSI
              - added support for v3 API(virtual pool feature)
              - added support for retype volume
              - added support for manage/unmanage volume
              - added https support
    """
    VERSION = "1.0"
    def __init__(self, *args, **kwargs):
        super(DotHillISCSIDriver, self).__init__(*args, **kwargs)
        # The array-management helper is created later, in do_setup(),
        # once the configuration has been fully loaded.
        self.common = None
        self.configuration.append_config_values(dothillcommon.common_opts)
        self.configuration.append_config_values(dothillcommon.iscsi_opts)
        self.configuration.append_config_values(san.san_opts)
        # Raw 'ip' or 'ip:port' strings; normalized by initialize_iscsi_ports().
        self.iscsi_ips = self.configuration.dothill_iscsi_ips
    def _init_common(self):
        # Factory for the shared DotHill array-management helper.
        return dothillcommon.DotHillCommon(self.configuration)
    def _check_flags(self):
        # SAN address and credentials are mandatory for this backend.
        required_flags = ['san_ip', 'san_login', 'san_password']
        self.common.check_flags(self.configuration, required_flags)
    def do_setup(self, context):
        """One-time driver initialization invoked by the volume manager."""
        self.common = self._init_common()
        self._check_flags()
        self.common.do_setup(context)
        self.initialize_iscsi_ports()
    def initialize_iscsi_ports(self):
        """Normalize configured portal addresses into [ip, port] pairs.

        Raises InvalidInput when an entry is malformed or none are set.
        """
        iscsi_ips = []
        if self.iscsi_ips:
            for ip_addr in self.iscsi_ips:
                ip = ip_addr.split(':')
                if len(ip) == 1:
                    # Bare IP: fall back to the default iSCSI port.
                    iscsi_ips.append([ip_addr, DEFAULT_ISCSI_PORT])
                elif len(ip) == 2:
                    iscsi_ips.append([ip[0], ip[1]])
                else:
                    # NOTE(review): a colon-separated IPv6 literal would land
                    # here — confirm IPv6 portals are intentionally rejected.
                    msg = _("Invalid IP address format: '%s'") % ip_addr
                    LOG.error(msg)
                    raise exception.InvalidInput(reason=(msg))
            self.iscsi_ips = iscsi_ips
        else:
            msg = _('At least one valid iSCSI IP address must be set.')
            LOG.error(msg)
            raise exception.InvalidInput(reason=(msg))
    def check_for_setup_error(self):
        """Verify mandatory configuration after do_setup()."""
        self._check_flags()
    def create_volume(self, volume):
        """Create a volume on the array (delegates to the common helper)."""
        self.common.create_volume(volume)
    def create_volume_from_snapshot(self, volume, src_vref):
        """Create a volume from an existing snapshot."""
        self.common.create_volume_from_snapshot(volume, src_vref)
    def create_cloned_volume(self, volume, src_vref):
        """Clone an existing volume."""
        self.common.create_cloned_volume(volume, src_vref)
    def delete_volume(self, volume):
        """Delete a volume from the array."""
        self.common.delete_volume(volume)
    def initialize_connection(self, volume, connector):
        """Map the volume to the initiator and build the iSCSI connection info.

        Returns a dict with driver_volume_type 'iscsi' and the target
        LUN/IQN/portal (plus CHAP credentials when CHAP auth is enabled).
        Raises DotHillNotTargetPortal when no configured portal is active.
        """
        # A session must be held for the whole mapping sequence; it is
        # always released in the finally block below.
        self.common.client_login()
        try:
            data = {}
            data['target_lun'] = self.common.map_volume(volume,
                                                        connector,
                                                        'initiator')
            iqns = self.common.get_active_iscsi_target_iqns()
            data['target_discovered'] = True
            # NOTE(review): only the first active IQN is advertised — confirm
            # multipath is handled elsewhere if needed.
            data['target_iqn'] = iqns[0]
            iscsi_portals = self.common.get_active_iscsi_target_portals()
            # Pick the first configured portal that the array reports active.
            for ip_port in self.iscsi_ips:
                if (ip_port[0] in iscsi_portals):
                    data['target_portal'] = ":".join(ip_port)
                    break
            if 'target_portal' not in data:
                raise exception.DotHillNotTargetPortal()
            if self.configuration.use_chap_auth:
                # Reuse the existing CHAP record, creating one on first use.
                chap_secret = self.common.get_chap_record(
                    connector['initiator']
                )
                if not chap_secret:
                    chap_secret = self.create_chap_record(
                        connector['initiator']
                    )
                data['auth_password'] = chap_secret
                data['auth_username'] = connector['initiator']
                data['auth_method'] = 'CHAP'
            info = {'driver_volume_type': 'iscsi',
                    'data': data}
            return info
        finally:
            self.common.client_logout()
    def terminate_connection(self, volume, connector, **kwargs):
        """Unmap the volume from the initiator."""
        self.common.unmap_volume(volume, connector, 'initiator')
    def get_volume_stats(self, refresh=False):
        """Return backend capability/usage stats annotated for iSCSI."""
        stats = self.common.get_volume_stats(refresh)
        stats['storage_protocol'] = 'iSCSI'
        stats['driver_version'] = self.VERSION
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = (backend_name or
                                        self.__class__.__name__)
        return stats
    def create_export(self, context, volume, connector):
        # Exports are created at attach time (initialize_connection).
        pass
    def ensure_export(self, context, volume):
        # Nothing to re-establish after a restart.
        pass
    def remove_export(self, context, volume):
        # Nothing to remove; unmapping happens in terminate_connection.
        pass
    def create_snapshot(self, snapshot):
        """Create a snapshot of a volume."""
        self.common.create_snapshot(snapshot)
    def delete_snapshot(self, snapshot):
        """Delete a snapshot."""
        self.common.delete_snapshot(snapshot)
    def extend_volume(self, volume, new_size):
        """Grow a volume to new_size."""
        self.common.extend_volume(volume, new_size)
    def create_chap_record(self, initiator_name):
        """Register the configured CHAP secret for `initiator_name`.

        Returns the secret; raises InvalidInput when its length is not
        within the 12-16 character range required by the array.
        """
        chap_secret = self.configuration.chap_password
        # Chap secret length should be 12 to 16 characters
        if 12 <= len(chap_secret) <= 16:
            self.common.create_chap_record(initiator_name, chap_secret)
        else:
            msg = _('CHAP secret should be 12-16 bytes.')
            LOG.error(msg)
            raise exception.InvalidInput(reason=(msg))
        return chap_secret
    def retype(self, context, volume, new_type, diff, host):
        """Change a volume's type; delegates the decision to the helper."""
        return self.common.retype(volume, new_type, diff, host)
    def manage_existing(self, volume, existing_ref):
        """Bring an existing array volume under Cinder management."""
        self.common.manage_existing(volume, existing_ref)
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size of an existing array volume to be managed."""
        return self.common.manage_existing_get_size(volume, existing_ref)
    def unmanage(self, volume):
        # Cinder forgets the volume; it is intentionally left on the array.
        pass
|
<filename>src/oci/database/models/create_data_guard_association_to_existing_vm_cluster_details.py<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .create_data_guard_association_details import CreateDataGuardAssociationDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateDataGuardAssociationToExistingVmClusterDetails(CreateDataGuardAssociationDetails):
    """
    The configuration details for creating a Data Guard association for a ExaCC Vmcluster database. For these types of vm cluster databases, the `creationType` should be `ExistingVmCluster`. A standby database will be created in the VM cluster you specify.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new CreateDataGuardAssociationToExistingVmClusterDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.database.models.CreateDataGuardAssociationToExistingVmClusterDetails.creation_type` attribute
        of this class is ``ExistingVmCluster`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param database_software_image_id:
            The value to assign to the database_software_image_id property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type database_software_image_id: str

        :param database_admin_password:
            The value to assign to the database_admin_password property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type database_admin_password: str

        :param protection_mode:
            The value to assign to the protection_mode property of this CreateDataGuardAssociationToExistingVmClusterDetails.
            Allowed values for this property are: "MAXIMUM_AVAILABILITY", "MAXIMUM_PERFORMANCE", "MAXIMUM_PROTECTION"
        :type protection_mode: str

        :param transport_type:
            The value to assign to the transport_type property of this CreateDataGuardAssociationToExistingVmClusterDetails.
            Allowed values for this property are: "SYNC", "ASYNC", "FASTSYNC"
        :type transport_type: str

        :param creation_type:
            The value to assign to the creation_type property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type creation_type: str

        :param is_active_data_guard_enabled:
            The value to assign to the is_active_data_guard_enabled property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type is_active_data_guard_enabled: bool

        :param peer_db_unique_name:
            The value to assign to the peer_db_unique_name property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type peer_db_unique_name: str

        :param peer_sid_prefix:
            The value to assign to the peer_sid_prefix property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type peer_sid_prefix: str

        :param peer_vm_cluster_id:
            The value to assign to the peer_vm_cluster_id property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type peer_vm_cluster_id: str

        :param peer_db_home_id:
            The value to assign to the peer_db_home_id property of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type peer_db_home_id: str

        """
        # BUG FIX: the swagger type and JSON attribute name for
        # database_admin_password had been replaced by a '<PASSWORD>'
        # placeholder, which broke (de)serialization of this model. Restored
        # to 'str' / 'databaseAdminPassword' per the SDK convention used by
        # every other field.
        self.swagger_types = {
            'database_software_image_id': 'str',
            'database_admin_password': 'str',
            'protection_mode': 'str',
            'transport_type': 'str',
            'creation_type': 'str',
            'is_active_data_guard_enabled': 'bool',
            'peer_db_unique_name': 'str',
            'peer_sid_prefix': 'str',
            'peer_vm_cluster_id': 'str',
            'peer_db_home_id': 'str'
        }

        self.attribute_map = {
            'database_software_image_id': 'databaseSoftwareImageId',
            'database_admin_password': 'databaseAdminPassword',
            'protection_mode': 'protectionMode',
            'transport_type': 'transportType',
            'creation_type': 'creationType',
            'is_active_data_guard_enabled': 'isActiveDataGuardEnabled',
            'peer_db_unique_name': 'peerDbUniqueName',
            'peer_sid_prefix': 'peerSidPrefix',
            'peer_vm_cluster_id': 'peerVmClusterId',
            'peer_db_home_id': 'peerDbHomeId'
        }

        self._database_software_image_id = None
        self._database_admin_password = None
        self._protection_mode = None
        self._transport_type = None
        self._creation_type = None
        self._is_active_data_guard_enabled = None
        self._peer_db_unique_name = None
        self._peer_sid_prefix = None
        self._peer_vm_cluster_id = None
        self._peer_db_home_id = None
        # Fixed discriminator for this subtype; callers must not change it.
        self._creation_type = 'ExistingVmCluster'

    @property
    def peer_vm_cluster_id(self):
        """
        Gets the peer_vm_cluster_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        The `OCID`__ of the VM Cluster in which to create the standby database.
        You must supply this value if creationType is `ExistingVmCluster`.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :return: The peer_vm_cluster_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :rtype: str
        """
        return self._peer_vm_cluster_id

    @peer_vm_cluster_id.setter
    def peer_vm_cluster_id(self, peer_vm_cluster_id):
        """
        Sets the peer_vm_cluster_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        The `OCID`__ of the VM Cluster in which to create the standby database.
        You must supply this value if creationType is `ExistingVmCluster`.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :param peer_vm_cluster_id: The peer_vm_cluster_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type: str
        """
        self._peer_vm_cluster_id = peer_vm_cluster_id

    @property
    def peer_db_home_id(self):
        """
        Gets the peer_db_home_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        The `OCID`__ of the DB home in which to create the standby database.
        You must supply this value to create standby database with an existing DB home

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :return: The peer_db_home_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :rtype: str
        """
        return self._peer_db_home_id

    @peer_db_home_id.setter
    def peer_db_home_id(self, peer_db_home_id):
        """
        Sets the peer_db_home_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        The `OCID`__ of the DB home in which to create the standby database.
        You must supply this value to create standby database with an existing DB home

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

        :param peer_db_home_id: The peer_db_home_id of this CreateDataGuardAssociationToExistingVmClusterDetails.
        :type: str
        """
        self._peer_db_home_id = peer_db_home_id

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
<filename>Driving/gen_gauge.py<gh_stars>1-10
'''
usage: python gen_diff.py -h
'''
from __future__ import print_function
import argparse
from scipy.misc import imsave
from driving_models import *
from utils import *
import pickle
# Command-line interface: a single positional argument selecting which of the
# three Dave models to profile (1, 2 or 3).
parser = argparse.ArgumentParser(
    description='Main function for difference-inducing input generation in Driving dataset')
parser.add_argument('model', help="model to generate highs and lows", type=int)
args = parser.parse_args()

# Fixed seed so the train/test split below is reproducible across runs.
random.seed(4172306)

# input image dimensions
img_rows, img_cols = 100, 100
input_shape = (img_rows, img_cols, 3)

# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)

# load multiple models sharing same input tensor
# Inference mode: disables dropout/batch-norm training behaviour.
K.set_learning_phase(0)
model1 = Dave_orig(input_tensor=input_tensor, load_weights=True)
model2 = Dave_norminit(input_tensor=input_tensor, load_weights=True)
model3 = Dave_dropout(input_tensor=input_tensor, load_weights=True)

# init coverage table
model_layer_dict1, model_layer_dict2, model_layer_dict3 = init_coverage_tables(model1, model2, model3)

# partition data into a training and testing set
img_paths = image.list_pictures('./testing/center', ext='jpg')
random.shuffle(img_paths)
testing_set = img_paths[:2000]
training_set = img_paths[2000:]
def update_neuron_bounds(input_data, model, model_layer_dict):
    """Fold one input's activations into per-neuron (lower, upper) bounds.

    Args:
        input_data: preprocessed image batch fed to ``model``.
        model: Keras model whose intermediate activations are profiled.
        model_layer_dict: dict keyed by ``(layer_name, neuron_index)``; each
            entry is either a falsy placeholder (neuron not seen yet) or a
            ``(lower, upper)`` tuple that is widened in place.
    """
    # Profile every layer except structural ones (inputs / flatten).
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]
    # A single model that emits all profiled layers' outputs in one forward pass.
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        layer = intermediate_layer_output[0]  # drop the batch dimension
        # `range` replaces the Python-2-only `xrange`; iteration behaviour is
        # identical on both Python 2 and 3.
        for neuron in range(num_neurons(layer.shape)):  # index through every single (indiv) neuron
            v = layer[np.unravel_index(neuron, layer.shape)]
            if not model_layer_dict[(layer_names[i], neuron)]:
                # First observation: both bounds collapse to this value.
                model_layer_dict[(layer_names[i], neuron)] = (v, v)
            else:
                (lower, upper) = model_layer_dict[(layer_names[i], neuron)]
                if v > upper:
                    model_layer_dict[(layer_names[i], neuron)] = (lower, v)
                elif v < lower:
                    model_layer_dict[(layer_names[i], neuron)] = (v, upper)
# ==============================================================================================
# start gen deepgauge bounds
# Map each CLI model id to (model, coverage dict, output pickle path) so the
# profiling loop is written once instead of three near-identical copies.
_runs = {
    1: (model1, model_layer_dict1, "m1.p"),
    2: (model2, model_layer_dict2, "m2.p"),
    3: (model3, model_layer_dict3, "m3.p"),
}
if args.model in _runs:
    model, layer_dict, out_path = _runs[args.model]
    for i, train_img in enumerate(training_set):
        gen_img = preprocess_image(train_img)
        update_neuron_bounds(gen_img, model, layer_dict)
        print(i)
    # `with` guarantees the pickle file handle is closed (the original
    # `pickle.dump(..., open(path, "wb"))` leaked it).
    with open(out_path, "wb") as out_file:
        pickle.dump(layer_dict, out_file)
|
<filename>tests/devices/test_inkbird_thermostat.py
from unittest import IsolatedAsyncioTestCase, skip
from unittest.mock import AsyncMock, patch
from homeassistant.components.climate.const import (
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from custom_components.tuya_local.generic.climate import TuyaLocalClimate
from custom_components.tuya_local.helpers.device_config import TuyaDeviceConfig
from ..const import INKBIRD_THERMOSTAT_PAYLOAD
from ..helpers import assert_device_properties_set
# Tuya data-point (DPS) ids used by the Inkbird thermostat, as referenced by
# the tests below. The UNKNOWN* ids are reported by the device but their
# meaning has not been identified yet.
ERROR_DPS = "12"
UNIT_DPS = "101"
CALIBRATE_DPS = "102"
PRESET_DPS = "103"
CURRENTTEMP_DPS = "104"
TEMPLOW_DPS = "106"
TIME_THRES_DPS = "108"
HIGH_THRES_DPS = "109"
LOW_THRES_DPS = "110"
ALARM_HIGH_DPS = "111"
ALARM_LOW_DPS = "112"
ALARM_TIME_DPS = "113"
TEMPHIGH_DPS = "114"
SWITCH_DPS = "115"
TEMPF_DPS = "116"
UNKNOWN117_DPS = "117"
UNKNOWN118_DPS = "118"
UNKNOWN119_DPS = "119"
UNKNOWN120_DPS = "120"
class TestInkbirdThermostat(IsolatedAsyncioTestCase):
    """Tests for the Inkbird thermostat climate entity configuration."""

    def setUp(self):
        # Patch out the real device so no network traffic occurs.
        device_patcher = patch("custom_components.tuya_local.device.TuyaLocalDevice")
        self.addCleanup(device_patcher.stop)
        self.mock_device = device_patcher.start()
        cfg = TuyaDeviceConfig("inkbird_thermostat.yaml")
        entities = {}
        entities[cfg.primary_entity.entity] = cfg.primary_entity
        for e in cfg.secondary_entities():
            entities[e.entity] = e
        self.climate_name = (
            "missing" if "climate" not in entities else entities["climate"].name
        )
        self.subject = TuyaLocalClimate(self.mock_device(), entities.get("climate"))
        # DPS values are served to the entity from this mutable copy.
        self.dps = INKBIRD_THERMOSTAT_PAYLOAD.copy()
        self.subject._device.get_property.side_effect = lambda id: self.dps[id]

    def test_supported_features(self):
        self.assertEqual(
            self.subject.supported_features,
            SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_PRESET_MODE,
        )

    def test_shouldPoll(self):
        self.assertTrue(self.subject.should_poll)

    def test_name_returns_device_name(self):
        self.assertEqual(self.subject.name, self.subject._device.name)

    def test_friendly_name_returns_config_name(self):
        self.assertEqual(self.subject.friendly_name, self.climate_name)

    def test_unique_id_returns_device_unique_id(self):
        self.assertEqual(self.subject.unique_id, self.subject._device.unique_id)

    def test_device_info_returns_device_info_from_device(self):
        self.assertEqual(self.subject.device_info, self.subject._device.device_info)

    def test_icon(self):
        """Test that the icon is as expected."""
        self.dps[ALARM_HIGH_DPS] = False
        self.dps[ALARM_LOW_DPS] = False
        self.dps[ALARM_TIME_DPS] = False
        self.dps[SWITCH_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:thermometer")
        self.dps[SWITCH_DPS] = False
        self.assertEqual(self.subject.icon, "mdi:thermometer-off")
        # Any active alarm takes precedence over the switch state.
        self.dps[ALARM_HIGH_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:thermometer-alert")
        self.dps[SWITCH_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:thermometer-alert")
        self.dps[ALARM_HIGH_DPS] = False
        self.dps[ALARM_LOW_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:thermometer-alert")
        self.dps[ALARM_LOW_DPS] = False
        self.dps[ALARM_TIME_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:thermometer-alert")

    def test_climate_hvac_modes(self):
        self.assertEqual(self.subject.hvac_modes, [])

    def test_preset_mode(self):
        self.dps[PRESET_DPS] = "on"
        self.assertEqual(self.subject.preset_mode, "On")
        self.dps[PRESET_DPS] = "pause"
        self.assertEqual(self.subject.preset_mode, "Pause")
        self.dps[PRESET_DPS] = "off"
        self.assertEqual(self.subject.preset_mode, "Off")
        self.dps[PRESET_DPS] = None
        self.assertEqual(self.subject.preset_mode, None)

    def test_preset_modes(self):
        self.assertCountEqual(
            self.subject.preset_modes,
            {"On", "Pause", "Off"},
        )

    async def test_set_preset_to_on(self):
        async with assert_device_properties_set(
            self.subject._device,
            {
                PRESET_DPS: "on",
            },
        ):
            await self.subject.async_set_preset_mode("On")
            self.subject._device.anticipate_property_value.assert_not_called()

    async def test_set_preset_to_pause(self):
        async with assert_device_properties_set(
            self.subject._device,
            {
                PRESET_DPS: "pause",
            },
        ):
            await self.subject.async_set_preset_mode("Pause")
            self.subject._device.anticipate_property_value.assert_not_called()

    async def test_set_preset_to_off(self):
        async with assert_device_properties_set(
            self.subject._device,
            {
                PRESET_DPS: "off",
            },
        ):
            await self.subject.async_set_preset_mode("Off")
            self.subject._device.anticipate_property_value.assert_not_called()

    def test_current_temperature(self):
        # Device reports tenths of a degree.
        self.dps[CURRENTTEMP_DPS] = 289
        self.assertEqual(self.subject.current_temperature, 28.9)

    def test_temperature_unit(self):
        self.dps[UNIT_DPS] = "F"
        self.assertEqual(self.subject.temperature_unit, TEMP_FAHRENHEIT)
        self.dps[UNIT_DPS] = "C"
        self.assertEqual(self.subject.temperature_unit, TEMP_CELSIUS)

    def test_temperature_range(self):
        self.dps[TEMPHIGH_DPS] = 301
        self.dps[TEMPLOW_DPS] = 255
        self.assertEqual(self.subject.target_temperature_high, 30.1)
        self.assertEqual(self.subject.target_temperature_low, 25.5)

    async def test_set_temperature_range(self):
        async with assert_device_properties_set(
            self.subject._device,
            {
                TEMPHIGH_DPS: 322,
                TEMPLOW_DPS: 266,
            },
        ):
            await self.subject.async_set_temperature(
                target_temp_high=32.2, target_temp_low=26.6
            )

    def test_device_state_attributes(self):
        self.dps[ERROR_DPS] = 1
        self.dps[CALIBRATE_DPS] = 1
        self.dps[TIME_THRES_DPS] = 5
        self.dps[HIGH_THRES_DPS] = 400
        self.dps[LOW_THRES_DPS] = 300
        self.dps[ALARM_HIGH_DPS] = True
        self.dps[ALARM_LOW_DPS] = False
        self.dps[ALARM_TIME_DPS] = True
        self.dps[SWITCH_DPS] = False
        self.dps[TEMPF_DPS] = 999
        self.dps[UNKNOWN117_DPS] = True
        self.dps[UNKNOWN118_DPS] = False
        self.dps[UNKNOWN119_DPS] = True
        self.dps[UNKNOWN120_DPS] = False
        # BUG FIX: assertCountEqual on two dicts only compares their KEYS, so
        # the attribute values were never actually checked. assertEqual
        # compares the full mapping.
        self.assertEqual(
            self.subject.device_state_attributes,
            {
                "error": 1,
                "temperature_calibration_offset": 0.1,
                "heat_time_alarm_threshold_hours": 5,
                "high_temp_alarm_threshold": 40.0,
                "low_temp_alarm_threshold": 30.0,
                "high_temp_alarm": True,
                "low_temp_alarm": False,
                "heat_time_alarm": True,
                "switch_state": False,
                "current_temperature_f": 99.9,
                "unknown_117": True,
                "unknown_118": False,
                "unknown_119": True,
                "unknown_120": False,
            },
        )

    async def test_update(self):
        result = AsyncMock()
        self.subject._device.async_refresh.return_value = result()
        await self.subject.async_update()
        self.subject._device.async_refresh.assert_called_once()
        result.assert_awaited()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created by: <NAME>
# Created date: 11/03/2021
# Copyright 2021 InferStat Ltd
"""
This submodule includes facilities for operations such as converting positions to price predictions and vice versa.
"""
from copy import deepcopy
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline, FeatureUnion
from sklearn.preprocessing import FunctionTransformer, Binarizer
from infertrade.utilities.performance import calculate_portfolio_performance_python
from infertrade.PandasEnum import PandasEnum, create_price_column_from_synonym
def pct_chg(x: Union[np.ndarray, pd.Series, pd.DataFrame]) -> np.ndarray:
    """Percentage change between the current and a prior element.

    The first element has no predecessor, so the first entry of the result is
    NaN (pandas ``pct_change`` semantics).

    Args:
        x: A numpy.ndarray, pandas.Series or pandas.DataFrame object
           (the original annotation omitted DataFrame, which the body handles).

    Returns:
        A numpy.ndarray column vector (shape ``(n, 1)``) with the results.
    """
    x = x.astype("float64")
    if isinstance(x, pd.DataFrame):
        pc = x.pct_change().values.reshape(-1, 1)
    else:
        # Flatten, then use a Series so pandas handles the NaN for the first row.
        x = np.reshape(x, (-1,))
        x_df = pd.Series(x, name="x")
        pc = x_df.pct_change().values.reshape(-1, 1)
    return pc
def lag(x: Union[np.ndarray, pd.Series], shift: int = 1) -> np.ndarray:
    """Lag (shift) series by desired number of periods.

    The first ``shift`` entries have no predecessor and are set to NaN.

    Args:
        x: A numpy.ndarray or pandas.Series object (1-D or 2-D)
        shift: The number of periods by which to shift the input time series

    Returns:
        A numpy.ndarray with the results
    """
    x = np.asarray(x, dtype="float64")
    # np.roll wraps trailing values around to the front; those wrapped entries
    # are meaningless as lags, so they are overwritten with NaN below.
    lagged_array = np.roll(x, shift=shift, axis=0)
    # Single-axis slice works for both 1-D and 2-D input; the original
    # `[:shift, :]` raised IndexError for the 1-D (Series) case the signature
    # advertises. Behaviour for 2-D input is unchanged.
    lagged_array[:shift] = np.nan
    return lagged_array
def research_over_price_minus_one(x: Union[np.ndarray, pd.Series], shift: int) -> np.ndarray:
    """Ratio of the lagged second column over the lagged first column, minus one.

    Args:
        x: A numpy.ndarray or pandas.Series object with exactly two columns
        shift: The number of periods by which the lag both series

    Returns:
        A numpy.ndarray column vector with the results

    Raises:
        IndexError: If the input does not have exactly two columns.
    """
    x = np.array(x)
    x = x.astype("float64")
    if x.shape[1] != 2:
        # Fixed: was an f-string with no placeholders (ruff F541).
        raise IndexError("Number of columns must be 2.")
    # Lag both columns in place of the original single-step sklearn pipeline,
    # which only wrapped this same roll-and-NaN operation.
    lagged = np.roll(x, shift=shift, axis=0)
    lagged[:shift, :] = np.nan
    pmr = lagged[:, [1]] / lagged[:, [0]] - 1
    return pmr
class PricePredictionFromSignalRegression(TransformerMixin, BaseEstimator):
    """This class creates price predictions from signal values.

    A rolling linear regression is fitted from signal-derived features to
    historical price moves, and each window's model forecasts the following
    period's price change.

    Attributes:
        market_to_trade: The name of the column which contains the historical prices.
    """
    def __init__(self, market_to_trade: str = None):
        """Construction method for class PricePredictionFromPositions.
        Args:
            market_to_trade: The name of the column which contains the historical prices.
        Returns:
            None
        """
        if not market_to_trade:
            # We default to "price" as the target.
            market_to_trade = PandasEnum.MID.value
        self.market_to_trade = market_to_trade
    def fit(self, X: np.array, y=None):
        # Reset the fitted feature/target pipeline; the real fit happens lazily
        # inside transform() because it needs the full time series.
        self.fitted_features_and_target_ = None
        return self
    def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """This method transforms a signal input to a price prediction.
        Args:
            X: A pandas.DataFrame object
        Returns:
            A pandas.DataFrame object
        """
        X_ = deepcopy(X)
        create_price_column_from_synonym(X_)
        # Rolling window length; the forecast window is capped by the series length.
        regression_period = 120
        forecast_period = min(regression_period, len(X_))
        prediction_indices = self._get_model_prediction_indices(len(X_), regression_period, forecast_period)
        self._fit_features_matrix_target_array(X_)
        historical_signal_levels, historical_price_moves = self._get_features_matrix_target_array(X_)
        for ii_day in range(len(prediction_indices)):
            model_idx = prediction_indices[ii_day]["model_idx"]
            prediction_idx = prediction_indices[ii_day]["prediction_idx"]
            # Fit model
            regression_period_signal = historical_signal_levels[model_idx, :]
            regression_period_price_change = historical_price_moves[model_idx]
            rolling_regression_model = LinearRegression().fit(regression_period_signal, regression_period_price_change)
            # Predictions
            current_research = historical_signal_levels[prediction_idx, :]
            forecast = rolling_regression_model.predict(current_research)
            # Apply the calculated allocation to the dataframe.
            X_.loc[prediction_idx, PandasEnum.FORECAST_PRICE_CHANGE.value] = forecast
        if len(prediction_indices) == 0:
            # Series too short for even one window: no forecast available.
            X_[PandasEnum.FORECAST_PRICE_CHANGE.value] = 0
        else:
            # NOTE(review): the result of shift(-1) is discarded, so as written
            # this line has no effect. It presumably intended
            # `X_[col] = X_[col].shift(-1)` -- confirm intent before changing,
            # as fixing it would alter downstream results.
            X_[PandasEnum.FORECAST_PRICE_CHANGE.value].shift(-1)
        return X_
    def _get_features_matrix_transformer(self) -> ColumnTransformer:
        """
        1. Percent change of research series as predictor.
        2. Research series level as predictor.
        """
        percent_change_trans = FunctionTransformer(pct_chg)
        lag_1 = FunctionTransformer(lag, kw_args={"shift": 1})
        lag_pct = make_pipeline(lag_1, percent_change_trans)
        lp_m_lr_l1 = FunctionTransformer(research_over_price_minus_one, kw_args={"shift": 1})
        # Three feature columns: lagged signal level, lagged signal percent
        # change, and lagged signal/price ratio minus one.
        features = ColumnTransformer(
            [
                ("signal", lag_1, ["signal"]),
                ("signal_changes", lag_pct, ["signal"]),
                ("signal_differences", lp_m_lr_l1, [self.market_to_trade, "signal"]),
            ]
        )
        self.feature_names = ["signal", "signal_changes", "signal_differences"]
        return features
    def _get_features_matrix_target_array(self, input_time_series: pd.DataFrame) -> Tuple[pd.Series, pd.Series]:
        """Returns the target array features."""
        feat_tar_arr = self.fitted_features_and_target_.transform(input_time_series)
        # Replace NaN/inf produced by the lag/pct-change transforms so the
        # regression never sees non-finite values.
        feat_tar_arr = np.nan_to_num(feat_tar_arr, nan=0.0, posinf=0.0, neginf=0.0)
        # Last column is the target; everything before it is the feature matrix.
        features = np.delete(feat_tar_arr, -1, axis=1)
        target = feat_tar_arr[:, -1]
        return features, target
    def _fit_features_matrix_target_array(self, X: pd.DataFrame):
        """Get features matrix and target array. TODO - more description helpful."""
        features = self._get_features_matrix_transformer()
        target = self._get_target_array_transformer()
        # Union so a single transform() emits features and target side by side.
        feat_tar = FeatureUnion(transformer_list=[("features", features), ("target", target)])
        self.fitted_features_and_target_ = feat_tar.fit(X)
    def _get_target_array_transformer(self):
        """Use level of price series as target (dependant) variable."""
        percent_change_trans = FunctionTransformer(pct_chg)
        target = ColumnTransformer([("historical_price_moves", percent_change_trans, [self.market_to_trade])])
        self.target_name = ["historical_price_moves"]
        return target
    @staticmethod
    def _get_model_prediction_indices(series_length: int, reg_period: int, forecast_period: int) -> List[dict]:
        """
        Create list of ranges for rolling regression.
        Parameters
        ----------
        series_length - total length of series
        reg_period - regression period
        forecast_period - forecast period
        Returns
        -------
        - model_idx are ranges for model fitting
        - prediction_idx are ranges for forecasting
        Examples
        --------
        {'model_idx': range(0, 50), 'prediction_idx': range(50, 60)}
        {'model_idx': range(10, 60), 'prediction_idx': range(60, 70)}
        {'model_idx': range(20, 70), 'prediction_idx': range(70, 80)}
        {'model_idx': range(30, 80), 'prediction_idx': range(80, 90)}
        {'model_idx': range(40, 90), 'prediction_idx': range(90, 100)}
        """
        indices_for_prediction = []
        for i in range(0, series_length - reg_period, forecast_period):
            # idx for model
            ind_start = i
            ind_end = i + reg_period
            # indices_for_prediction
            ind_pred_start = ind_end
            ind_pred_end = ind_pred_start + forecast_period
            # Clamp the final window to the end of the series.
            if ind_pred_end > series_length:
                ind_pred_end = series_length
            indices_for_prediction.append(
                {"model_idx": range(ind_start, ind_end), "prediction_idx": range(ind_pred_start, ind_pred_end)}
            )
        return indices_for_prediction
class PositionsFromPricePrediction(TransformerMixin, BaseEstimator):
    """This class calculates the positions to take assuming Kelly Criterion."""

    def __init__(self):
        """Construction method for class PositionsFromPricePrediction.
        Args:
            None
        Returns:
            None
        """
        pass

    def fit(self, X, y=None):
        """This method is not used."""
        return self

    def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """This method calculates the positions to be taken based on the forecast price, assuming the Kelly Criterion.
        Args:
            X: A pandas.DataFrame object
        Returns:
            A pandas.DataFrame object
        """
        frame = deepcopy(X)
        assumed_volatility = 0.1
        kelly_fraction = 1.0
        # Kelly sizing: position = fraction * expected return / variance.
        optimum = X[PandasEnum.FORECAST_PRICE_CHANGE.value] / assumed_volatility ** 2
        frame[PandasEnum.ALLOCATION.value] = kelly_fraction * optimum
        return frame
class PricePredictionFromPositions(TransformerMixin, BaseEstimator):
    """This class converts positions into implicit price predictions based on the Kelly Criterion and an assumed volatility."""

    def __init__(self):
        """Construction method for class PricePredictionFromPositions.
        Args:
            None
        Returns:
            None
        """
        pass

    def fit(self, X, y=None):
        """This method is not used."""
        return self

    def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """This method converts allocations into the forecast one-day price changes.
        Args:
            X: A pandas.DataFrame object
        Returns:
            A pandas.DataFrame object
        """
        X_ = deepcopy(X)
        volatility = 0.1
        kelly_fraction = 1.0
        # Invert the Kelly sizing used by PositionsFromPricePrediction:
        # forecast = (position / fraction) * variance.
        kelly_recommended_optimum = X_[PandasEnum.ALLOCATION.value] / kelly_fraction
        # BUG FIX: the column name was previously the literal string
        # "PandasEnum.FORECAST_PRICE_CHANGE.value" (the enum expression inside
        # quotes), so the forecast landed in a bogus column instead of the one
        # every other transformer reads.
        X_[PandasEnum.FORECAST_PRICE_CHANGE.value] = kelly_recommended_optimum * volatility ** 2
        return X_
class ReturnsFromPositions(TransformerMixin, BaseEstimator):
    """This class calculates returns from positions."""
    def __init__(self):
        """Construction method for class ReturnsFromPositions.
        Args:
            None
        Returns:
            None
        """
        pass
    def fit(self, X, y=None):
        """This method is not used."""
        return self
    def transform(self, X: pd.DataFrame, y=None) -> pd.DataFrame:
        """This method converts positions into the cumulative portfolio return.
        Args:
            X: A pandas.DataFrame object
        Returns:
            A pandas.DataFrame object
        """
        # Two independent copies: X_2 is handed to the performance calculation
        # (presumably because it may mutate its argument -- TODO confirm),
        # while X_1 receives the resulting valuation column and is returned.
        X_1 = deepcopy(X)
        X_2 = deepcopy(X)
        X_1[PandasEnum.VALUATION.value] = calculate_portfolio_performance_python(X_2)[PandasEnum.VALUATION.value]
        return X_1
def scikit_allocation_factory(allocation_function: callable) -> FunctionTransformer:
    """Wrap a position-calculation function as a SciKit Learn compatible Transformer.
    Args:
        allocation_function: A function to be turned into a sklearn.preprocessing.FunctionTransformer
    Returns:
        A sklearn.preprocessing.FunctionTransformer
    """
    transformer = FunctionTransformer(allocation_function)
    return transformer
|
<reponame>robbrockbank/felix<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
felix.ipsets
~~~~~~~~~~~~
IP sets management functions.
"""
from collections import defaultdict
from itertools import chain
import logging
from calico.felix import futils
from calico.felix.futils import IPV4, IPV6, FailedSystemCall
from calico.felix.actor import actor_message, Actor
from calico.felix.refcount import ReferenceManager, RefCountedActor
_log = logging.getLogger(__name__)
FELIX_PFX = "felix-"
# Name prefixes for live and temporary ipsets, per IP version; used both to
# name new ipsets and to recognise our own ipsets during cleanup().
IPSET_PREFIX = {IPV4: FELIX_PFX+"v4-", IPV6: FELIX_PFX+"v6-"}
IPSET_TMP_PREFIX = {IPV4: FELIX_PFX+"tmp-v4-", IPV6: FELIX_PFX+"tmp-v6-"}
class IpsetManager(ReferenceManager):
    def __init__(self, ip_type):
        """
        Manages all the ipsets for tags for either IPv4 or IPv6.

        :param ip_type: IP type (IPV4 or IPV6)
        """
        super(IpsetManager, self).__init__(qualifier=ip_type)
        self.ip_type = ip_type

        # State.
        # Tag IDs indexed by profile IDs
        self.tags_by_prof_id = {}
        # EndpointData "structs" indexed by EndpointId.
        self.endpoint_data_by_ep_id = {}

        # Main index.  Since an IP address can be assigned to multiple
        # endpoints, we need to track which endpoints reference an IP.  When
        # we find the set of endpoints with an IP is empty, we remove the
        # ip from the tag.
        # ip_owners_by_tag[tag][ip][profile_id] = set([combined_id,
        #                                              combined_id2, ...])
        # Here "combined_id" is an EndpointId object.
        self.ip_owners_by_tag = defaultdict(
            lambda: defaultdict(lambda: defaultdict(set)))
        # Set of EndpointId objects referenced by profile IDs.
        self.endpoint_ids_by_profile_id = defaultdict(set)

        # Set of tag IDs that may be out of sync.  Accumulated by the
        # index-update functions.  We apply the updates in _finish_msg_batch().
        # May include non-live tag IDs.
        self._dirty_tags = set()
        # When True, the next ipset update rewrites members unconditionally
        # (set after a snapshot, when incremental state can't be trusted).
        self._force_reprogram = False
    def _create(self, tag_id):
        # ReferenceManager hook: build the actor for a newly-referenced tag.
        # The tag ID is shortened to 16 chars to fit the kernel's ipset
        # name-length limit.
        active_ipset = TagIpset(futils.uniquely_shorten(tag_id, 16),
                                self.ip_type)
        return active_ipset
    def _on_object_started(self, tag_id, active_ipset):
        # ReferenceManager hook: called once the TagIpset actor is running.
        _log.debug("TagIpset actor for %s started", tag_id)
        # Fill the ipset in with its members, this will trigger its first
        # programming, after which it will call us back to tell us it is ready.
        # We can't use self._dirty_tags to defer this in case the set becomes
        # unreferenced before _finish_msg_batch() is called.
        self._update_active_ipset(tag_id)
    def _update_active_ipset(self, tag_id):
        """
        Replaces the members of the identified TagIpset with the
        current set.

        :param tag_id: The ID of the tag, must be an active tag.
        """
        assert self._is_starting_or_live(tag_id)
        active_ipset = self.objects_by_id[tag_id]
        # Current membership is the set of IPs indexed under this tag.
        members = self.ip_owners_by_tag.get(tag_id, {}).keys()
        # NOTE: `async=True` is the actor framework's fire-and-forget flag;
        # `async` is Python-2-only as an identifier (reserved word in py3.7+).
        active_ipset.replace_members(set(members),
                                     force_reprogram=self._force_reprogram,
                                     async=True)
    def _update_dirty_active_ipsets(self):
        """
        Updates the members of any live ActiveIpsets that are marked dirty.

        Clears the set of dirty tags as a side-effect.
        """
        for tag_id in self._dirty_tags:
            # Dirty tags may include dead/unreferenced ones; only live actors
            # can be reprogrammed.
            if self._is_starting_or_live(tag_id):
                self._update_active_ipset(tag_id)
            self._maybe_yield()
        self._dirty_tags.clear()
@property
def nets_key(self):
nets = "ipv4_nets" if self.ip_type == IPV4 else "ipv6_nets"
return nets
    @actor_message()
    def apply_snapshot(self, tags_by_prof_id, endpoints_by_id):
        """
        Apply a snapshot read from etcd, replacing existing state.

        :param tags_by_prof_id: A dict mapping security profile ID to a list of
            profile tags.
        :param endpoints_by_id: A dict mapping EndpointId objects to endpoint
            data dicts.
        """
        _log.info("Applying tags snapshot. %s tags, %s endpoints",
                  len(tags_by_prof_id), len(endpoints_by_id))
        # Track profiles present before but absent from the snapshot so they
        # can be deleted below.
        missing_profile_ids = set(self.tags_by_prof_id.keys())
        for profile_id, tags in tags_by_prof_id.iteritems():
            assert tags is not None
            self.on_tags_update(profile_id, tags)
            missing_profile_ids.discard(profile_id)
            # Cooperative yield: keep the actor responsive during big snapshots.
            self._maybe_yield()
        for profile_id in missing_profile_ids:
            # None signals deletion to on_tags_update().
            self.on_tags_update(profile_id, None)
            self._maybe_yield()
        del missing_profile_ids
        # Same add-then-delete pattern for endpoints.
        missing_endpoints = set(self.endpoint_data_by_ep_id.keys())
        for endpoint_id, endpoint in endpoints_by_id.iteritems():
            assert endpoint is not None
            endpoint_data = self._endpoint_data_from_dict(endpoint_id,
                                                          endpoint)
            self._on_endpoint_data_update(endpoint_id, endpoint_data)
            missing_endpoints.discard(endpoint_id)
            self._maybe_yield()
        for endpoint_id in missing_endpoints:
            self._on_endpoint_data_update(endpoint_id, EMPTY_ENDPOINT_DATA)
            self._maybe_yield()
        # Incremental state can't be trusted across a snapshot: force a full
        # rewrite of every ipset on the next update.
        self._force_reprogram = True
        _log.info("Tags snapshot applied: %s tags, %s endpoints",
                  len(tags_by_prof_id), len(endpoints_by_id))
    @actor_message()
    def cleanup(self):
        """
        Clean up left-over ipsets that existed at start-of-day.
        """
        _log.info("Cleaning up left-over ipsets.")
        # list_ipset_names() is defined elsewhere in this module; it queries
        # the kernel for all existing ipsets.
        all_ipsets = list_ipset_names()
        # only clean up our own rubbish.
        pfx = IPSET_PREFIX[self.ip_type]
        tmppfx = IPSET_TMP_PREFIX[self.ip_type]
        felix_ipsets = set([n for n in all_ipsets if (n.startswith(pfx) or
                                                      n.startswith(tmppfx))])
        # Whitelist every name a live or still-stopping ipset might own, so we
        # never destroy an ipset that is in use or about to be used.
        whitelist = set()
        live_ipsets = self.objects_by_id.itervalues()
        # stopping_objects_by_id is a dict of sets of TagIpset objects,
        # chain them together.
        stopping_ipsets = chain.from_iterable(
            self.stopping_objects_by_id.itervalues())
        for ipset in chain(live_ipsets, stopping_ipsets):
            # Ask the ipset for all the names it may use and whitelist.
            whitelist.update(ipset.owned_ipset_names())
        _log.debug("Whitelisted ipsets: %s", whitelist)
        ipsets_to_delete = felix_ipsets - whitelist
        _log.debug("Deleting ipsets: %s", ipsets_to_delete)
        # Delete the ipsets before we return.  We can't queue these up since
        # that could conflict if someone increffed one of the ones we're about
        # to delete.
        for ipset_name in ipsets_to_delete:
            try:
                futils.check_call(["ipset", "destroy", ipset_name])
            except FailedSystemCall:
                # Best-effort: an in-use ipset can't be destroyed; leave it
                # for the next cleanup pass rather than crashing.
                _log.exception("Failed to clean up dead ipset %s, will "
                               "retry on next cleanup.", ipset_name)
    @actor_message()
    def on_tags_update(self, profile_id, tags):
        """
        Called when the tag list of the given profile has changed or been
        deleted.

        Updates the indices and notifies any live TagIpset objects of any
        any changes that affect them.

        :param str profile_id: Profile ID affected.
        :param list[str]|NoneType tags: List of tags for the given profile or
            None if deleted.
        """
        _log.info("Tags for profile %s updated", profile_id)

        # General approach is to default to the empty list if the new/old
        # tag list is missing; then add/delete falls out: all the tags will
        # end up in added_tags/removed_tags.
        old_tags = set(self.tags_by_prof_id.get(profile_id, []))
        new_tags = set(tags or [])
        # Find the endpoints that use these tags and work out what tags have
        # been added/removed.
        endpoint_ids = self.endpoint_ids_by_profile_id.get(profile_id, set())
        added_tags = new_tags - old_tags
        removed_tags = old_tags - new_tags
        _log.debug("Endpoint IDs with this profile: %s", endpoint_ids)
        _log.debug("Profile %s added tags: %s", profile_id, added_tags)
        _log.debug("Profile %s removed tags: %s", profile_id, removed_tags)

        # Re-index every IP of every affected endpoint under the new tag set.
        for endpoint_id in endpoint_ids:
            endpoint = self.endpoint_data_by_ep_id.get(endpoint_id,
                                                       EMPTY_ENDPOINT_DATA)
            ip_addrs = endpoint.ip_addresses
            for tag_id in removed_tags:
                for ip in ip_addrs:
                    self._remove_mapping(tag_id, profile_id, endpoint_id, ip)
            for tag_id in added_tags:
                for ip in ip_addrs:
                    self._add_mapping(tag_id, profile_id, endpoint_id, ip)

        if tags is None:
            _log.info("Tags for profile %s deleted", profile_id)
            self.tags_by_prof_id.pop(profile_id, None)
        else:
            self.tags_by_prof_id[profile_id] = tags
@actor_message()
def on_endpoint_update(self, endpoint_id, endpoint):
"""
Update tag memberships and indices with the new endpoint dict.
:param EndpointId endpoint_id: ID of the endpoint.
:param dict|NoneType endpoint: Either a dict containing endpoint
information or None to indicate deletion.
"""
endpoint_data = self._endpoint_data_from_dict(endpoint_id, endpoint)
self._on_endpoint_data_update(endpoint_id, endpoint_data)
def _endpoint_data_from_dict(self, endpoint_id, endpoint_dict):
"""
Convert the endpoint dict, which may be large, into a struct-like
object in order to save occupancy.
As an optimization, if the endpoint doesn't contain any data relevant
to this manager, returns EMPTY_ENDPOINT_DATA.
:param dict|None endpoint_dict: The data model endpoint dict or None.
:return: An EndpointData object containing the data. If the input
was None, EMPTY_ENDPOINT_DATA is returned.
"""
if endpoint_dict is not None:
profile_ids = endpoint_dict.get("profile_ids", [])
nets_list = endpoint_dict.get(self.nets_key, [])
if profile_ids and nets_list:
# Optimization: only return an object if this endpoint makes
# some contribution to the IP addresses in the tags.
ips = map(futils.net_to_ip, nets_list)
return EndpointData(profile_ids, ips)
else:
_log.debug("Endpoint makes no contribution, "
"treating as missing: %s", endpoint_id)
return EMPTY_ENDPOINT_DATA
    def _on_endpoint_data_update(self, endpoint_id, endpoint_data):
        """
        Update tag memberships and indices with the new EndpointData
        object.

        :param EndpointId endpoint_id: ID of the endpoint.
        :param EndpointData endpoint_data: An EndpointData object, or
            EMPTY_ENDPOINT_DATA to indicate deletion (or endpoint being
            optimized out).
        """
        # Endpoint updates are the most complex to handle because they may
        # change the profile IDs (and hence the set of tags) as well as the
        # ip addresses attached to the interface.  In addition, the endpoint
        # may or may not have existed before.
        #
        # General approach: force all the possibilities through the same
        # update loops by defaulting values.  For example, if there was no
        # previous endpoint then we default old_tags to the empty set.  Then,
        # when we calculate removed_tags, we'll get the empty set and the
        # removal loop will be skipped.
        # pop() so that a deletion removes the entry in one step; a real
        # update re-adds it below.
        old_endpoint = self.endpoint_data_by_ep_id.pop(endpoint_id,
                                                       EMPTY_ENDPOINT_DATA)
        old_prof_ids = old_endpoint.profile_ids
        # old_tags/new_tags hold (profile_id, tag) pairs so the mapping
        # functions know which profile contributed each tag.
        old_tags = set()
        for profile_id in old_prof_ids:
            for tag in self.tags_by_prof_id.get(profile_id, []):
                old_tags.add((profile_id, tag))
        if endpoint_data != EMPTY_ENDPOINT_DATA:
            # EMPTY_ENDPOINT_DATA represents a deletion (or that the endpoint
            # has been optimized out earlier in the pipeline).  Only store
            # off real endpoints.
            _log.debug("Endpoint %s updated", endpoint_id)
            self.endpoint_data_by_ep_id[endpoint_id] = endpoint_data
        new_prof_ids = endpoint_data.profile_ids
        new_tags = set()
        for profile_id in new_prof_ids:
            for tag in self.tags_by_prof_id.get(profile_id, []):
                new_tags.add((profile_id, tag))
        if new_prof_ids != old_prof_ids:
            # Profile ID changed, or an add/delete.  the _xxx_profile_index
            # methods ignore profile_id == None so we'll do the right thing.
            _log.debug("Profile IDs changed from %s to %s",
                       old_prof_ids, new_prof_ids)
            self._remove_profile_index(old_prof_ids, endpoint_id)
            self._add_profile_index(new_prof_ids, endpoint_id)
        # Since we've defaulted new/old_tags to set() if needed, we can
        # use set operations to calculate the tag changes.
        added_tags = new_tags - old_tags
        unchanged_tags = new_tags & old_tags
        removed_tags = old_tags - new_tags
        # These default to set() if there are no IPs.
        old_ips = old_endpoint.ip_addresses
        new_ips = endpoint_data.ip_addresses
        # Add *new* IPs to new tags.  On a deletion, added_tags will be empty.
        # Do this first to avoid marking ipsets as dirty if an endpoint moves
        # from one profile to another but keeps the same tag.
        for profile_id, tag in added_tags:
            for ip in new_ips:
                self._add_mapping(tag, profile_id, endpoint_id, ip)
        # Change IPs in unchanged tags.
        added_ips = new_ips - old_ips
        removed_ips = old_ips - new_ips
        for profile_id, tag in unchanged_tags:
            for ip in removed_ips:
                self._remove_mapping(tag, profile_id, endpoint_id, ip)
            for ip in added_ips:
                self._add_mapping(tag, profile_id, endpoint_id, ip)
        # Remove *all* *old* IPs from removed tags.  For a deletion, only this
        # loop will fire.
        for profile_id, tag in removed_tags:
            for ip in old_ips:
                self._remove_mapping(tag, profile_id, endpoint_id, ip)
def _add_mapping(self, tag_id, profile_id, endpoint_id, ip_address):
"""
Adds the given tag->IP->profile->endpoint mapping to the index.
Marks the tag as dirty if the update resulted in the IP being
newly added.
:param str tag_id: Tag ID
:param str profile_id: Profile ID
:param EndpointId endpoint_id: ID of the endpoint
:param str ip_address: IP address to add
"""
ip_added = not bool(self.ip_owners_by_tag[tag_id][ip_address])
ep_ids = self.ip_owners_by_tag[tag_id][ip_address][profile_id]
ep_ids.add(endpoint_id)
if ip_added:
self._dirty_tags.add(tag_id)
    def _remove_mapping(self, tag_id, profile_id, endpoint_id, ip_address):
        """
        Removes the tag->IP->profile->endpoint mapping from index.
        Marks the tag as dirty if the update resulted in the IP being
        removed.

        :param str tag_id: Tag ID
        :param str profile_id: Profile ID
        :param EndpointId endpoint_id: ID of the endpoint
        :param str ip_address: IP address to remove
        """
        ep_ids = self.ip_owners_by_tag[tag_id][ip_address][profile_id]
        ep_ids.discard(endpoint_id)
        # Prune empty levels of the nested index from the inside out so we
        # don't leak empty dicts/sets.  The ordering matters: each check
        # must happen after the level below it may have been deleted.
        if not ep_ids:
            del self.ip_owners_by_tag[tag_id][ip_address][profile_id]
            if not self.ip_owners_by_tag[tag_id][ip_address]:
                del self.ip_owners_by_tag[tag_id][ip_address]
                # The IP no longer has any owners for this tag, so the
                # ipset contents changed; mark the tag for reprogramming.
                self._dirty_tags.add(tag_id)
            if not self.ip_owners_by_tag[tag_id]:
                del self.ip_owners_by_tag[tag_id]
def _add_profile_index(self, prof_ids, endpoint_id):
"""
Notes in the index that an endpoint uses the given profiles.
:param set[str] prof_ids: set of profile IDs that the endpoint is in.
:param EndpointId endpoint_id: ID of the endpoint
"""
for prof_id in prof_ids:
self.endpoint_ids_by_profile_id[prof_id].add(endpoint_id)
def _remove_profile_index(self, prof_ids, endpoint_id):
"""
Notes in the index that an endpoint no longer uses any of the
given profiles.
:param set[str] prof_ids: set of profile IDs to remove the endpoint
from.
:param EndpointId endpoint_id: ID of the endpoint
"""
for prof_id in prof_ids:
endpoints = self.endpoint_ids_by_profile_id[prof_id]
endpoints.discard(endpoint_id)
if not endpoints:
_log.debug("No more endpoints use profile %s", prof_id)
del self.endpoint_ids_by_profile_id[prof_id]
    def _finish_msg_batch(self, batch, results):
        """
        Called after a batch of messages is finished, processes any
        pending TagIpset member updates.

        Doing that here allows us to batch lots of updates into one replace
        operation.  It also avoids wasted effort if tags are flapping.
        """
        super(IpsetManager, self)._finish_msg_batch(batch, results)
        _log.info("Finishing batch, sending updates to any dirty tags..")
        self._update_dirty_active_ipsets()
        # Snapshot application sets this flag; clear it once the dirty tags
        # have been pushed out.
        self._force_reprogram = False
        _log.info("Finished sending updates to dirty tags.")
class EndpointData(object):
    """
    Read-only, space-efficient 'struct' holding just the endpoint fields
    that the ipset manager cares about.
    """
    # __slots__ avoids a per-instance __dict__; there may be many of these.
    __slots__ = ["_profile_ids", "_ip_addresses"]

    def __init__(self, profile_ids, ip_addresses):
        """
        :param sequence profile_ids: The profile IDs for the endpoint.
        :param sequence ip_addresses: IP addresses for the endpoint.
        """
        # The data model orders profile IDs but the ipsets code doesn't care,
        # so sorting here is safe and makes equality/hashing canonical.
        self._profile_ids = tuple(sorted(profile_ids))
        self._ip_addresses = tuple(sorted(ip_addresses))

    @property
    def profile_ids(self):
        """:returns set[str]: profile IDs."""
        # Built lazily: a set costs ~250B vs ~64B for the stored tuple.
        return set(self._profile_ids)

    @property
    def ip_addresses(self):
        """:returns set[str]: IP addresses."""
        # Built lazily: a set costs ~250B vs ~64B for the stored tuple.
        return set(self._ip_addresses)

    def __repr__(self):
        return "%s(%s,%s)" % (self.__class__.__name__,
                              self._profile_ids,
                              self._ip_addresses)

    def __eq__(self, other):
        if self is other:
            return True
        if isinstance(other, EndpointData):
            return (self._profile_ids == other._profile_ids and
                    self._ip_addresses == other._ip_addresses)
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Combine the two tuple hashes; equal objects hash equal.
        return hash(self._profile_ids) + hash(self._ip_addresses)
# Shared singleton representing a deleted or optimized-out endpoint; safe to
# share because EndpointData is read-only.
EMPTY_ENDPOINT_DATA = EndpointData([], [])
class IpsetActor(Actor):
    """
    Actor that owns a single ipset.

    Updates are batched so the dataplane is touched as few times as
    possible.
    """

    def __init__(self, ipset, qualifier=None):
        """
        :param Ipset ipset: Ipset object to wrap.
        :param str qualifier: Actor qualifier string for logging.
        """
        super(IpsetActor, self).__init__(qualifier=qualifier)
        self._ipset = ipset
        # Desired membership of the ipset.
        self.members = set()
        # Membership actually programmed into the dataplane; None until the
        # first sync.
        self.programmed_members = None
        self._force_reprogram = False
        self.stopped = False

    @property
    def ipset_name(self):
        """
        The name of the primary ipset.  Safe to access from another greenlet;
        only accesses immutable state.
        """
        return self._ipset.set_name

    def owned_ipset_names(self):
        """
        This method is safe to call from another greenlet; it only accesses
        immutable state.

        :return: set of name of ipsets that this Actor owns and manages.  the
            sets may or may not be present.
        """
        return set([self._ipset.set_name, self._ipset.temp_set_name])

    @actor_message()
    def replace_members(self, members, force_reprogram=False):
        """
        Replace the members of this ipset with the supplied set.

        :param set[str]|list[str] members: IP address strings.
        :param bool force_reprogram: if True, rewrite the dataplane even if
            the membership appears unchanged.
        """
        _log.info("Replacing members of ipset %s", self.name)
        self.members.clear()
        self.members.update(members)
        self._force_reprogram |= force_reprogram

    def _finish_msg_batch(self, batch, results):
        _log.debug("IpsetActor._finish_msg_batch() called")
        if self.stopped:
            # Never touch the dataplane once stopped; that could recreate a
            # deleted set.
            return
        if self._force_reprogram or self.members != self.programmed_members:
            _log.debug("IpsetActor not in sync, updating dataplane.")
            self._sync_to_ipset()
            self._force_reprogram = False

    def _sync_to_ipset(self):
        _log.info("Rewriting %s with %d members.", self, len(self.members))
        _log.debug("Setting ipset %s to %s", self, self.members)
        # The Ipset helper does the atomic rewrite.
        self._ipset.replace_members(self.members)
        # The dataplane now matches our desired state; record a copy.
        self.programmed_members = self.members.copy()

    def __str__(self):
        return ("%s<queue_len=%s,live=%s,msg=%s,name=%s>" %
                (self.__class__.__name__,
                 self._event_queue.qsize(),
                 bool(self.greenlet),
                 self._current_msg,
                 self.name))
class TagIpset(IpsetActor, RefCountedActor):
    """
    Specialised, RefCountedActor managing a single tag's ipset.
    """

    def __init__(self, tag, ip_type):
        """
        :param str tag: Name of tag that this ipset represents.  Note: not
            the name of the ipset itself; that is derived from this value.
        :param ip_type: One of the constants, futils.IPV4 or futils.IPV6
        """
        self.tag = tag
        family = "inet" if ip_type == IPV4 else "inet6"
        # The Ipset helper does atomic rewrites via a temporary set.
        wrapped_ipset = Ipset(tag_to_ipset_name(ip_type, tag),
                              tag_to_ipset_name(ip_type, tag, tmp=True),
                              family,
                              "hash:ip")
        super(TagIpset, self).__init__(wrapped_ipset, qualifier=tag)
        # Whether we've told our referrer that we're ready.
        self.notified_ready = False

    @actor_message()
    def on_unreferenced(self):
        # Flag that we're stopped first so that _finish_msg_batch can't
        # accidentally recreate the ipset.
        self.stopped = True
        try:
            self._ipset.delete()
        finally:
            self._notify_cleanup_complete()

    def _finish_msg_batch(self, batch, results):
        _log.debug("_finish_msg_batch on TagIpset")
        super(TagIpset, self)._finish_msg_batch(batch, results)
        if not self.notified_ready:
            # The set now exists in the dataplane, so we're ready.
            _log.debug("TagIpset _finish_msg_batch notifying ready")
            self.notified_ready = True
            self._notify_ready()

    def __str__(self):
        return ("%s<queue_len=%s,live=%s,msg=%s,name=%s,id=%s>" %
                (self.__class__.__name__,
                 self._event_queue.qsize(),
                 bool(self.greenlet),
                 self._current_msg,
                 self.name,
                 self._id))
class Ipset(object):
    """
    (Synchronous) wrapper around an ipset, supporting atomic rewrites.
    """

    def __init__(self, ipset_name, temp_ipset_name, ip_family,
                 ipset_type="hash:ip"):
        """
        :param str ipset_name: name of the primary ipset.  Must be less than
            32 chars.
        :param str temp_ipset_name: name of a secondary, temporary ipset to
            use when doing an atomic rewrite.  Must be less than 32 chars.
        """
        # The 32-char limit is imposed by the ipset tool itself.
        assert len(ipset_name) < 32
        assert len(temp_ipset_name) < 32
        assert ip_family in ("inet", "inet6")
        self.set_name = ipset_name
        self.temp_set_name = temp_ipset_name
        self.type = ipset_type
        self.family = ip_family

    def exists(self):
        """Returns True if the primary ipset exists in the dataplane."""
        try:
            futils.check_call(["ipset", "list", self.set_name])
        except FailedSystemCall as e:
            if e.retcode == 1 and "does not exist" in e.stderr:
                return False
            # Unexpected failure mode; surface it to the caller.
            _log.exception("Failed to check if ipset exists")
            raise
        return True

    def ensure_exists(self):
        """
        Creates the ipset iff it does not exist.

        Leaves the set and its contents untouched if it already exists.
        """
        self._exec_and_commit([self._create_cmd(self.set_name)])

    def replace_members(self, members):
        """
        Atomically rewrites the ipset with the new members.

        Creates the set if it does not exist.
        """
        # "ipset restore" processes a batch of updates.  The only operation
        # we're sure is atomic is swapping two ipsets, so we build the
        # complete membership in the temporary set, swap it into place and
        # then delete the old (now temporary) set.
        restore_lines = [
            # Ensure both the main set and the temporary set exist.
            self._create_cmd(self.set_name),
            self._create_cmd(self.temp_set_name),
            # No-op unless a temporary set was left over from before.
            "flush %s" % self.temp_set_name,
        ]
        for member in members:
            restore_lines.append("add %s %s" % (self.temp_set_name, member))
        # Atomically swap the temporary set into place, then delete the
        # temporary set (which was the old active set).
        restore_lines.append("swap %s %s" % (self.set_name,
                                             self.temp_set_name))
        restore_lines.append("destroy %s" % self.temp_set_name)
        self._exec_and_commit(restore_lines)

    def _exec_and_commit(self, input_lines):
        """
        Executes the given lines of "ipset restore" input and
        follows them with a COMMIT call.
        """
        # COMMIT tells ipset restore to actually execute the changes.
        input_lines.append("COMMIT")
        futils.check_call(["ipset", "restore"],
                          input_str="\n".join(input_lines) + "\n")

    def _create_cmd(self, name):
        """
        :returns an ipset restore line to create the given ipset iff it
            doesn't exist.
        """
        return ("create %s %s family %s --exist" %
                (name, self.type, self.family))

    def delete(self):
        """
        Deletes the ipsets.  This is done on a best-effort basis.
        """
        _log.debug("Delete ipsets %s and %s if they exist",
                   self.set_name, self.temp_set_name)
        futils.call_silent(["ipset", "destroy", self.set_name])
        futils.call_silent(["ipset", "destroy", self.temp_set_name])
# For IP-in-IP support, a global ipset that contains the IP addresses of all
# the calico hosts.  Only populated when IP-in-IP is enabled and the data is
# in etcd.  Both names must stay under ipset's 32-char limit (asserted in
# Ipset.__init__).
HOSTS_IPSET_V4 = Ipset(FELIX_PFX + "calico-hosts-4",
                       FELIX_PFX + "calico-hosts-4-tmp",
                       "inet")
def tag_to_ipset_name(ip_type, tag, tmp=False):
    """
    Turn a (possibly shortened) tag ID into an ipset name.

    :param str ip_type: IP type (IPV4 or IPV6)
    :param str tag: Tag ID
    :param bool tmp: Is this the tmp ipset, or the permanent one?
    """
    prefixes = IPSET_TMP_PREFIX if tmp else IPSET_PREFIX
    return prefixes[ip_type] + tag
def list_ipset_names():
    """
    List all names of ipsets.  Note that this is *not* the same as the ipset
    list command which lists contents too (hence the name change).

    :returns: List of names of ipsets.
    """
    output = futils.check_call(["ipset", "list"]).stdout
    # Each set's listing starts with a "Name: <name>" line; pick those out.
    return [words[1]
            for words in (line.split() for line in output.split("\n"))
            if len(words) > 1 and words[0] == "Name:"]
# ---------------------------------------------------------------------------
# (Extraction artifact removed: the following content is a separate module,
# xclim's threshold indices.)
# ---------------------------------------------------------------------------
# noqa: D100
import warnings
from typing import Optional
import numpy as np
import xarray
from xclim.core.calendar import get_calendar
from xclim.core.units import (
convert_units_to,
declare_units,
rate2amount,
str2pint,
to_agg_units,
)
from xclim.core.utils import DayOfYearStr
from . import run_length as rl
from .generic import domain_count, threshold_count
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
__all__ = [
"calm_days",
"cold_spell_days",
"cold_spell_frequency",
"daily_pr_intensity",
"degree_days_exceedance_date",
"cooling_degree_days",
"continuous_snow_cover_end",
"continuous_snow_cover_start",
"days_with_snow",
"freshet_start",
"growing_degree_days",
"growing_season_start",
"growing_season_end",
"growing_season_length",
"last_spring_frost",
"frost_free_season_start",
"frost_free_season_end",
"frost_free_season_length",
"frost_season_length",
"first_day_below",
"first_day_above",
"first_snowfall",
"last_snowfall",
"heat_wave_index",
"heating_degree_days",
"hot_spell_frequency",
"hot_spell_max_length",
"snow_cover_duration",
"tn_days_above",
"tn_days_below",
"tg_days_above",
"tg_days_below",
"tx_days_above",
"tx_days_below",
"tropical_nights",
"warm_day_frequency",
"warm_night_frequency",
"wetdays",
"winter_storm",
"dry_days",
"maximum_consecutive_dry_days",
"maximum_consecutive_frost_days",
"maximum_consecutive_frost_free_days",
"maximum_consecutive_tx_days",
"maximum_consecutive_wet_days",
"sea_ice_area",
"sea_ice_extent",
"windy_days",
]
@declare_units(sfcWind="[speed]", thresh="[speed]")
def calm_days(
sfcWind: xarray.DataArray, thresh: str = "2 m s-1", freq: str = "MS"
) -> xarray.DataArray:
r"""Calm days.
The number of days with average near-surface wind speed below threshold.
Parameters
----------
sfcWind : xarray.DataArray
Daily windspeed.
thresh : str
Threshold average near-surface wind speed on which to base evaluation.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Number of days with average near-surface wind speed below threshold.
Notes
-----
Let :math:`WS_{ij}` be the windspeed at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
WS_{ij} < Threshold [m s-1]
"""
thresh = convert_units_to(thresh, sfcWind)
out = threshold_count(sfcWind, "<", thresh, freq)
out = to_agg_units(out, sfcWind, "count")
return out
@declare_units(tas="[temperature]", thresh="[temperature]")
def cold_spell_days(
tas: xarray.DataArray,
thresh: str = "-10 degC",
window: int = 5,
freq: str = "AS-JUL",
) -> xarray.DataArray:
r"""Cold spell days.
The number of days that are part of cold spell events, defined as a sequence of consecutive days with mean daily
temperature below a threshold in °C.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
thresh : str
Threshold temperature below which a cold spell begins.
window : int
Minimum number of days with temperature below threshold to qualify as a cold spell.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Cold spell days.
Notes
-----
Let :math:`T_i` be the mean daily temperature on day :math:`i`, the number of cold spell days during
period :math:`\phi` is given by
.. math::
\sum_{i \in \phi} \prod_{j=i}^{i+5} [T_j < thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
"""
t = convert_units_to(thresh, tas)
over = tas < t
group = over.resample(time=freq)
out = group.map(rl.windowed_run_count, window=window, dim="time")
return to_agg_units(out, tas, "count")
@declare_units(tas="[temperature]", thresh="[temperature]")
def cold_spell_frequency(
tas: xarray.DataArray,
thresh: str = "-10 degC",
window: int = 5,
freq: str = "AS-JUL",
) -> xarray.DataArray:
r"""Cold spell frequency.
The number of cold spell events, defined as a sequence of consecutive days with mean daily
temperature below a threshold.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
thresh : str
Threshold temperature below which a cold spell begins.
window : int
Minimum number of days with temperature below threshold to qualify as a cold spell.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
Cold spell frequency.
"""
t = convert_units_to(thresh, tas)
over = tas < t
group = over.resample(time=freq)
out = group.map(rl.windowed_run_events, window=window, dim="time")
out.attrs["units"] = ""
return out
@declare_units(snd="[length]", thresh="[length]")
def continuous_snow_cover_end(
snd: xarray.DataArray, thresh: str = "2 cm", window: int = 14, freq: str = "AS-JUL"
) -> xarray.DataArray:
r"""End date of continuous snow cover.
First day after the start of the continuous snow cover when snow depth is below `threshold` for at least
`window` consecutive days.
WARNING: The default `freq` is valid for the northern hemisphere.
Parameters
----------
snd : xarray.DataArray
Surface snow thickness.
thresh : str
Threshold snow thickness.
window : int
Minimum number of days with snow depth below threshold.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
First day after the start of the continuous snow cover when the snow depth goes below a threshold
for a minimum duration.
If there is no such day, return np.nan.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>. 2017: Élaboration du portrait bioclimatique futur
du Nunavik – Tome II. [Rapport présenté au Ministère de la forêt, de la faune et des parcs], Ouranos.
"""
thresh = convert_units_to(thresh, snd)
cond = snd >= thresh
out = (
cond.resample(time=freq)
.map(rl.season, window=window, dim="time", coord="dayofyear")
.end
)
out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(snd))
return out
@declare_units(snd="[length]", thresh="[length]")
def continuous_snow_cover_start(
snd: xarray.DataArray, thresh: str = "2 cm", window: int = 14, freq: str = "AS-JUL"
) -> xarray.DataArray:
r"""Start date of continuous snow cover.
Day of year when snow depth is above or equal `threshold` for at least `window` consecutive days.
WARNING: The default `freq` is valid for the northern hemisphere.
Parameters
----------
snd : xarray.DataArray
Surface snow thickness.
thresh : str
Threshold snow thickness.
window : int
Minimum number of days with snow depth above or equal to threshold.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
First day of the year when the snow depth is superior to a threshold for a minimum duration.
If there is no such day, return np.nan.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>. 2017: Élaboration du portrait bioclimatique futur
du Nunavik – Tome II. [Rapport présenté au Ministère de la forêt, de la faune et des parcs], Ouranos.
"""
thresh = convert_units_to(thresh, snd)
cond = snd >= thresh
out = (
cond.resample(time=freq)
.map(
rl.season,
window=window,
dim="time",
coord="dayofyear",
)
.start
)
out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(snd))
return out
@declare_units(pr="[precipitation]", thresh="[precipitation]")
def daily_pr_intensity(
pr: xarray.DataArray, thresh: str = "1 mm/day", freq: str = "YS"
) -> xarray.DataArray:
r"""Average daily precipitation intensity.
Return the average precipitation over wet days.
Parameters
----------
pr : xarray.DataArray
Daily precipitation.
thresh : str
Precipitation value over which a day is considered wet.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [precipitation]
The average precipitation over wet days for each period
Notes
-----
Let :math:`\mathbf{p} = p_0, p_1, \ldots, p_n` be the daily precipitation and :math:`thresh` be the precipitation
threshold defining wet days. Then the daily precipitation intensity is defined as
.. math::
\frac{\sum_{i=0}^n p_i [p_i \leq thresh]}{\sum_{i=0}^n [p_i \leq thresh]}
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the average
precipitation fallen over days with precipitation >= 5 mm at seasonal
frequency, ie DJF, MAM, JJA, SON, DJF, etc.:
>>> from xclim.indices import daily_pr_intensity
>>> pr = xr.open_dataset(path_to_pr_file).pr
>>> daily_int = daily_pr_intensity(pr, thresh='5 mm/day', freq="QS-DEC")
"""
t = convert_units_to(thresh, pr, "hydro")
# Get amount of rain (not rate)
pram = rate2amount(pr)
# put pram = 0 for non wet-days
pram_wd = xarray.where(pr >= t, pram, 0)
pram_wd.attrs["units"] = pram.units
# sum over wanted period
s = pram_wd.resample(time=freq).sum(dim="time", keep_attrs=True)
# get number of wetdays over period
wd = wetdays(pr, thresh=thresh, freq=freq)
out = s / wd
out.attrs["units"] = f"{str2pint(s.units) / str2pint(wd.units):~}"
return out
@declare_units(pr="[precipitation]", thresh="[precipitation]")
def dry_days(
pr: xarray.DataArray, thresh: str = "0.2 mm/d", freq: str = "YS"
) -> xarray.DataArray:
r"""Dry days.
The number of days with daily precipitation below threshold.
Parameters
----------
pr : xarray.DataArray
Daily precipitation.
thresh : str
Threshold temperature on which to base evaluation.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
Number of days with daily precipitation below threshold.
Notes
-----
Let :math:`PR_{ij}` be the daily precipitation at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
\sum PR_{ij} < Threshold [mm/day]
"""
thresh = convert_units_to(thresh, pr)
out = threshold_count(pr, "<", thresh, freq)
out = to_agg_units(out, pr, "count")
return out
@declare_units(pr="[precipitation]", thresh="[precipitation]")
def maximum_consecutive_wet_days(
pr: xarray.DataArray, thresh: str = "1 mm/day", freq: str = "YS"
) -> xarray.DataArray:
r"""Consecutive wet days.
Returns the maximum number of consecutive wet days.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux.
thresh : str
Threshold precipitation on which to base evaluation.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time]
The maximum number of consecutive wet days.
Notes
-----
Let :math:`\mathbf{x}=x_0, x_1, \ldots, x_n` be a daily precipitation series and
:math:`\mathbf{s}` be the sorted vector of indices :math:`i` where :math:`[p_i > thresh] \neq [p_{i+1} >
thresh]`, that is, the days when the precipitation crosses the *wet day* threshold.
Then the maximum number of consecutive wet days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [x_{s_j} > 0^\circ C]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does.
"""
thresh = convert_units_to(thresh, pr, "hydro")
group = (pr > thresh).resample(time=freq)
out = group.map(rl.longest_run, dim="time")
out = to_agg_units(out, pr, "count")
return out
@declare_units(tas="[temperature]", thresh="[temperature]")
def cooling_degree_days(
tas: xarray.DataArray, thresh: str = "18 degC", freq: str = "YS"
) -> xarray.DataArray:
r"""Cooling degree days.
Sum of degree days above the temperature threshold at which spaces are cooled.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
thresh : str
Temperature threshold above which air is cooled.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time][temperature]
Cooling degree days
Notes
-----
Let :math:`x_i` be the daily mean temperature at day :math:`i`. Then the cooling degree days above
temperature threshold :math:`thresh` over period :math:`\phi` is given by:
.. math::
\sum_{i \in \phi} (x_{i}-{thresh} [x_i > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
"""
thresh = convert_units_to(thresh, tas)
out = (tas - thresh).clip(min=0).resample(time=freq).sum(dim="time")
out = to_agg_units(out, tas, "delta_prod")
return out
@declare_units(tas="[temperature]", thresh="[temperature]")
def freshet_start(
tas: xarray.DataArray, thresh: str = "0 degC", window: int = 5, freq: str = "YS"
) -> xarray.DataArray:
r"""First day consistently exceeding threshold temperature.
Returns first day of period where a temperature threshold is exceeded
over a given number of days.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
thresh : str
Threshold temperature on which to base evaluation.
window : int
Minimum number of days with temperature above threshold needed for evaluation.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
Day of the year when temperature exceeds threshold over a given number of days for the first time. If there is
no such day, return np.nan.
Notes
-----
Let :math:`x_i` be the daily mean temperature at day of the year :math:`i` for values of :math:`i` going from 1
to 365 or 366. The start date of the freshet is given by the smallest index :math:`i` for which
.. math::
\prod_{j=i}^{i+w} [x_j > thresh]
is true, where :math:`w` is the number of days the temperature threshold should be exceeded, and :math:`[P]` is
1 if :math:`P` is true, and 0 if false.
"""
thresh = convert_units_to(thresh, tas)
over = tas > thresh
out = over.resample(time=freq).map(rl.first_run, window=window, coord="dayofyear")
out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tas))
return out
@declare_units(tas="[temperature]", thresh="[temperature]")
def growing_degree_days(
tas: xarray.DataArray, thresh: str = "4.0 degC", freq: str = "YS"
) -> xarray.DataArray:
r"""Growing degree-days over threshold temperature value.
The sum of degree-days over the threshold temperature.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature.
thresh : str
Threshold temperature on which to base evaluation.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [time][temperature]
The sum of growing degree-days above a given threshold.
Notes
-----
Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then the
growing degree days are:
.. math::
GD4_j = \sum_{i=1}^I (TG_{ij}-{4} | TG_{ij} > {4}℃)
"""
thresh = convert_units_to(thresh, tas)
out = (tas - thresh).clip(min=0).resample(time=freq).sum(dim="time")
return to_agg_units(out, tas, "delta_prod")
@declare_units(tas="[temperature]", thresh="[temperature]")
def growing_season_start(
    tas: xarray.DataArray, thresh: str = "5.0 degC", window: int = 5, freq: str = "YS"
) -> xarray.DataArray:
    r"""Start of the growing season.

    Day of the year marking the beginning of a run of days whose mean temperature
    is consistently at or above a threshold.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    window : int
      Minimum number of days with temperature above threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when temperature is superior to a threshold over a given number of days for the first time.
      If there is no such day or if a growing season is not detected, returns np.nan.

    Notes
    -----
    Let :math:`x_i` be the daily mean temperature at day of the year :math:`i` for values of :math:`i` going from 1
    to 365 or 366. The start date of the start of growing season is given by the smallest index :math:`i` for which:

    .. math::

       \prod_{j=i}^{i+w} [x_j >= thresh]

    is true, where :math:`w` is the number of days the temperature threshold should be met or exceeded,
    and :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
    """
    thresh_val = convert_units_to(thresh, tas)
    warm_enough = tas >= thresh_val
    start = warm_enough.resample(time=freq).map(
        rl.first_run, window=window, coord="dayofyear"
    )
    # Day-of-year outputs carry the source calendar so they can be converted later.
    start.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tas))
    return start
@declare_units(tas="[temperature]", thresh="[temperature]")
def growing_season_end(
    tas: xarray.DataArray,
    thresh: str = "5.0 degC",
    mid_date: DayOfYearStr = "07-01",
    window: int = 5,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""End of the growing season.

    Day of the year marking the beginning of a run of days with mean temperatures
    consistently below a threshold, following a period consistently above it.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    mid_date : str
      Date of the year after which to look for the end of the season. Should have the format '%m-%d'.
    window : int
      Minimum number of days with temperature below threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when temperature is inferior to a threshold over a given number of days for the first time.
      If there is no such day or if a growing season is not detected, returns np.nan.
      If the growing season does not end within the time period, returns the last day of the period.
    """
    thresh_val = convert_units_to(thresh, tas)
    above = tas >= thresh_val
    # The end of the run of warm days (searched after mid_date) marks the season end.
    season_end = above.resample(time=freq).map(
        rl.run_end_after_date, window=window, date=mid_date, dim="time", coord="dayofyear"
    )
    season_end.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tas))
    return season_end
@declare_units(tas="[temperature]", thresh="[temperature]")
def growing_season_length(
    tas: xarray.DataArray,
    thresh: str = "5.0 degC",
    window: int = 6,
    mid_date: DayOfYearStr = "07-01",
    freq: str = "YS",
) -> xarray.DataArray:
    r"""Growing season length.

    Number of days between the first run of at least `window` consecutive days with
    mean daily temperature above a threshold (default: 5℃) and the first run, after
    `mid_date`, of at least `window` consecutive days below that same threshold.
    (`mid_date` is usually July 1st in the northern hemisphere and January 1st in the
    southern hemisphere.)

    WARNING: The default calendar values are only valid for the northern hemisphere.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    window : int
      Minimum number of days with temperature above threshold to mark the beginning and end of growing season.
    mid_date : str
      Date of the year after which to look for the end of the season. Should have the format '%m-%d'.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Growing season length.

    Notes
    -----
    Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then counted is
    the number of days between the first occurrence of at least 6 consecutive days with:

    .. math::

        TG_{ij} > 5 ℃

    and the first occurrence after 1 July of at least 6 consecutive days with:

    .. math::

        TG_{ij} < 5 ℃

    Examples
    --------
    >>> from xclim.indices import growing_season_length
    >>> tas = xr.open_dataset(path_to_tas_file).tas

    # For the Northern Hemisphere:
    >>> gsl_nh = growing_season_length(tas, mid_date='07-01', freq='AS')

    # If working in the Southern Hemisphere, one can use:
    >>> gsl_sh = growing_season_length(tas, mid_date='01-01', freq='AS-JUL')
    """
    thresh_val = convert_units_to(thresh, tas)
    warm = tas >= thresh_val
    length = warm.resample(time=freq).map(
        rl.season_length, window=window, date=mid_date, dim="time"
    )
    return to_agg_units(length, tas, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def frost_season_length(
    tasmin: xarray.DataArray,
    window: int = 5,
    mid_date: Optional[DayOfYearStr] = "01-01",
    thresh: str = "0.0 degC",
    freq: str = "AS-JUL",
) -> xarray.DataArray:
    r"""Frost season length.

    Number of days between the first run of at least N (default: 5) consecutive days
    with minimum daily temperature under a threshold (default: 0℃) and the first
    subsequent run of at least N consecutive days above that same threshold.
    A mid date can be given to limit the earliest day the end of season can take.

    WARNING: The default freq and mid_date values are valid for the northern hemisphere.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    window : int
      Minimum number of days with temperature below threshold to mark the beginning and end of frost season.
    mid_date : str, optional
      Date that must be included in the season. It is the earliest the end of the season can be.
      If None, there is no limit.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Frost season length.

    Notes
    -----
    Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then counted is
    the number of days between the first occurrence of at least N consecutive days with:

    .. math::

        TN_{ij} > 0 ℃

    and the first subsequent occurrence of at least N consecutive days with:

    .. math::

        TN_{ij} < 0 ℃

    Examples
    --------
    >>> from xclim.indices import frost_season_length
    >>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin

    # For the Northern Hemisphere:
    >>> fsl_nh = frost_season_length(tasmin, freq='AS-JUL')

    # If working in the Southern Hemisphere, one can use:
    >>> fsl_sh = frost_season_length(tasmin, freq='YS')
    """
    thresh_val = convert_units_to(thresh, tasmin)
    freezing = tasmin < thresh_val
    length = freezing.resample(time=freq).map(
        rl.season_length, window=window, date=mid_date, dim="time"
    )
    return to_agg_units(length, tasmin, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def frost_free_season_start(
    tasmin: xarray.DataArray,
    thresh: str = "0.0 degC",
    window: int = 5,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""Start of the frost free season.

    Day of the year marking the beginning of a run of days whose minimum temperature
    is consistently at or above a threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    window : int
      Minimum number of days with temperature above threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when minimum temperature is superior to a threshold
      over a given number of days for the first time.
      If there is no such day or if a frost free season is not detected, returns np.nan.

    Notes
    -----
    Let :math:`x_i` be the daily mean temperature at day of the year :math:`i` for values of :math:`i` going from 1
    to 365 or 366. The start date of the start of growing season is given by the smallest index :math:`i` for which:

    .. math::

       \prod_{j=i}^{i+w} [x_j >= thresh]

    is true, where :math:`w` is the number of days the temperature threshold should be met or exceeded,
    and :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
    """
    thresh_val = convert_units_to(thresh, tasmin)
    frost_free = tasmin >= thresh_val
    start = frost_free.resample(time=freq).map(
        rl.first_run, window=window, coord="dayofyear"
    )
    start.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tasmin))
    return start
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def frost_free_season_end(
    tasmin: xarray.DataArray,
    thresh: str = "0.0 degC",
    mid_date: DayOfYearStr = "07-01",
    window: int = 5,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""End of the frost free season.

    Day of the year marking the beginning of a run of days with minimum temperatures
    consistently below a threshold, following a period consistently above it.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    mid_date : str
      Date of the year after which to look for the end of the season. Should have the format '%m-%d'.
    window : int
      Minimum number of days with temperature below threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when minimum temperature is inferior to a threshold
      over a given number of days for the first time.
      If there is no such day or if a frost free season is not detected, returns np.nan.
      If the frost free season does not end within the time period, returns the last day of the period.
    """
    thresh_val = convert_units_to(thresh, tasmin)
    above = tasmin >= thresh_val
    # End of the frost-free run, searched only after mid_date.
    season_end = above.resample(time=freq).map(
        rl.run_end_after_date, window=window, date=mid_date, dim="time", coord="dayofyear"
    )
    season_end.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tasmin))
    return season_end
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def frost_free_season_length(
    tasmin: xarray.DataArray,
    window: int = 5,
    mid_date: Optional[DayOfYearStr] = "07-01",
    thresh: str = "0.0 degC",
    freq: str = "YS",
) -> xarray.DataArray:
    r"""Frost free season length.

    Number of days between the first run of at least N (default: 5) consecutive days
    with minimum daily temperature above a threshold (default: 0℃) and the first
    subsequent run of at least N consecutive days below that same threshold.
    A mid date can be given to limit the earliest day the end of season can take.

    WARNING: The default freq and mid_date values are valid for the northern hemisphere.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    window : int
      Minimum number of days with temperature above threshold to mark the beginning and end of frost free season.
    mid_date : str, optional
      Date that must be included in the season. It is the earliest the end of the season can be.
      If None, there is no limit.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Frost free season length.

    Notes
    -----
    Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then counted is
    the number of days between the first occurrence of at least N consecutive days with:

    .. math::

        TN_{ij} >= 0 ℃

    and the first subsequent occurrence of at least N consecutive days with:

    .. math::

        TN_{ij} < 0 ℃

    Examples
    --------
    >>> from xclim.indices import frost_season_length
    >>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin

    # For the Northern Hemisphere:
    >>> ffsl_nh = frost_free_season_length(tasmin, freq='YS')

    # If working in the Southern Hemisphere, one can use:
    >>> ffsl_sh = frost_free_season_length(tasmin, freq='AS-JUL')
    """
    thresh_val = convert_units_to(thresh, tasmin)
    frost_free = tasmin >= thresh_val
    length = frost_free.resample(time=freq).map(
        rl.season_length, window=window, date=mid_date, dim="time"
    )
    return to_agg_units(length, tasmin, "count")
@declare_units(tas="[temperature]", thresh="[temperature]")
def last_spring_frost(
    tas: xarray.DataArray,
    thresh: str = "0 degC",
    before_date: DayOfYearStr = "07-01",
    window: int = 1,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""Last day of temperatures inferior to a threshold temperature.

    Returns the last day of the period where temperature is below a threshold
    over a given number of days, limited to a final calendar date.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    before_date : str
      Date of the year before which to look for the final frost event. Should have the format '%m-%d'.
    window : int
      Minimum number of days with temperature below threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year of the last occurrence of temperature below the threshold over the
      given number of days. If there is no such day, returns np.nan.
    """
    thresh_val = convert_units_to(thresh, tas)
    freezing = tas < thresh_val
    last_frost = freezing.resample(time=freq).map(
        rl.last_run_before_date,
        window=window,
        date=before_date,
        dim="time",
        coord="dayofyear",
    )
    last_frost.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tas))
    return last_frost
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def first_day_below(
    tasmin: xarray.DataArray,
    thresh: str = "0 degC",
    after_date: DayOfYearStr = "07-01",
    window: int = 1,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""First day of temperatures inferior to a threshold temperature.

    Returns the first day of the period where minimum temperature is below a threshold
    over a given number of days, limited to a starting calendar date.

    WARNING: The default date and freq are valid for the northern hemisphere.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    after_date : str
      Date of the year after which to look for the first frost event. Should have the format '%m-%d'.
    window : int
      Minimum number of days with temperature below threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when minimum temperature is inferior to a threshold over a given number of days for the first time.
      If there is no such day, returns np.nan.
    """
    thresh_val = convert_units_to(thresh, tasmin)
    below = tasmin < thresh_val
    first = below.resample(time=freq).map(
        rl.first_run_after_date,
        window=window,
        date=after_date,
        dim="time",
        coord="dayofyear",
    )
    first.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tasmin))
    return first
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def first_day_above(
    tasmin: xarray.DataArray,
    thresh: str = "0 degC",
    after_date: DayOfYearStr = "01-01",
    window: int = 1,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""First day of temperatures superior to a threshold temperature.

    Returns the first day of the period where minimum temperature is above a threshold
    over a given number of days, limited to a starting calendar date.

    WARNING: The default date and freq are valid for the northern hemisphere.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    after_date : str
      Date of the year after which to look for the first event. Should have the format '%m-%d'.
    window : int
      Minimum number of days with temperature above threshold needed for evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Day of the year when minimum temperature is superior to a threshold over a given number of days for the first time.
      If there is no such day, returns np.nan.
    """
    thresh_val = convert_units_to(thresh, tasmin)
    above = tasmin > thresh_val
    first = above.resample(time=freq).map(
        rl.first_run_after_date,
        window=window,
        date=after_date,
        dim="time",
        coord="dayofyear",
    )
    first.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tasmin))
    return first
@declare_units(prsn="[precipitation]", thresh="[precipitation]")
def first_snowfall(
    prsn: xarray.DataArray,
    thresh: str = "0.5 mm/day",
    freq: str = "AS-JUL",
) -> xarray.DataArray:
    r"""First day with solid precipitation above a threshold.

    Returns the first day of a period where solid precipitation reaches or exceeds a threshold.

    WARNING: The default `freq` is valid for the northern hemisphere.

    Parameters
    ----------
    prsn : xarray.DataArray
      Solid precipitation flux.
    thresh : str
      Threshold precipitation flux on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      First day of the year when the solid precipitation is superior to a threshold.
      If there is no such day, returns np.nan.

    References
    ----------
    Climate Projections for the National Capital Region (2020), Volume 1: Results and Interpretation for Key Climate
    Indices, Report 193600.00, Prepared for Ottawa by CBCL.
    """
    thresh_val = convert_units_to(thresh, prsn)
    snowing = prsn >= thresh_val
    first = snowing.resample(time=freq).map(
        rl.first_run, window=1, dim="time", coord="dayofyear"
    )
    first.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(prsn))
    return first
@declare_units(prsn="[precipitation]", thresh="[precipitation]")
def last_snowfall(
    prsn: xarray.DataArray,
    thresh: str = "0.5 mm/day",
    freq: str = "AS-JUL",
) -> xarray.DataArray:
    r"""Last day with solid precipitation above a threshold.

    Returns the last day of a period where the solid precipitation exceeds a threshold.

    WARNING: The default freq is valid for the northern hemisphere.

    Parameters
    ----------
    prsn : xarray.DataArray
      Solid precipitation flux.
    thresh : str
      Threshold precipitation flux on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Last day of the year when the solid precipitation is superior to a threshold.
      If there is no such day, returns np.nan.

    References
    ----------
    Climate Projections for the National Capital Region (2020), Volume 1: Results and Interpretation for Key Climate
    Indices, Report 193600.00, Prepared for Ottawa by CBCL.
    """
    thresh = convert_units_to(thresh, prsn)
    cond = prsn >= thresh
    out = cond.resample(time=freq).map(
        rl.last_run,
        window=1,
        dim="time",
        coord="dayofyear",
    )
    # Day-of-year output: tag with is_dayofyear and the source calendar, like
    # `first_snowfall` and the other day-of-year indices, so calendar-aware
    # conversions of the index work downstream.
    out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(prsn))
    return out
@declare_units(prsn="[precipitation]", low="[precipitation]", high="[precipitation]")
def days_with_snow(
    prsn: xarray.DataArray,  # noqa
    low: str = "0 kg m-2 s-1",
    high: str = "1E6 kg m-2 s-1",
    freq: str = "AS-JUL",
) -> xarray.DataArray:
    r"""Days with snow.

    Return the number of days where snowfall is within low and high thresholds.

    Parameters
    ----------
    prsn : xr.DataArray
      Solid precipitation flux.
    low : str
      Minimum threshold solid precipitation flux.
    high : str
      Maximum threshold solid precipitation flux.
    freq : str
      Resampling frequency defining the periods as defined in
      https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where snowfall is between low and high thresholds.

    References
    ----------
    <NAME>., <NAME>., & <NAME>. (2017). Planning for Winter Road Maintenance in the Context of Climate
    Change, Weather, Climate, and Society, 9(3), 521-532, https://doi.org/10.1175/WCAS-D-16-0103.1
    """
    low_val = convert_units_to(low, prsn)
    high_val = convert_units_to(high, prsn)
    n_days = domain_count(prsn, low_val, high_val, freq)
    return to_agg_units(n_days, prsn, "count")
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def heat_wave_index(
    tasmax: xarray.DataArray,
    thresh: str = "25.0 degC",
    window: int = 5,
    freq: str = "YS",
) -> xarray.DataArray:
    """Heat wave index.

    Number of days that are part of a heatwave, defined as five or more consecutive days over 25℃.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh : str
      Threshold temperature on which to designate a heatwave.
    window : int
      Minimum number of days with temperature above threshold to qualify as a heatwave.
    freq : str
      Resampling frequency.

    Returns
    -------
    DataArray, [time]
      Heat wave index.
    """
    thresh_val = convert_units_to(thresh, tasmax)
    hot = tasmax > thresh_val
    # Count only days belonging to runs of at least `window` hot days.
    n_days = hot.resample(time=freq).map(rl.windowed_run_count, window=window, dim="time")
    return to_agg_units(n_days, tasmax, "count")
@declare_units(tas="[temperature]", thresh="[temperature]")
def heating_degree_days(
    tas: xarray.DataArray, thresh: str = "17.0 degC", freq: str = "YS"
) -> xarray.DataArray:
    r"""Heating degree days.

    Sum of degree days below the temperature threshold at which spaces are heated.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time][temperature]
      Heating degree days index.

    Notes
    -----
    This index intentionally differs from its ECA&D equivalent: HD17. In HD17, values below zero are
    not clipped before the sum. The present definition should provide a better representation of the energy
    demand for heating buildings to the given threshold.

    Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then the
    heating degree days are:

    .. math::

        HD17_j = \sum_{i=1}^{I} (17℃ - TG_{ij}) | TG_{ij} < 17℃)
    """
    thresh_val = convert_units_to(thresh, tas)
    # Daily heating demand: deficit below the threshold, zero on warm days.
    deficit = (thresh_val - tas).clip(min=0)
    total = deficit.resample(time=freq).sum(dim="time")
    return to_agg_units(total, tas, "delta_prod")
@declare_units(tasmax="[temperature]", thresh_tasmax="[temperature]")
def hot_spell_max_length(
    tasmax: xarray.DataArray,
    thresh_tasmax: str = "30 degC",
    window: int = 1,
    freq: str = "YS",
) -> xarray.DataArray:
    """Longest hot spell.

    Longest spell of high temperatures over a given period.
    The longest series of consecutive days with tasmax ≥ 30 °C. Here, there is no minimum threshold for number of
    days in a row that must be reached or exceeded to count as a spell. A year with zero +30 °C days will return a
    longest spell value of zero.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh_tasmax : str
      The maximum temperature threshold needed to trigger a heatwave event.
    window : int
      Minimum number of days with temperatures above thresholds to qualify as a heatwave.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Maximum length of continuous hot days at the wanted frequency.

    Notes
    -----
    The thresholds of 22° and 25°C for night temperatures and 30° and 35°C for day temperatures were selected by
    Health Canada professionals, following a temperature–mortality analysis. These absolute temperature thresholds
    characterize the occurrence of hot weather events that can result in adverse health outcomes for Canadian
    communities (Casati et al., 2013).

    In Robinson (2001), the parameters would be `thresh_tasmin=27.22, thresh_tasmax=39.44, window=2` (81F, 103F).

    References
    ----------
    <NAME>., <NAME>, and <NAME>, 2013: Regional Climate Projections of Extreme Heat Events in Nine Pilot
    Canadian Communities for Public Health Planning. J. Appl. Meteor. Climatol., 52, 2669–2698,
    https://doi.org/10.1175/JAMC-D-12-0341.1

    <NAME>., 2001: On the Definition of a Heat Wave. J. Appl. Meteor., 40, 762–775,
    https://doi.org/10.1175/1520-0450(2001)040<0762:OTDOAH>2.0.CO;2
    """
    thresh_val = convert_units_to(thresh_tasmax, tasmax)
    hot = tasmax > thresh_val
    longest = hot.resample(time=freq).map(rl.longest_run, dim="time")
    # Runs shorter than `window` do not qualify as a spell: report zero instead.
    longest = longest.where(longest >= window, 0)
    return to_agg_units(longest, tasmax, "count")
@declare_units(tasmax="[temperature]", thresh_tasmax="[temperature]")
def hot_spell_frequency(
    tasmax: xarray.DataArray,
    thresh_tasmax: str = "30 degC",
    window: int = 3,
    freq: str = "YS",
) -> xarray.DataArray:
    """Hot spell frequency.

    Number of hot spells over a given period. A hot spell is defined as an event
    where the maximum daily temperature exceeds a specific threshold
    over a minimum number of days.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh_tasmax : str
      The maximum temperature threshold needed to trigger a heatwave event.
    window : int
      Minimum number of days with temperatures above thresholds to qualify as a heatwave.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Number of heatwave at the wanted frequency

    Notes
    -----
    The thresholds of 22° and 25°C for night temperatures and 30° and 35°C for day temperatures were selected by
    Health Canada professionals, following a temperature–mortality analysis. These absolute temperature thresholds
    characterize the occurrence of hot weather events that can result in adverse health outcomes for Canadian
    communities (Casati et al., 2013).

    In Robinson (2001), the parameters would be `thresh_tasmin=27.22, thresh_tasmax=39.44, window=2` (81F, 103F).

    References
    ----------
    <NAME>., <NAME>, and <NAME>, 2013: Regional Climate Projections of Extreme Heat Events in Nine Pilot
    Canadian Communities for Public Health Planning. J. Appl. Meteor. Climatol., 52, 2669–2698,
    https://doi.org/10.1175/JAMC-D-12-0341.1

    <NAME>., 2001: On the Definition of a Heat Wave. J. Appl. Meteor., 40, 762–775,
    https://doi.org/10.1175/1520-0450(2001)040<0762:OTDOAH>2.0.CO;2
    """
    thresh_val = convert_units_to(thresh_tasmax, tasmax)
    hot = tasmax > thresh_val
    # Count distinct runs of at least `window` consecutive hot days per period.
    n_events = hot.resample(time=freq).map(
        rl.windowed_run_events, window=window, dim="time"
    )
    n_events.attrs["units"] = ""
    return n_events
@declare_units(snd="[length]", thresh="[length]")
def snow_cover_duration(
    snd: xarray.DataArray, thresh: str = "2 cm", freq: str = "AS-JUL"
) -> xarray.DataArray:
    """Number of days with snow depth above a threshold.

    Number of days where surface snow depth is greater or equal to given threshold.

    WARNING: The default `freq` is valid for the northern hemisphere.

    Parameters
    ----------
    snd : xarray.DataArray
      Surface snow thickness.
    thresh : str
      Threshold snow thickness.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where snow depth is greater or equal to threshold.
    """
    thresh_val = convert_units_to(thresh, snd)
    n_days = threshold_count(snd, ">=", thresh_val, freq)
    return to_agg_units(n_days, snd, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def tn_days_above(
    tasmin: xarray.DataArray, thresh: str = "20.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tasmin above a threshold (number of tropical nights).

    Number of days where daily minimum temperature exceeds a threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tasmin > threshold.

    Notes
    -----
    Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TN_{ij} > Threshold [℃]
    """
    # Strict comparison: only days above (not equal to) the threshold are counted.
    thresh = convert_units_to(thresh, tasmin)
    f = threshold_count(tasmin, ">", thresh, freq)
    return to_agg_units(f, tasmin, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def tn_days_below(
    tasmin: xarray.DataArray, thresh: str = "-10.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tasmin below a threshold.

    Number of days where daily minimum temperature is below a threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tasmin < threshold.

    Notes
    -----
    Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TN_{ij} < Threshold [℃]
    """
    thresh_val = convert_units_to(thresh, tasmin)
    n_days = threshold_count(tasmin, "<", thresh_val, freq)
    return to_agg_units(n_days, tasmin, "count")
@declare_units(tas="[temperature]", thresh="[temperature]")
def tg_days_above(
    tas: xarray.DataArray, thresh: str = "10.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tas above a threshold.

    Number of days where daily mean temperature exceeds a threshold.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tas > threshold.

    Notes
    -----
    Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TG_{ij} > Threshold [℃]
    """
    # Strict comparison: only days above (not equal to) the threshold are counted.
    thresh = convert_units_to(thresh, tas)
    f = threshold_count(tas, ">", thresh, freq)
    return to_agg_units(f, tas, "count")
@declare_units(tas="[temperature]", thresh="[temperature]")
def tg_days_below(
    tas: xarray.DataArray, thresh: str = "10.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tas below a threshold.

    Number of days where daily mean temperature is below a threshold.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tas < threshold.

    Notes
    -----
    Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TG_{ij} < Threshold [℃]
    """
    # Strict comparison: only days below (not equal to) the threshold are counted.
    thresh = convert_units_to(thresh, tas)
    f1 = threshold_count(tas, "<", thresh, freq)
    return to_agg_units(f1, tas, "count")
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def tx_days_above(
    tasmax: xarray.DataArray, thresh: str = "25.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tasmax above a threshold (number of summer days).

    Number of days where daily maximum temperature exceeds a threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tasmax > threshold (number of summer days).

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TX_{ij} > Threshold [℃]
    """
    thresh_val = convert_units_to(thresh, tasmax)
    n_days = threshold_count(tasmax, ">", thresh_val, freq)
    return to_agg_units(n_days, tasmax, "count")
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def tx_days_below(
    tasmax: xarray.DataArray, thresh: str = "25.0 degC", freq: str = "YS"
) -> xarray.DataArray:  # noqa: D401
    """Number of days with tmax below a threshold.

    Number of days where daily maximum temperature is below a threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days where tasmax < threshold.

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TX_{ij} < Threshold [℃]
    """
    # Strict comparison: only days below (not equal to) the threshold are counted.
    thresh = convert_units_to(thresh, tasmax)
    f1 = threshold_count(tasmax, "<", thresh, freq)
    return to_agg_units(f1, tasmax, "count")
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def warm_day_frequency(
    tasmax: xarray.DataArray, thresh: str = "30 degC", freq: str = "YS"
) -> xarray.DataArray:
    """Frequency of extreme warm days.

    Return the number of days with tasmax > thresh per period

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days with tasmax > threshold per period.

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TX_{ij} > Threshold [℃]
    """
    thresh_val = convert_units_to(thresh, tasmax)
    n_days = threshold_count(tasmax, ">", thresh_val, freq)
    return to_agg_units(n_days, tasmax, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def warm_night_frequency(
    tasmin: xarray.DataArray, thresh: str = "22 degC", freq: str = "YS"
) -> xarray.DataArray:
    """Frequency of extreme warm nights.

    Number of days per period where the daily minimum temperature exceeds a threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days with tasmin > threshold per period.
    """
    # Express the threshold in the units of the input series before comparing.
    thresh_val = convert_units_to(thresh, tasmin)
    warm_nights = threshold_count(tasmin, ">", thresh_val, freq)
    # Attach count units consistent with the input's sampling frequency.
    return to_agg_units(warm_nights, tasmin, "count")
@declare_units(pr="[precipitation]", thresh="[precipitation]")
def wetdays(
    pr: xarray.DataArray, thresh: str = "1.0 mm/day", freq: str = "YS"
) -> xarray.DataArray:
    """Wet days.

    Total number of days per period with precipitation at or above a threshold.

    Parameters
    ----------
    pr : xarray.DataArray
      Daily precipitation.
    thresh : str
      Precipitation value over which a day is considered wet.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      The number of wet days for each period [day].

    Examples
    --------
    The following would compute for each grid cell of file `pr.day.nc` the number days
    with precipitation over 5 mm at the seasonal frequency, ie DJF, MAM, JJA, SON, DJF, etc.:

    >>> from xclim.indices import wetdays
    >>> pr = xr.open_dataset(path_to_pr_file).pr
    >>> wd = wetdays(pr, thresh="5 mm/day", freq="QS-DEC")
    """
    # "hydro" context lets the unit conversion move between rate and flux forms.
    wet_thresh = convert_units_to(thresh, pr, "hydro")
    n_wet = threshold_count(pr, ">=", wet_thresh, freq)
    return to_agg_units(n_wet, pr, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def maximum_consecutive_frost_days(
    tasmin: xarray.DataArray,
    thresh: str = "0.0 degC",
    freq: str = "AS-JUL",
) -> xarray.DataArray:
    r"""Maximum number of consecutive frost days (Tn < 0℃).

    Longest spell, within each period, of days whose minimum temperature stays
    below a given threshold (default: 0°C).

    WARNING: The default freq value is valid for the northern hemisphere.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      The maximum number of consecutive frost days (tasmin < threshold per period).

    Notes
    -----
    Let :math:`\mathbf{t}=t_0, t_1, \ldots, t_n` be a daily minimum temperature series and :math:`thresh`
    the threshold below which a day is a frost day. With :math:`\mathbf{s}` the sorted vector of indices
    :math:`i` where :math:`[t_i < thresh] \neq [t_{i+1} < thresh]` (the threshold crossings), the maximum
    number of consecutive frost days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [t_{s_j} > thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. This formula does not cover sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    frost_thresh = convert_units_to(thresh, tasmin)
    # Boolean frost mask, grouped by resampling period.
    is_frost = (tasmin < frost_thresh).resample(time=freq)
    # Longest run of consecutive frost days inside each period.
    longest = is_frost.map(rl.longest_run, dim="time")
    return to_agg_units(longest, tasmin, "count")
@declare_units(pr="[precipitation]", thresh="[precipitation]")
def maximum_consecutive_dry_days(
    pr: xarray.DataArray, thresh: str = "1 mm/day", freq: str = "YS"
) -> xarray.DataArray:
    r"""Maximum number of consecutive dry days.

    Return the maximum number of consecutive days within the period where precipitation
    is below a certain threshold.

    Parameters
    ----------
    pr : xarray.DataArray
      Mean daily precipitation flux.
    thresh : str
      Threshold precipitation on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      The maximum number of consecutive dry days (precipitation < threshold per period).

    Notes
    -----
    Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
    under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
    :math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the precipitation crosses the threshold.
    Then the maximum number of consecutive dry days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    # Convert threshold with hydrological context (rate <-> flux aware).
    t = convert_units_to(thresh, pr, "hydro")
    # Boolean dry-day mask, grouped by resampling period.
    group = (pr < t).resample(time=freq)
    # Longest run of consecutive True (dry) values within each period.
    out = group.map(rl.longest_run, dim="time")
    return to_agg_units(out, pr, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def maximum_consecutive_frost_free_days(
    tasmin: xarray.DataArray, thresh: str = "0 degC", freq: str = "YS"
) -> xarray.DataArray:
    r"""Maximum number of consecutive frost free days (Tn >= 0℃).

    Longest spell, within each period, of days whose minimum temperature stays
    at or above a given threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      The maximum number of consecutive frost free days (tasmin >= threshold per period).

    Notes
    -----
    Let :math:`\mathbf{t}=t_0, t_1, \ldots, t_n` be a daily minimum temperature series and :math:`thresh`
    the threshold at or above which a day is frost free. With :math:`\mathbf{s}` the sorted vector of
    indices :math:`i` where :math:`[t_i <= thresh] \neq [t_{i+1} <= thresh]` (the threshold crossings),
    the maximum number of consecutive frost free days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [t_{s_j} >= thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. This formula does not cover sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    frost_free_thresh = convert_units_to(thresh, tasmin)
    # Boolean frost-free mask, grouped by resampling period.
    is_frost_free = (tasmin >= frost_free_thresh).resample(time=freq)
    longest = is_frost_free.map(rl.longest_run, dim="time")
    return to_agg_units(longest, tasmin, "count")
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def maximum_consecutive_tx_days(
    tasmax: xarray.DataArray, thresh: str = "25 degC", freq: str = "YS"
) -> xarray.DataArray:
    r"""Maximum number of consecutive days with tasmax above a threshold (summer days).

    Return the maximum number of consecutive days within the period where
    the maximum temperature is above a certain threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Max daily temperature.
    thresh : str
      Threshold temperature.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      The maximum number of days with tasmax > thresh per periods (summer days).

    Notes
    -----
    Let :math:`\mathbf{t}=t_0, t_1, \ldots, t_n` be a daily maximum temperature series and :math:`thresh` the threshold
    above which a day is considered a summer day. Let :math:`\mathbf{s}` be the sorted vector of indices :math:`i`
    where :math:`[t_i < thresh] \neq [t_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
    Then the maximum number of consecutive summer days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [t_{s_j} > thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    t = convert_units_to(thresh, tasmax)
    # Boolean summer-day mask, grouped by resampling period.
    group = (tasmax > t).resample(time=freq)
    # Longest run of consecutive above-threshold days within each period.
    out = group.map(rl.longest_run, dim="time")
    return to_agg_units(out, tasmax, "count")
@declare_units(siconc="[]", areacello="[area]", thresh="[]")
def sea_ice_area(
    siconc: xarray.DataArray, areacello: xarray.DataArray, thresh: str = "15 pct"
) -> xarray.DataArray:
    """Total sea ice area.

    Sea ice area measures the total sea ice covered area where sea ice concentration is above a threshold,
    usually set to 15%.

    Parameters
    ----------
    siconc : xarray.DataArray
      Sea ice concentration (area fraction).
    areacello : xarray.DataArray
      Grid cell area (usually over the ocean).
    thresh : str
      Minimum sea ice concentration for a grid cell to contribute to the sea ice extent.

    Returns
    -------
    xarray.DataArray, [length]^2
      Sea ice area.

    Notes
    -----
    To compute sea ice area over a subregion, first mask or subset the input sea ice concentration data.

    References
    ----------
    `What is the difference between sea ice area and extent
    <https://nsidc.org/arcticseaicenews/faq/#area_extent>`_
    """
    conc_thresh = convert_units_to(thresh, siconc)
    # Normalisation factor so the weighted sum ends up in areacello units.
    full_cover = convert_units_to("100 pct", siconc)
    # Cells below the threshold contribute nothing; others contribute
    # concentration-weighted cell area.
    masked_conc = siconc.where(siconc >= conc_thresh, 0)
    area = xarray.dot(masked_conc, areacello) / full_cover
    area.attrs["units"] = areacello.units
    return area
@declare_units(siconc="[]", areacello="[area]", thresh="[]")
def sea_ice_extent(
    siconc: xarray.DataArray, areacello: xarray.DataArray, thresh: str = "15 pct"
) -> xarray.DataArray:
    """Total sea ice extent.

    Sea ice extent measures the *ice-covered* area, where a region is considered ice-covered if its sea ice
    concentration is above a threshold usually set to 15%.

    Parameters
    ----------
    siconc : xarray.DataArray
      Sea ice concentration (area fraction).
    areacello : xarray.DataArray
      Grid cell area.
    thresh : str
      Minimum sea ice concentration for a grid cell to contribute to the sea ice extent.

    Returns
    -------
    xarray.DataArray, [length]^2
      Sea ice extent.

    Notes
    -----
    To compute sea ice area over a subregion, first mask or subset the input sea ice concentration data.

    References
    ----------
    `What is the difference between sea ice area and extent
    <https://nsidc.org/arcticseaicenews/faq/#area_extent>`_
    """
    conc_thresh = convert_units_to(thresh, siconc)
    # Unlike sea_ice_area, a cell contributes its *full* area as soon as its
    # concentration reaches the threshold.
    extent = xarray.dot(siconc >= conc_thresh, areacello)
    extent.attrs["units"] = areacello.units
    return extent
@declare_units(sfcWind="[speed]", thresh="[speed]")
def windy_days(
    sfcWind: xarray.DataArray, thresh: str = "10.8 m s-1", freq: str = "MS"
) -> xarray.DataArray:
    r"""Windy days.

    The number of days with average near-surface wind speed above threshold.

    Parameters
    ----------
    sfcWind : xarray.DataArray
      Daily average near-surface wind speed.
    thresh : str
      Threshold average near-surface wind speed on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days with average near-surface wind speed above threshold.

    Notes
    -----
    Let :math:`WS_{ij}` be the windspeed at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        WS_{ij} >= Threshold [m s-1]
    """
    # Express the threshold in the input's wind-speed units before counting.
    wind_thresh = convert_units_to(thresh, sfcWind)
    windy = threshold_count(sfcWind, ">=", wind_thresh, freq)
    return to_agg_units(windy, sfcWind, "count")
@declare_units(tasmin="[temperature]", thresh="[temperature]")
def tropical_nights(
    tasmin: xarray.DataArray,
    thresh: str = "20.0 degC",
    freq: str = "YS",
) -> xarray.DataArray:
    """Tropical nights.

    The number of days with minimum daily temperature above threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    thresh : str
      Threshold temperature on which to base evaluation.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days with minimum daily temperature above threshold.

    Notes
    -----
    Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TN_{ij} > Threshold [℃]

    Warnings
    --------
    The `tropical_nights` indice is being deprecated in favour of `tn_days_above` with `thresh="20 degC"` by default.
    The indicator reflects this change. This indice will be removed in a future version of xclim.
    """
    # Deprecation shim: warn, then delegate to the replacement index unchanged.
    deprecation_msg = (
        "The `tropical_nights` indice is being deprecated in favour of `tn_days_above` with `thresh='20 degC'`. "
        "This indice will be removed in `xclim>=0.28.0`. Please update your scripts accordingly."
    )
    warnings.warn(deprecation_msg, UserWarning, stacklevel=3)
    return tn_days_above(tasmin, thresh=thresh, freq=freq)
@declare_units(tas="[temperature]", thresh="[temperature]", sum_thresh="K days")
def degree_days_exceedance_date(
    tas: xarray.DataArray,
    thresh: str = "0 degC",
    sum_thresh: str = "25 K days",
    op: str = ">",
    after_date: DayOfYearStr = None,
    freq: str = "YS",
) -> xarray.DataArray:
    r"""Degree days exceedance date.

    Day of year when the sum of degree days exceeds a threshold. Degree days are
    computed above or below a given temperature threshold.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature.
    thresh : str
      Threshold temperature on which to base degree days evaluation.
    sum_thresh : str
      Threshold of the degree days sum.
    op : {">", "gt", "<", "lt", ">=", "ge", "<=", "le"}
      If equivalent to '>', degree days are computed as `tas - thresh` and if
      equivalent to '<', they are computed as `thresh - tas`.
    after_date: str, optional
      Date at which to start the cumulative sum. In "mm-dd" format, defaults to the
      start of the sampling period.
    freq : str
      Resampling frequency. If `after_date` is given, `freq` should be annual.

    Returns
    -------
    xarray.DataArray, [dimensionless]
      Degree-days exceedance date.

    Notes
    -----
    Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`,
    :math:`T` is the reference threshold and :math:`ST` is the sum threshold. Then, starting
    at day :math:`i_0`, the degree days exceedance date is the first day :math:`k` such that

    .. math::

        \begin{cases}
        ST < \sum_{i=i_0}^{k} \max(TG_{ij} - T, 0) & \text{if $op$ is '>'} \\
        ST < \sum_{i=i_0}^{k} \max(T - TG_{ij}, 0) & \text{if $op$ is '<'}
        \end{cases}

    The resulting :math:`k` is expressed as a day of year.

    Cumulated degree days have numerous applications including plant and insect phenology.
    See https://en.wikipedia.org/wiki/Growing_degree-day for examples.
    """
    # Work entirely in Kelvin so temperature differences are well defined.
    thresh = convert_units_to(thresh, "K")
    tas = convert_units_to(tas, "K")
    sum_thresh = convert_units_to(sum_thresh, "K days")
    # `c` is the signed daily degree-day contribution; negatives are clipped below.
    if op in ["<", "<=", "lt", "le"]:
        c = thresh - tas
    elif op in [">", ">=", "gt", "ge"]:
        c = tas - thresh
    else:
        raise NotImplementedError(f"op: '{op}'.")
    def _exceedance_date(grp):
        # Index of `after_date` within this resampling group (0 if no date given).
        strt_idx = rl.index_of_date(grp.time, after_date, max_idxs=1, default=0)
        if (
            strt_idx.size == 0
        ):  # The date is not within the group. Happens at boundaries.
            return xarray.full_like(grp.isel(time=0), np.nan, float).drop_vars("time")  # type: ignore
        # Day-of-year of the first day the cumulative degree-day sum exceeds the threshold.
        return rl.first_run_after_date(
            grp.where(grp.time >= grp.time[strt_idx][0]).cumsum("time") > sum_thresh,
            window=1,
            date=None,
        )
    out = c.clip(0).resample(time=freq).map(_exceedance_date)
    out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(tas))
    return out
@declare_units(snd="[length]", thresh="[length]")
def winter_storm(
    snd: xarray.DataArray, thresh: str = "25 cm", freq: str = "AS-JUL"
) -> xarray.DataArray:
    """Days with snowfall over threshold.

    Number of days with snowfall accumulation greater or equal to threshold.

    Parameters
    ----------
    snd : xarray.DataArray
      Surface snow depth.
    thresh : str
      Threshold on snowfall accumulation require to label an event a `winter storm`.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray
      Number of days per period identified as winter storms.

    Notes
    -----
    Snowfall accumulation is estimated by the change in snow depth.
    """
    thresh = convert_units_to(thresh, snd)
    # Compute daily accumulation as the day-over-day change in snow depth.
    acc = snd.diff(dim="time")
    # Winter storm condition: accumulation meets or exceeds the threshold.
    out = threshold_count(acc, ">=", thresh, freq)
    # BUG FIX: the original did `out.attrs["units"] = to_agg_units(...)`, storing
    # a whole DataArray in the units attribute and returning the raw count.
    # Return the converted array itself, as the sibling indices do.
    return to_agg_units(out, snd, "count")
|
'''Build full state representation'''
import numpy as np
from pandas import DataFrame
class StateRepresentationBuilder():
    '''
    Implements section 3.2 of the paper. Builds state representation \
    of raw entities
    '''
    def __init__(self, neighbor_radius=25):
        # Entities currently being tracked across timesteps.
        self.tracked_entities = []
        # Next id handed out to a newly appearing entity.
        self.next_free_entity_id = 0
        # DataFrame counting observed type -> type transitions ('null' = absent).
        self.type_transition_matx = None
        # Per-source-type total transition counts (denominator for l_trans).
        self.total_transitions = {}
        self.do_not_exist = []  # entities to be removed as they no longer exist
        # Weights of the three similarity factors: [distance, transitions, neighbors].
        self.sim_weights = [2, 1, 1]
        self.neighbor_radius = neighbor_radius

    def build_state(self, entities, found_types):
        '''Tag entities across time, build interactions'''
        if not self.tracked_entities and self.next_free_entity_id == 0:
            # First frame of an episode: init type transition matrix.
            self.type_transition_matx = DataFrame(0,
                                                  columns=['null'] + found_types,
                                                  index=['null'] + found_types)
            for e_type in found_types:
                # Seed the diagonal with 1 because assumption: objects tend to stay the same type.
                self.type_transition_matx.at[e_type, e_type] = 1
            # init tracking for objects
            self._init_tracking(entities)
        else:
            # Update type transition matrix if there are new types
            num_current_types = self.type_transition_matx.shape[0]
            for e_type in found_types:
                if e_type not in self.type_transition_matx.index:
                    # New, never before seen entity type, make new entry in trans matrix
                    # make column
                    self.type_transition_matx.insert(num_current_types, e_type, 0)
                    # make row
                    # NOTE(review): after the insert the matrix has
                    # num_current_types + 1 columns, but this row has
                    # num_current_types entries; also num_current_types is not
                    # refreshed when several new types arrive — confirm intended.
                    self.type_transition_matx.loc[e_type] = np.zeros(num_current_types, dtype=int)
                    # Seed the diagonal with 1 because assumption: objects tend to stay the same type.
                    self.type_transition_matx.at[e_type, e_type] = 1
            # print(self.type_transition_matx)
            # Update tracking for objects
            self._update_tracking(entities)
        final_repr = self._build_representation()
        return final_repr

    def restart(self):
        '''Fresh start of a new episode'''
        # The transition matrix and totals are intentionally kept across episodes.
        self.tracked_entities = []
        self.next_free_entity_id = 0
        self.do_not_exist = []

    def _init_tracking(self, entities):
        '''Set up tags for all existing entities'''
        for entity in entities:
            entity.id = self.next_free_entity_id
            self.tracked_entities.append(entity)
            self.next_free_entity_id += 1

    def _is_same_entity(self, old_e, new_e):
        '''Check whether new_e is a displaced version of old_e'''
        similarity = 0
        # Factor 1: euclidean distance (1 at zero displacement, -> 0 far away)
        l_dist = 1 / (1 + np.linalg.norm(new_e.position-old_e.position))
        # Factor 2: Transitions — observed likelihood of old type becoming new type.
        # NOTE(review): total_transitions starts at 0 for unseen types, so this
        # divides by zero on early frames; with numpy numerators that yields
        # nan (handled below) or inf (NOT handled) — confirm intended.
        l_trans = self.type_transition_matx.at[old_e.entity_type, new_e.entity_type] / \
            self.total_transitions.setdefault(old_e.entity_type, 0)
        if np.isnan(l_trans):
            l_trans = 0
        # factor 3: neighbors — similar neighbour counts suggest the same entity.
        l_neighbors = 1 / (1 + abs(new_e.n_neighbors - old_e.n_neighbors))
        # print('----')
        # print('before:', old_e.__dict__)
        # print('new:', new_e.__dict__)
        # Weighted mean of the three factors; 0.5 is the match threshold.
        similarity = self.sim_weights[0] * l_dist + \
            self.sim_weights[1] * l_trans + \
            self.sim_weights[2] * l_neighbors
        similarity = similarity/3
        # print(l_dist, l_trans, l_neighbors, 'similarity:', similarity)
        return similarity > 0.5

    def _update_tracking(self, new_entities):
        '''Track entities across time, using their last state'''
        # if an entity is not matched with any in new entities,
        # place it in possibly_disappeared, and remove it if encountered
        # If there are any in possibly_disappeared by the time the
        # iteration over new_entities is done, the entity has actually disappeared
        possibly_disappeared = []
        newly_nonexistent = []
        for i, tracked_e in enumerate(self.tracked_entities):
            # print(tracked_e.__dict__)
            if not tracked_e.exists:
                # Entity was already flagged as disappeared last step; queue deletion.
                print('Marked for deletion next loop', tracked_e.__dict__)
                print('---')
                newly_nonexistent.append(i)
                continue
            for new_e_i, new_e in enumerate(new_entities):
                # print('comparing', new_e.__dict__)
                if self._is_same_entity(tracked_e, new_e):
                    # print('same entity')
                    # Update transition matrix
                    # (even if not transitioned, how often the type stays the same is important)
                    self._mark_transition(tracked_e.entity_type, new_e.entity_type)
                    self.tracked_entities[i].update_self(new_e)
                    # print('Updated', self.tracked_entities[i].__dict__)
                    if i in possibly_disappeared:
                        possibly_disappeared.remove(i)
                    # Consume the matched observation so it cannot match twice.
                    del new_entities[new_e_i]
                    break
            else:
                # for-else: no candidate matched -> new entity, and/or tracked_e disappeared
                print('match not found', tracked_e.__dict__)
                print('---')
                possibly_disappeared.append(i)
        for disapp_idx in possibly_disappeared:  # well, they definitely disappeared
            # Mark transition in matrix
            self._mark_transition(self.tracked_entities[disapp_idx].entity_type, 'null')
            # Mark object as nonexistent, to be removed in the next timestep
            # We do not delete it in this timestep because we have to show the agent
            # that the entity disappeared
            self.tracked_entities[disapp_idx].disappeared()
        # Delete in reverse index order so earlier deletions don't shift later indices.
        self.do_not_exist.reverse()
        for dne_idx in self.do_not_exist:
            print('DNE', dne_idx)
            del self.tracked_entities[dne_idx]
        self.do_not_exist = newly_nonexistent  # to be removed next time
        # Anything left unmatched in new_entities is a brand-new entity.
        for entity_to_add in new_entities:
            entity_to_add.id = self.next_free_entity_id
            entity_to_add.appeared()
            self.tracked_entities.append(entity_to_add)
            # Mark transition in matrix
            self._mark_transition('null', entity_to_add.entity_type)
            # increment id for next appearing object
            self.next_free_entity_id += 1

    def _build_representation(self):
        '''Build time-abstracted representation + object interactions'''
        def interaction(el_1, el_2, loc_diff, types_before, types_after):
            '''Make interaction dict for certain interaction params'''
            return {
                'interaction': (el_1.id, el_2.id),
                'loc_difference': tuple(loc_diff),
                'types_before': types_before,
                'types_after': types_after
            }
        interactions = []
        interactions_built = []  # pairs of entities for which interaction has already been built
        # Build interactions
        for entity in self.tracked_entities:
            # Neighbours within 2x the radius on every coordinate (includes `entity` itself;
            # the self-pair is dropped below as an "empty" interaction).
            within_radius = [x for x in self.tracked_entities
                             if np.all((x.position - entity.position) < self.neighbor_radius*2)]
            for w_r in within_radius:
                # Canonical ordering by entity type so each pair is built once.
                sorted_e = (entity, w_r) if entity.entity_type < w_r.entity_type else (w_r, entity)
                interact_ids = (sorted_e[0].id, sorted_e[1].id)
                if interact_ids in interactions_built:
                    # interaction already built before
                    continue
                # position change: relative displacement of the two entities since last step
                loc_diff = (sorted_e[0].position - sorted_e[0].prev_state['position']) - \
                    (sorted_e[1].position - sorted_e[1].prev_state['position'])
                types_before = (sorted_e[0].prev_state['entity_type'], sorted_e[1].prev_state['entity_type'])
                types_after = (sorted_e[0].entity_type, sorted_e[1].entity_type)
                if np.array_equal(loc_diff, (0, 0)) and np.array_equal(types_before, types_after):
                    # Empty interaction, don't add it
                    continue
                interactions.append(interaction(sorted_e[0], sorted_e[1], loc_diff,
                                                types_before, types_after))
                interactions_built.append(interact_ids)
        return interactions

    def _mark_transition(self, from_type, to_type):
        '''Mark type transition in transition matrix'''
        # Bump the per-source-type total and the specific from->to cell.
        self.total_transitions.setdefault(from_type, 0)
        self.total_transitions[from_type] += 1
        self.type_transition_matx.at[from_type, to_type] += 1
|
import math
from panda3d.core import *
from otp.nametag import NametagGlobals
from otp.nametag.MarginPopup import MarginPopup
from otp.nametag.Nametag import Nametag
from otp.nametag.NametagConstants import *
class Nametag2d(Nametag, MarginPopup):
    """2-D (on-screen margin) nametag: name card, chat balloon and edge arrow.

    NOTE(review): this looks like decompiled code (v-prefixed temporaries,
    "XXX ... GUESS" markers); comments below describe what the visible code
    does, not the original author's intent.
    """

    def __init__(self):
        Nametag.__init__(self, 8.0)
        MarginPopup.__init__(self)
        # NodePaths built by updateContents(); None until first generated.
        self.copied_np = None
        self.attached_np = None
        self.arrow = None
        self.unknown_np = None
        # self.setCullCallback()
        # Cull callback re-orients the arrow toward the avatar each frame.
        self.cbNode = CallbackNode(self.getName() + '-cbNode')
        self.cbNode.setCullCallback(PythonCallbackObject(self.cullCallback))
        self.addChild(self.cbNode)
        self.setName('unnamed')
        self.contents = 3
        self.chat_contents = 0
        self.updateContents()
        # Cached copies used to detect global state changes in considerVisible().
        self.on = NametagGlobals._master_arrows_on
        self.seq2d = 0
        self.trans_vec = Vec3(0, 0, 0)

    def setVisible(self, value):
        """Show/hide the popup and rebuild its contents."""
        self.visible = value
        self.updateContents()

    def manage(self, manager):
        """Register this popup with the margin manager."""
        self.updateContents()
        manager.managePopup(self)

    def unmanage(self, manager):
        """Unregister from both the nametag system and the margin manager."""
        Nametag.unmanage(self, manager)
        manager.unmanagePopup(self)

    def setObjectCode(self, objcode):
        """Forward the object code to the owning group, if any."""
        if self.group:
            self.group.setObjectCode(objcode)

    def getObjectCode(self):
        """Return the group's object code, or 0 when ungrouped."""
        if self.group:
            return self.group.getObjectCode()
        return 0

    def getScore(self):
        """Margin-cell priority: closer avatars score higher (0 when ungrouped)."""
        if self.group:
            return 1000 - self.getDistance2()
        return 0

    def getDistance2(self):
        """Squared distance from the local toon to this tag's avatar."""
        if self.avatar:
            np = self.avatar
        else:
            np = self.group.getAvatar()
        if np.isEmpty():
            return 0
        return np.getPos(NametagGlobals._toon).lengthSquared()

    def considerVisible(self):
        """Decide whether this 2-D tag should currently be shown.

        Refreshes contents when the global arrow toggle or margin property
        sequence changed, then applies the 3-D-flag / forced-chat /
        building-distance rules.
        """
        from NametagGroup import NametagGroup
        v2 = 0
        do_update = True
        # Global arrow toggle flipped since last check: force a refresh.
        if self.on != NametagGlobals._master_arrows_on:
            self.on = NametagGlobals._master_arrows_on
            v2 = 1
        # Only skip the refresh when the margin property sequence is unchanged
        # AND the arrow toggle did not flip.
        if self.seq2d == NametagGlobals._margin_prop_seq:
            if not v2:
                do_update = False
        else:
            self.seq2d = NametagGlobals._margin_prop_seq
        if do_update:
            self.updateContents()
        if not self.chat_contents:
            return 0
        # Suppressed while the 3-D tag claims exclusive display (flag == 2).
        result = self.group.nametag3d_flag != 2
        if NametagGlobals._onscreen_chat_forced and self.chat_contents & (Nametag.CSpeech | Nametag.CThought):
            result = 1
            self.group.setNametag3dFlag(0)
        # Building tags only show within 40 units (1600 = 40^2, squared distance).
        if result and self.group.getColorCode() in (NametagGroup.CCToonBuilding,
                                                    NametagGroup.CCSuitBuilding,
                                                    NametagGroup.CCHouseBuilding):
            return self.getDistance2() < 1600
        return result

    def updateContents(self):
        """Tear down the current geometry and regenerate chat/name display."""
        self.stopFlash()
        if self.group:
            self.setName(self.group.getName())
        else:
            self.setName('unnamed')
        # Drop any previously generated nodes before rebuilding.
        if self.copied_np:
            self.copied_np.removeNode()
        if self.attached_np:
            self.attached_np.removeNode()
        if self.arrow:
            self.arrow.removeNode()
        if self.unknown_np:
            self.unknown_np.removeNode()
        self.chat_contents = self.determineContents()
        # Bit 0 is the arrow/name bit; masked off when arrows are globally disabled.
        if not NametagGlobals._master_arrows_on:
            self.chat_contents = self.chat_contents & ~1
        if self.visible and self.isGroupManaged():
            v10 = self.chat_contents
            # Speech takes precedence over thought, which beats the plain name.
            if v10 & Nametag.CSpeech:
                self.generateChat(NametagGlobals._speech_balloon_2d)
            elif v10 & Nametag.CThought:
                self.generateChat(NametagGlobals._thought_balloon_2d)
            elif v10 & Nametag.CName:
                self.generateName()

    def frameCallback(self):
        """Per-frame hook: sync the region sequence and refresh group regions."""
        if self.visible and self.popup_region:
            self.seq = self.group.region_seq
        if self.group:
            self.group.updateRegions()

    def rotateArrow(self):
        """Point the margin arrow from the local toon toward the avatar."""
        if not self.arrow:
            return
        if self.avatar:
            np = self.avatar
        else:
            np = self.group.getAvatar()
        if not np:
            return
        # Direction from the toon to the avatar, in camera space.
        relpos = np.getPos(NametagGlobals._camera) - NametagGlobals._toon.getPos(NametagGlobals._camera)
        # Roll (screen-plane rotation) from the XY heading, in degrees.
        hpr = Vec3(0, 0, -math.atan2(relpos[1], relpos[0]) * 180 / math.pi)
        scale = Vec3(0.5, 0.5, 0.5)
        shear = Vec3(0, 0, 0)
        temp_mat_3 = Mat3()
        composeMatrix(temp_mat_3, scale, shear, hpr)
        # Keep the translation computed when the arrow was generated.
        arrow_mat = Mat4(temp_mat_3, self.trans_vec)
        self.arrow.setMat(arrow_mat)

    def generateName(self):
        """Rebuild the on-screen name card, name text, arrow and click region."""
        v4 = self.getState()
        v84 = NametagGlobals.getNameFg(self.group.getColorCode(), v4)
        v75 = NametagGlobals.getNameBg(self.group.getColorCode(), v4)
        # Clamp background alpha to the configured 2-D range.
        v75[3] = max(v75[3], NametagGlobals._min_2d_alpha)
        v75[3] = min(v75[3], NametagGlobals._max_2d_alpha)
        # Padded card frame edges: v67 = top, v68 = bottom.
        v67 = NametagGlobals._card_pad[3] + self.group.name_frame[3]
        v68 = self.group.name_frame[2] - NametagGlobals._card_pad[2]
        wordwrap = self.group.getNameWordwrap()
        # Uniform scale fitting the wordwrapped name into the margin cell.
        v17 = self.cell_width / wordwrap * 2.0
        v66 = 0.333 * (1.0 / v17) - (v68 + v67) * 0.5
        v18 = min(1.0 / v17 - v67, v66)
        # Scale + vertical-offset transform applied to card and text.
        v69 = Mat4(v17, 0, 0, 0,
                   0, v17, 0, 0,
                   0, 0, v17, 0,
                   0, 0, v18 * v17, 1.0)
        a3 = v69
        if v75[3] != 0.0:
            # Background visible: build the backing card geometry.
            card = CardMaker('nametag')
            card.setFrame(self.group.name_frame[0] - NametagGlobals._card_pad[0],
                          self.group.name_frame[1] + NametagGlobals._card_pad[1],
                          v68, v67)
            card.setColor(v75)
            if NametagGlobals._nametag_card:
                card.setSourceGeometry(NametagGlobals._nametag_card.node(),
                                       NametagGlobals._nametag_card_frame)
            self.attached_np = self.np.attachNewNode(card.generate())
            self.attached_np.setMat(v69)
            if v75[3] != 1.0:
                self.attached_np.setTransparency(1)
            if self.has_draw_order:
                bin = config.GetString('nametag-fixed-bin', 'fixed')
                self.attached_np.setBin(bin, self.draw_order)
        # Copy of the name text node, colored and placed over the card.
        self.copied_np = self.group.copyNameTo(self.np)
        self.copied_np.setMat(a3)
        if self.has_draw_order:
            bin = config.GetString('nametag-fixed-bin', 'fixed')
            self.copied_np.setBin(bin, self.draw_order)
        self.copied_np.setColor(v84)
        if v84[3] != 1.0:
            self.copied_np.setTransparency(1)
        # Flatten render attribs into the generated geometry.
        # NOTE(review): when the background alpha was 0, attached_np may be a
        # previously-removed node here — confirm against the original binary.
        reducer = SceneGraphReducer()
        reducer.applyAttribs(self.copied_np.node())
        reducer.applyAttribs(self.attached_np.node())
        if NametagGlobals._arrow_model:
            self.arrow = NametagGlobals._arrow_model.copyTo(self.np)
            if self.has_draw_order:
                bin = config.GetString('nametag-fixed-bin', 'fixed')
                self.arrow.setBin(bin, self.draw_order)
            # Arrow translation: just below the card bottom, in card space.
            self.trans_vec = a3.xformPoint(Point3(0, 0, v68 - 1.0))
            color = NametagGlobals.getArrowColor(self.group.getColorCode())
            self.arrow.setColor(color)
            if color[3] != 1.0:
                self.arrow.setTransparency(1)
            self.rotateArrow()
        elif self.arrow:
            self.arrow.removeNode()
        # Transform the padded frame corners into screen space for the click region.
        v69 = self.np.getNetTransform().getMat()
        v69 = a3 * v69
        v77 = v69.xformPoint(Point3(self.group.name_frame[0] - NametagGlobals._card_pad[0], 0, v68))
        v80 = v69.xformPoint(Point3(self.group.name_frame[1] + NametagGlobals._card_pad[1], 0, v67))
        frame = Vec4(v77[0], v80[0], v77[2], v80[2])
        self.setRegion(frame, 0)

    def generateChat(self, balloon):
        """Rebuild the 2-D chat balloon (speech or thought) and click region."""
        v5 = self.getState()
        text_color = NametagGlobals.getChatFg(self.group.getColorCode(), v5)
        balloon_color = NametagGlobals.getChatBg(self.group.getColorCode(), v5)
        if self.group.chat_flags & CFQuicktalker:
            balloon_color = self.group.getQtColor()
        # Clamp balloon alpha to the configured 2-D range.
        balloon_color[3] = max(balloon_color[3], NametagGlobals._min_2d_alpha)
        balloon_color[3] = min(balloon_color[3], NametagGlobals._max_2d_alpha)
        text = self.group.getChat()
        # Prefix the speaker's name when the group has one.
        if self.group.name:
            text = '%s: %s' % (self.group.name, text)
        has_page_button = False
        has_quit_button = False
        # Paging buttons only appear on non-timing-out chat; the last page
        # swaps the page button for a quit button.
        if not self.group.has_timeout:
            has_page_button = self.group.chat_flags & CFPageButton
            if self.group.getPageNumber() >= self.group.getNumChatPages() - 1:
                if self.group.chat_flags & CFQuitButton:
                    has_page_button = False
                    has_quit_button = True
        page_button = None
        if has_page_button:
            page_button = NametagGlobals.getPageButton(v5)
        elif has_quit_button:
            page_button = NametagGlobals.getQuitButton(v5)
        reversed = self.group.chat_flags & CFReversed
        new_button = [None]
        balloon_result = balloon.generate(text, self.group.getChatFont(), self.wordwrap,
                                          text_color, balloon_color, False,
                                          self.has_draw_order, self.draw_order,
                                          page_button, self.group.willHaveButton(),
                                          reversed, new_button)
        self.unknown_np = self.np.attachNewNode(balloon_result)
        v88 = 8.0  # XXX THIS IS A GUESS
        v49 = 2 * self.cell_width
        # Uniform balloon scale fitting the margin cell.
        a6 = v49 / (v88 + 1.0)
        v50 = balloon.text_height * balloon.hscale
        v85 = balloon.hscale * 5.0
        v88 = v50 * 0.5
        # Horizontal / vertical centering offsets, in balloon space.
        v113 = -(balloon.hscale * 0.5 + v85)
        v51 = -(NametagGlobals._balloon_text_origin[2] + v88)
        v118 = Mat4(a6, 0, 0, 0,
                    0, a6, 0, 0,
                    0, 0, a6, 0,
                    v113 * a6, 0, v51 * a6, 1.0)
        self.unknown_np.setMat(v118)
        reducer = SceneGraphReducer()
        reducer.applyAttribs(self.unknown_np.node())
        v66 = self.np.getNetTransform().getMat()
        # XXX THE LINES BELOW ARE A GUESS
        # Click region spans the scaled offsets mirrored about the origin.
        v67 = v113 * a6
        v68 = v51 * a6
        v94 = v66.xformPoint(Point3(v67, 0.0, v68))
        v97 = v66.xformPoint(Point3(-v67, 0.0, -v68))
        frame = Vec4(v94[0], v97[0], v94[2], v97[2])
        self.setRegion(frame, 0)

    def cullCallback(self, *args):
        """Cull hook: keep the arrow aimed and the region sequence current."""
        self.rotateArrow()
        if self.visible and self.popup_region:
            self.seq = self.group.getRegionSeq()
|
# coding=utf-8
import pstats
from collections import OrderedDict
from operator import itemgetter
import jieba
import time
import scipy
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, spmatrix, coo_matrix
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from model.preprocessing import PreProcess
from extra.stop_words import SIGNAL_STOP_WORDS
from extra.vectorizer import MyCountVectorizer
class SimilaryModel(object):
"""
用于计算相似度的模型
"""
def __init__(self, dataframe, features_with_weight, src_article_num=None,
             rec_article_num=3, title_fc_extra_weight=None, ndigits=2):
    # BUG FIX: the original `def` line was missing its trailing colon, which is
    # a SyntaxError.
    """Similarity model over a set of articles.

    :param dataframe: input articles, as a pandas.DataFrame
    :param features_with_weight: OrderedDict mapping feature names (a
        vocabulary is built per feature) to their weight in the blended score
    :param src_article_num: number of source articles to generate
        recommendations for; None (default) means all rows, an int (must be
        <= len(dataframe)) means dataframe[0:src_article_num]
    :param rec_article_num: number of recommended articles per source article
    :param title_fc_extra_weight: extra weight applied to the title tokens of
        articles whose level-3 category is empty (None disables the boost)
    :param ndigits: number of decimal places kept in similarity scores
    """
    if not isinstance(dataframe, pd.DataFrame):
        raise TypeError("dataframe must be pandas.DataFrame!")
    if not isinstance(features_with_weight, OrderedDict):
        raise TypeError("word_features_with_weight must be OrderedDict!")
    # Guard the comparison explicitly so the None default is accepted on
    # Python 3 as well, where `None > int` raises TypeError.
    if src_article_num is not None and src_article_num > len(dataframe):
        raise ValueError(
            "length of src_article_num should not exceed len(dataframe)")
    self.dataframe = dataframe
    self.features_with_weight = features_with_weight
    # Feature names in insertion order. `.keys()` works on Python 2 and 3
    # alike, unlike the original `zip(*items())[0]`, which breaks on Python 3.
    self.features = list(self.features_with_weight.keys())
    self.src_article_num = src_article_num
    self.rec_article_num = rec_article_num
    self.title_fc_extra_weight = title_fc_extra_weight
    self.ndigits = ndigits
    # Populated later by calculate_similary() / _generate_vector().
    self.features_with_score = None
    self.feature_matrixs = None
def _generate_vector(self):
"""
创建词汇表并生成文本向量
"""
data = self.dataframe
feature_matrixs = {}
count_vectorizers = {}
# print "-----------: %s" % ",".join(jieba.cut(u"达斯登 速干T恤男"))
for feature in self.features:
vect = CountVectorizer(analyzer="word", lowercase=True, tokenizer=jieba.cut, stop_words=SIGNAL_STOP_WORDS, binary=False) if feature in (
u"title_fc") else MyCountVectorizer(stop_words=("", "0", "-1"))
try:
feature_matrix = vect.fit_transform(data[feature])
feature_matrixs[feature] = feature_matrix
count_vectorizers[feature] = vect
except ValueError:
feature_matrixs[feature] = csr_matrix(
(data.shape[0], 1), dtype=np.int8)
count_vectorizers[feature] = None
print "feature[%s] is empty vocabulary; perhaps the documents only contain stop words!" % feature
self.feature_matrixs = feature_matrixs
self.count_vectorizers = count_vectorizers
return feature_matrixs
def calculate_similary(self):
"""
根据特征的文本向量生成相似度矩阵
"""
data = self.dataframe
feature_matrixs = self._generate_vector()
number = len(data) # 数据的个数
# 计算相似度矩阵
src_score_list = []
for feature in self.features:
sim = cosine_similarity(feature_matrixs[feature], dense_output=False) * self.features_with_weight.get(
feature)
src_score_list.append(sim)
# 生成内容为各个特征及对应的相似度得分的一个字典
self.features_with_score = dict(zip(self.features, src_score_list))
# 增加三级品类为空的文章的标题分词权重
if self.title_fc_extra_weight is not None:
base_array = np.where(
data[u"level_3_id"] == "", self.title_fc_extra_weight, 1)
weight_mat = np.array([base_array] * number)
# weight_mat = np.array([data["level_3_id_is_null"] * self.title_fc_extra_weight] * number)
self.features_with_score["title_fc"] = self.features_with_score["title_fc"].multiply(
weight_mat).tocsr()
# 初始化一个csr矩阵
sum_score = csr_matrix((number, number), dtype=np.float16)
for feature in self.features:
sum_score = sum_score + self.features_with_score.get(feature)
sum_score.setdiag([-1] * number) # 将自身向量与自身向量的相似度设为-1,即文章本身之间的相似度设为-1
self.score_csr_matrix = sum_score
return sum_score
def map_articles(self, res_format="json"):
"""
将生成的相似度分数矩阵与原始文章数据的文章id进行关联
:param res_format: 生成推荐结果的格式,json or list
:return:
"""
src_article_num = self.src_article_num if self.src_article_num else len(
self.dataframe) # 数据的个数
result = {} if res_format == "json" else []
for index in xrange(src_article_num): # 每一行的数据表示一篇文章跟其他文章的相似度分数列表
sum_score_row = np.around(self.score_csr_matrix.getrow(
index).toarray()[0], self.ndigits)
# print sum_score_row
single_score_row_list = [self.features_with_score.get(feature).getrow(index).toarray()[0]
for feature in self.features]
# 对分数为小数的进行位数取舍
single_score_row_list = map(lambda arr: np.around(
arr, self.ndigits), single_score_row_list)
# 将推荐的文章id和相对应的相似度分数列表进行匹配
rec_article_id_with_score = zip(
self.dataframe["article_id"], sum_score_row, *single_score_row_list)
# 按照总分来降序排序,选出排名 Top N
recs = sorted(rec_article_id_with_score, key=lambda item: item[1], reverse=True)[
0: self.rec_article_num]
# 源文章id
src_article_id = self.dataframe["article_id"].get(index)
if res_format == "json":
result[src_article_id] = recs
else:
tmp = [[src_article_id, ] + list(r) for r in recs]
result.extend(tmp)
return result
class CombineModel(object):
    """
    Blends content similarity (from a SimilaryModel) with article heat
    into one recommendation score and maps it back to article ids.
    """

    def __init__(self, dataframe, weights, similary_model=None, src_article_num=None, rec_article_num=3, ndigits=2):
        """
        :param dataframe: article data, a pandas.DataFrame with a "log_heat" column
        :param weights: dict with keys "similary" and "heat" giving the blend ratio
        :param similary_model: a SimilaryModel instance.
            NOTE(review): despite the None default, calculate_similary() is
            invoked on it immediately below, so passing None raises -- confirm intent.
        :param src_article_num: number of source articles to produce results
            for (None means all rows)
        :param rec_article_num: recommendations per source article
        :param ndigits: decimal places kept in scores
        """
        self.dataframe = dataframe
        self.similary = similary_model
        self.weights = weights
        self.src_article_num = src_article_num
        self.rec_article_num = rec_article_num
        self.ndigits = ndigits
        # Eagerly compute the weighted similarity matrix at construction time.
        self.score_csr_matrix = similary_model.calculate_similary()

    def map_articles(self, res_format="json"):
        """
        Combine similarity and heat scores and join them to article ids.

        :param res_format: "json" -> dict keyed by source article id,
            anything else -> flat list of rows
        :return: per-article Top-N recommendations
        """
        src_article_num = self.src_article_num if self.src_article_num else len(
            self.dataframe)  # number of source articles
        result = {} if res_format == "json" else []
        # heat = scipy.matrix([self.dataframe["sum_collect_comment"].tolist()] * number)
        # Heat term: the same (rounded) heat row repeated once per source article.
        heat = self.weights.get("heat") * scipy.matrix(
            [np.around(self.dataframe["log_heat"].values, self.ndigits)] * src_article_num)
        score_simi = self.weights.get("similary") * self.score_csr_matrix
        # sum_score_mat = 0.7 * self.score_csr_matrix + 0.3 * heat
        sum_score_mat = score_simi + heat
        for index in xrange(src_article_num):  # one row = one article's scores vs. all others
            sum_score_row = np.around(
                sum_score_mat[index].tolist()[0], self.ndigits)
            single_score_row_list = [self.similary.features_with_score.get(feature).getrow(index).toarray()[0]
                                     for feature in self.similary.features]
            single_score_row_list.append(np.array(heat[index].tolist()[0]))
            single_score_row_list.append(score_simi[index].toarray()[0])
            # Round the per-feature score rows as well.
            single_score_row_list = map(lambda arr: np.around(
                arr, self.ndigits), single_score_row_list)
            # Pair candidate article ids with total and component scores.
            rec_article_id_with_score = zip(
                self.dataframe["article_id"], sum_score_row, *single_score_row_list)
            # Sort by total score descending; ties broken by the last column
            # (the weighted-similarity component appended above); keep Top N.
            recs = sorted(rec_article_id_with_score, key=itemgetter(
                1, -1), reverse=True)[0: self.rec_article_num]
            # Source article id for this row.
            src_article_id = self.dataframe["article_id"].get(index)
            if res_format == "json":
                result[src_article_id] = recs
            else:
                tmp = [[src_article_id, ] + list(r) for r in recs]
                result.extend(tmp)
        return result
# Each similarity feature paired with its weight, in priority order.
_FEATURE_WEIGHT_PAIRS = [
    (u"pro_id", 1.2),
    (u"level_4_id", 1),
    (u"level_3_id", 0.8),
    (u"level_2_id", 0.6),
    (u"level_1_id", 0.4),
    (u"brand_id", 0.2),
    (u"title_fc", 0.2),
    (u"sex", 0.2),
    (u"crowd", 0.2),
]
cols = [name for name, _ in _FEATURE_WEIGHT_PAIRS]
weight = [w for _, w in _FEATURE_WEIGHT_PAIRS]
# Ordered mapping: feature column name -> similarity weight.
features_with_weight = OrderedDict(_FEATURE_WEIGHT_PAIRS)
# Blend ratio of content similarity vs. article heat used by CombineModel.
combine_weights = {u"similary": 0.85, u"heat": 0.05}
# Extra multiplier on title similarity for articles missing a level-3 category.
title_fc_extra_weight = 8
if __name__ == "__main__":
    # Ad-hoc smoke test / timing harness (Python 2: print statements, time.clock).
    t0 = time.clock()
    # input_file = "../off_line_file/article_data_v4_2017-02-17-16-59_part.csv"
    input_file = "../off_line_file/title_fc_weight_part.csv"
    d = pd.read_csv(input_file, sep=",", encoding="utf_8")
    # d = pd.read_csv("../off_line_file/article_data_v5_2017-03-01-20-56.txt", sep="\t", encoding="utf8")
    print d.shape
    # Project-specific cleaning step before vectorization.
    pp = PreProcess()
    data = pp(d)
    # Vectorize the "sex" column for manual inspection.
    # NOTE(review): m2 (char-level) is created but never used.
    m1 = CountVectorizer(analyzer="word")
    m2 = CountVectorizer(analyzer="char")
    print m1.fit_transform(data.sex).toarray()
    print m1.get_feature_names()
|
<reponame>ZengTaoxu/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
import pytest
class AzureManagementGroupsScenarioTest(ScenarioTest):
    """Scenario tests for the `az account management-group` CRUD commands.

    The originally copy-pasted payload checks are factored into the
    `_assert_*` helpers below, so each test only states what is specific
    to it. A leftover debug `print()` was also removed.
    """

    # ARM resource-id prefix and resource type shared by every assertion.
    _MG_PREFIX = "/providers/Microsoft.Management/managementGroups/"
    _MG_TYPE = "Microsoft.Management/managementGroups"

    # ----------------------------------------------------------- helpers

    def _assert_group_core(self, group, name, display_name):
        """Check the fields common to every management-group payload."""
        self.assertIsNotNone(group)
        self.assertIsNotNone(group["details"])
        self.assertEqual(group["id"], self._MG_PREFIX + name)
        self.assertEqual(group["name"], name)
        self.assertEqual(group["displayName"], display_name)
        self.assertIsNotNone(group["tenantId"])
        self.assertEqual(group["type"], self._MG_TYPE)

    def _assert_parent(self, group, parent_name, parent_display_name=None):
        """Check details.parent references the management group `parent_name`."""
        if parent_display_name is None:
            parent_display_name = parent_name
        parent = group["details"]["parent"]
        self.assertEqual(parent["displayName"], parent_display_name)
        self.assertEqual(parent["id"], self._MG_PREFIX + parent_name)
        self.assertEqual(parent["name"], parent_name)

    def _assert_root_parent(self, group):
        """Check the group's parent is the tenant root management group."""
        self._assert_parent(group, group["tenantId"], "Root Management Group")

    def _assert_child(self, child, name):
        """Check one entry of a `children` list describes group `name`."""
        self.assertEqual(child["id"], self._MG_PREFIX + name)
        self.assertEqual(child["type"], self._MG_TYPE)
        self.assertEqual(child["displayName"], name)
        self.assertEqual(child["name"], name)

    # ------------------------------------------------------------- tests

    def test_list_managementgroups(self):
        managementgroups_list = self.cmd(
            'account management-group list').get_output_in_json()
        self.assertIsNotNone(managementgroups_list)
        self.assertTrue(len(managementgroups_list) > 0)
        first = managementgroups_list[0]
        self.assertIsNotNone(first["displayName"])
        self.assertTrue(first["id"].startswith(self._MG_PREFIX))
        self.assertIsNotNone(first["name"])
        self.assertIsNotNone(first["tenantId"])
        self.assertEqual(first["type"], self._MG_TYPE)

    def test_show_managementgroup(self):
        self.cmd('account management-group create --name testcligetgroup')
        self.cmd('account management-group create --name testcligetgroup1 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup')
        managementgroup_get = self.cmd(
            'account management-group show --name testcligetgroup1').get_output_in_json()
        self.cmd('account management-group delete --name testcligetgroup1')
        self.cmd('account management-group delete --name testcligetgroup')
        self._assert_group_core(
            managementgroup_get, "testcligetgroup1", "testcligetgroup1")
        # Without --expand the children field must stay unpopulated.
        self.assertIsNone(managementgroup_get["children"])
        self._assert_parent(managementgroup_get, "testcligetgroup")

    def test_show_managementgroup_with_expand(self):
        self.cmd('account management-group create --name testcligetgroup')
        self.cmd('account management-group create --name testcligetgroup1 --parent testcligetgroup')
        self.cmd('account management-group create --name testcligetgroup2 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup1')
        managementgroup_get = self.cmd(
            'account management-group show --name testcligetgroup1 --expand').get_output_in_json()
        self.cmd('account management-group delete --name testcligetgroup2')
        self.cmd('account management-group delete --name testcligetgroup1')
        self.cmd('account management-group delete --name testcligetgroup')
        self._assert_group_core(
            managementgroup_get, "testcligetgroup1", "testcligetgroup1")
        self.assertIsNotNone(managementgroup_get["children"])
        self._assert_parent(managementgroup_get, "testcligetgroup")
        self._assert_child(managementgroup_get["children"][0], "testcligetgroup2")

    def test_show_managementgroup_with_expand_and_recurse(self):
        self.cmd('account management-group create --name testcligetgroup1')
        self.cmd('account management-group create --name testcligetgroup2 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup1')
        self.cmd('account management-group create --name testcligetgroup3 --parent testcligetgroup2')
        self.cmd('account management-group create --name testcligetgroup4 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup3')
        managementgroup_get = self.cmd(
            'account management-group show --name testcligetgroup2 --expand --recurse').get_output_in_json()
        self.cmd('account management-group delete --name testcligetgroup4')
        self.cmd('account management-group delete --name testcligetgroup3')
        self.cmd('account management-group delete --name testcligetgroup2')
        self.cmd('account management-group delete --name testcligetgroup1')
        self._assert_group_core(
            managementgroup_get, "testcligetgroup2", "testcligetgroup2")
        self.assertIsNotNone(managementgroup_get["children"])
        self._assert_parent(managementgroup_get, "testcligetgroup1")
        child = managementgroup_get["children"][0]
        self._assert_child(child, "testcligetgroup3")
        # --recurse must surface the grandchild as well.
        self._assert_child(child["children"][0], "testcligetgroup4")

    def test_create_managementgroup(self):
        name = "testcligroup"
        managementgroup_create = self.cmd(
            'account management-group create --name ' + name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        # Display name defaults to the group name.
        self._assert_group_core(managementgroup_create, name, name)
        self._assert_root_parent(managementgroup_create)

    def test_create_managementgroup_with_displayname(self):
        name = "testcligroup"
        display_name = "TestCliDisplayName"
        managementgroup_create = self.cmd(
            'account management-group create --name ' + name +
            ' --display-name ' + display_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self._assert_group_core(managementgroup_create, name, display_name)
        self._assert_root_parent(managementgroup_create)

    def test_create_managementgroup_with_parentid(self):
        name = "testcligroupchild"
        parent_name = "testcligroup"
        self.cmd('account management-group create --name ' + parent_name)
        managementgroup_create = self.cmd(
            'account management-group create --name ' + name +
            ' --parent ' + self._MG_PREFIX + parent_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self.cmd('account management-group delete --name ' + parent_name)
        self._assert_group_core(managementgroup_create, name, name)
        self._assert_parent(managementgroup_create, parent_name)

    def test_create_managementgroup_with_displayname_and_parentid(self):
        name = "testcligroupchild"
        display_name = "testcligroupchildDisplayName"
        parent_name = "testcligroup"
        self.cmd('account management-group create --name ' + parent_name)
        # --parent also accepts the bare group name; the payload still
        # reports the fully-qualified parent id.
        managementgroup_create = self.cmd(
            'account management-group create --name ' + name +
            ' --display-name ' + display_name +
            ' --parent ' + parent_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self.cmd('account management-group delete --name ' + parent_name)
        self._assert_group_core(managementgroup_create, name, display_name)
        self._assert_parent(managementgroup_create, parent_name)

    def test_update_managementgroup_with_displayname(self):
        name = "testcligroup"
        display_name = "testcligroupDisplayName"
        self.cmd('account management-group create --name ' + name)
        managementgroup_update = self.cmd(
            'account management-group update --name ' + name +
            ' --display-name ' + display_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self._assert_group_core(managementgroup_update, name, display_name)
        self._assert_root_parent(managementgroup_update)

    def test_update_managementgroup_with_parentid(self):
        name = "testcligroupchild"
        parent_name = "testcligroup"
        self.cmd('account management-group create --name ' + parent_name)
        self.cmd('account management-group create --name ' + name)
        managementgroup_update = self.cmd(
            'account management-group update --name ' + name +
            ' --parent ' + self._MG_PREFIX + parent_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self.cmd('account management-group delete --name ' + parent_name)
        # Display name is untouched by a parent-only update.
        self._assert_group_core(managementgroup_update, name, name)
        self._assert_parent(managementgroup_update, parent_name)

    def test_update_managementgroup_with_displayname_and_parentid(self):
        name = "testcligroupchild"
        display_name = "testcligroupchild"
        parent_name = "testcligroup"
        self.cmd('account management-group create --name ' + parent_name)
        self.cmd('account management-group create --name ' + name)
        managementgroup_update = self.cmd(
            'account management-group update --name ' + name +
            ' --display-name ' + display_name +
            ' --parent ' + parent_name).get_output_in_json()
        self.cmd('account management-group delete --name ' + name)
        self.cmd('account management-group delete --name ' + parent_name)
        self._assert_group_core(managementgroup_update, name, display_name)
        self._assert_parent(managementgroup_update, parent_name)

    def test_create_delete_group_managementgroup(self):
        # Smoke test: create followed by delete must both succeed.
        self.cmd('account management-group create --name testcligroup')
        self.cmd('account management-group delete --name testcligroup')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import pytest
import dhtmlparser
from harvester.scrappers import cpress_cz
# Variables ===================================================================
# Functions & objects =========================================================
def test_parse_alt_title():
    """_parse_alt_title returns the <img> alt text of the cover image."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
</div>
"""
    )
    alt_title = cpress_cz._parse_alt_title(dom)
    assert alt_title == "Záhadná jízda králů"
def test_parse_alt_title_not_found():
    """_parse_alt_title raises UserWarning when no <img> tag is present."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
</div>
"""
    )
    with pytest.raises(UserWarning):
        cpress_cz._parse_alt_title(dom)
def test_parse_alt_title_param_not_found():
    """Missing alt parameter also raises UserWarning.

    NOTE(review): this fixture is byte-identical to
    test_parse_alt_title_not_found -- it was probably meant to include an
    <img> tag *without* an alt attribute; confirm and fix the fixture.
    """
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
</div>
"""
    )
    with pytest.raises(UserWarning):
        cpress_cz._parse_alt_title(dom)
def test_parse_alt_url():
    """_parse_alt_url resolves the cover image's <a href> against BASE_URL."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    url = cpress_cz._parse_alt_url(dom)
    assert url
    assert url == cpress_cz.normalize_url(cpress_cz.BASE_URL, "zahadna-jizda-kralu/")
def test_parse_alt_url_not_found():
    """_parse_alt_url returns None when the cover <img> has no wrapping <a>."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
</div>
</div>
"""
    )
    url = cpress_cz._parse_alt_url(dom)
    assert url is None
def test_parse_title_url():
    """_parse_title_url returns (title, normalized url) from polozka_nazev."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
<a href="zahadna-jizda-kralu/">Záhadná jízda králů</a>
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    title, url = cpress_cz._parse_title_url(dom)
    assert title == "Záhadná jízda králů"
    assert url == cpress_cz.normalize_url(cpress_cz.BASE_URL, "zahadna-jizda-kralu/")
def test_parse_title_url_tag_not_found():
    """With an empty polozka_nazev, title/url are still found.

    The expected values match the cover-image alt data, so presumably the
    parser falls back to the alt title/url -- confirm in cpress_cz.
    """
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    title, url = cpress_cz._parse_title_url(dom)
    assert title == "Záhadná jízda králů"
    assert url == cpress_cz.normalize_url(cpress_cz.BASE_URL, "zahadna-jizda-kralu/")
def test_parse_title_url_url_not_found():
    """Title is still parsed, but url is None when no usable link exists."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail">Detail knihy</div>
</div>
</div>
"""
    )
    title, url = cpress_cz._parse_title_url(dom)
    assert title == "Záhadná jízda králů"
    assert url is None
def test_parse_authors():
    """_parse_authors yields objects with .name and a BASE_URL-normalized .URL."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
<a href="zahadna-jizda-kralu/">Záhadná jízda králů</a>
</div>
<div class="polozka_autor"><a href="autori/autor/jiri-jilik/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    authors = cpress_cz._parse_authors(dom)
    assert authors
    assert authors[0].name == "<NAME>"
    assert authors[0].URL == cpress_cz.normalize_url(cpress_cz.BASE_URL, "autori/autor/jiri-jilik/")
def test_parse_authors_no_authors():
    """An empty polozka_autor div yields an empty author list."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
<a href="zahadna-jizda-kralu/">Záhadná jízda králů</a>
</div>
<div class="polozka_autor"></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    authors = cpress_cz._parse_authors(dom)
    assert authors == []
def test_parse_authors_no_authors_tag():
    """A completely missing polozka_autor div also yields an empty list."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
<a href="zahadna-jizda-kralu/">Záhadná jízda králů</a>
</div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    authors = cpress_cz._parse_authors(dom)
    assert authors == []
def test_parse_authors_multiple_authors():
    """Multiple <a> tags in polozka_autor produce one author each, in order."""
    dom = dhtmlparser.parseString(
        """
<div class="polozka">
<div class="polozka_obrazek">
<a href="zahadna-jizda-kralu/">
<img width="90" alt="Záhadná jízda králů" src="typo3temp/pics/8def5efbad.jpg" height="140" /> </a>
</div>
<div class="polozka_obsah">
<div class="polozka_popisy">
<div class="polozka_nazev">
<a href="zahadna-jizda-kralu/">Záhadná jízda králů</a>
</div>
<div class="polozka_autor"><a href="autori/autor/leos-kopecky/"><NAME></a>, <a href="autori/autor/roswitha-kammerl/"><NAME></a></div>
<div class="polozka_podtitul">Nová kniha autora bestselleru Žítkovské čarování!</div>
</div>
<div class="polozka_cena">199 Kč</div>
<div class="polozka_detail"><a href="zahadna-jizda-kralu/">Detail knihy</a></div>
</div>
</div>
"""
    )
    authors = cpress_cz._parse_authors(dom)
    assert authors
    assert len(authors) == 2
    assert authors[0].name == "<NAME>"
    assert authors[0].URL == cpress_cz.normalize_url(cpress_cz.BASE_URL, "autori/autor/leos-kopecky/")
    assert authors[1].name == "<NAME>"
    assert authors[1].URL == cpress_cz.normalize_url(cpress_cz.BASE_URL, "autori/autor/roswitha-kammerl/")
def test_parse_price():
    """_parse_price returns the text of the span with class "cena"."""
    dom = dhtmlparser.parseString(
        """
<div class="kniha_detail_cena">
<ul>
<li><label>((availability)):</label> <span>((availability_available))</span></li>
<li><label>Doporučená cena:</label> <span class="cena">299 Kč</span></li>
</ul>
</div>
"""
    )
    price = cpress_cz._parse_price(dom)
    assert price == "299 Kč"
def test_parse_price_not_found():
    """_parse_price raises UserWarning when the span.cena element is missing."""
    dom = dhtmlparser.parseString(
        """
<div class="kniha_detail_cena">
<ul>
<li><label>((availability)):</label> <span>((availability_available))</span></li>
</ul>
</div>
"""
    )
    with pytest.raises(UserWarning):
        cpress_cz._parse_price(dom)
def test_parse_ean_date_format():
    """_parse_ean/_parse_date/_parse_format read their table-row values as-is."""
    dom = dhtmlparser.parseString(
        """
<table>
<tr><th>Autor:</th> <td><a href="autori/autor/"> </a></td></tr>
<tr><th>Žánr:</th> <td><a href="vydali-jsme/?tx_odbooks%5Bgenre%5D=93&cHash=718a579059d52191c53e0eb0125608c2">komiks</a></td></tr>
<tr><th>Datum vydání:</th> <td>06. 08. 2014</td></tr>
<tr><th>EAN:</th> <td>9788026404620</td></tr>
</table>
<table>
<tr><th>Formát:</th> <td>210 x 297 mm brožovaná lepená</td></tr>
</table>
<br/>
"""
    )
    ean = cpress_cz._parse_ean(dom)
    date = cpress_cz._parse_date(dom)
    format = cpress_cz._parse_format(dom)
    assert ean == "9788026404620"
    assert date == "06. 08. 2014"
    assert format == "210 x 297 mm brožovaná lepená"
def test_parse_ean_date_format_not_found():
    """Missing EAN value / date row / format row all yield None, not an error."""
    html = """
        <table>
        <tr><th>Autor:</th> <td><a href="autori/autor/"> </a></td></tr>
        <tr><th>Žánr:</th> <td><a href="vydali-jsme/?tx_odbooks%5Bgenre%5D=93&cHash=718a579059d52191c53e0eb0125608c2">komiks</a></td></tr>
        <tr><th>EAN:</th></tr>
        </table>
        <table>
        </table>
        <br/>
        """
    dom = dhtmlparser.parseString(html)

    assert cpress_cz._parse_ean(dom) is None
    assert cpress_cz._parse_date(dom) is None
    assert cpress_cz._parse_format(dom) is None
def test_parse_description():
    """Description text is extracted with tags stripped and <br/> kept as newline."""
    html = """
        <div class="kniha_detail_autor"><a href="autori/autor/"> </a></div>
        <div class="kniha_detail_text">
        <p><br/> Description is <br/> here.<br/></span><span class="tecky"></span></p>
        <span><a href="kaasdzasd#" id="vice">[více]</a></span>
        </div>
        <script>
        """
    dom = dhtmlparser.parseString(html)

    description = cpress_cz._parse_description(dom)
    assert description == "Description is \n here."
def test_parse_description_not_found():
    """Without a .kniha_detail_text div, _parse_description returns None."""
    html = """
        <div class="kniha_detail_autor"><a href="autori/autor/"> </a></div>
        <script>
        """
    dom = dhtmlparser.parseString(html)

    assert cpress_cz._parse_description(dom) is None
|
"""TEMPLATES COMMANDS"""
import mimetypes
import logging
from pathlib import Path
import click
from mldock.config_managers.cli import ModelConfigManager, CliConfigureManager
from mldock.config_managers.project import MLDockConfigManager
from mldock.terminal import ProgressLogger
from mldock.platform_helpers.mldock.storage.pyarrow import (
upload_assets,
download_assets,
)
from mldock.api.assets import infer_filesystem_type
click.disable_unicode_literals_warning = True
logger = logging.getLogger("mldock")
MLDOCK_CONFIG_NAME = "mldock.yaml"
@click.group()
def models():
    """
    Commands to create, update and manage models for container projects
    """
    # Click group entry point only; subcommands are attached by add_commands()
    # at the bottom of this module.
@click.command()
@click.option(
    "--channel",
    help="asset channel name. Directory name, within project model/ to store assets",
    required=True,
    type=str,
)
@click.option(
    "--name",
    help="asset filename name. File name, within project model/<channel> in which model artifact will be found",
    required=True,
    type=str,
)
@click.option(
    "--project_directory",
    "--dir",
    "-d",
    help="mldock container project.",
    required=True,
    type=click.Path(
        exists=False,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ),
)
@click.option(
    "--remote", help="mldock remote to use to store or fetch dataset", type=str
)
@click.option(
    "--remote_path", help="relative path in remote to store data artifact", type=str
)
@click.option("--mime_type", "--type", help="type of file based on mimetypes", type=str)
@click.option(
    "--compression",
    help="type of file based on mimetypes",
    type=click.Choice(["zip"], case_sensitive=False),
)
def create(
    channel, name, project_directory, remote, remote_path, mime_type, compression
):
    """
    Command to create models manifest for mldock enabled container projects.
    """
    try:
        config_filepath = Path(project_directory, MLDOCK_CONFIG_NAME)
        if not config_filepath.exists():
            raise Exception(
                (
                    "Path '{}' was not an mldock project. "
                    "Confirm this directory is correct, otherwise "
                    "create one.".format(project_directory)
                )
            )

        # No explicit type given: infer from the filename.
        if mime_type is None:
            mime_type = mimetypes.guess_type(name)
        # mimetypes.guess_type returns (type, encoding); keep only the type.
        if isinstance(mime_type, (list, tuple)):
            mime_type = mime_type[0]

        # Default the remote path to the channel name.
        if remote_path is None:
            remote_path = channel

        project_manager = MLDockConfigManager(filepath=config_filepath)

        # get mldock_module_dir name
        project_config = project_manager.get_config()

        channels = ModelConfigManager(
            config=project_config.get("model", []),
            base_path=Path(project_directory, "model"),
        )
        channels.add_asset(
            channel=channel,
            filename=name,
            type=mime_type,
            remote=remote,
            compression=compression,
            remote_path=remote_path,
        )
        channels.write_gitignore()

        project_manager.update_model_channels(models=channels.get_config())
        project_manager.write_file()
    except Exception as exception:
        logger.error(exception)
        raise
@click.command()
@click.option(
    "--channel",
    help="asset channel name. Directory name, within project model/ to store assets",
    required=True,
    type=str,
)
@click.option(
    "--name",
    help="asset filename name. File name, within project model/<channel> in which model artifact will be found",
    required=True,
    type=str,
)
@click.option(
    "--project_directory",
    "--dir",
    "-d",
    help="mldock container project.",
    required=True,
    type=click.Path(
        exists=False,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ),
)
@click.option(
    "--remote", help="mldock remote to use to store or fetch dataset", type=str
)
@click.option(
    "--remote_path", help="relative path in remote to store data artifact", type=str
)
@click.option("--mime_type", "--type", help="type of file based on mimetypes", type=str)
@click.option(
    "--compression",
    help="compression applied to the model artifact",
    type=click.Choice(["zip"], case_sensitive=False),
)
def update(
    channel, name, project_directory, remote, remote_path, mime_type, compression
):
    """
    Command to update an existing model asset in the mldock models manifest.

    Flags that are omitted fall back to the values already stored on the
    asset, and only then to generic defaults.
    """
    try:
        if not Path(project_directory, MLDOCK_CONFIG_NAME).exists():
            raise Exception(
                (
                    "Path '{}' was not an mldock project. "
                    "Confirm this directory is correct, otherwise "
                    "create one.".format(project_directory)
                )
            )

        mldock_manager = MLDockConfigManager(
            filepath=Path(project_directory, MLDOCK_CONFIG_NAME)
        )

        # get mldock_module_dir name
        mldock_config = mldock_manager.get_config()

        model_channels = ModelConfigManager(
            config=mldock_config.get("model", []),
            base_path=Path(project_directory, "model"),
        )

        # Fetch the existing asset first. BUG FIX: defaults (mimetype guess,
        # remote_path=channel) were previously applied BEFORE these lookups,
        # so the stored values could never take effect on an update.
        model = model_channels.get(
            channel=channel,
            filename=name
        )
        if mime_type is None:
            mime_type = model.get("type", None)
        if mime_type is None:
            mime_type = mimetypes.guess_type(name)
        # mimetypes.guess_type returns (type, encoding); keep only the type.
        if isinstance(mime_type, (list, tuple)):
            mime_type = mime_type[0]
        if compression is None:
            compression = model.get("compression", None)
        if remote is None:
            remote = model.get("remote", None)
        if remote_path is None:
            remote_path = model.get("remote_path", None)
        if remote_path is None:
            remote_path = channel

        model_channels.add_asset(
            channel=channel,
            filename=name,
            type=mime_type,
            remote=remote,
            compression=compression,
            remote_path=remote_path,
            update=True,
        )
        model_channels.write_gitignore()

        mldock_manager.update_model_channels(models=model_channels.get_config())
        mldock_manager.write_file()
    except Exception as exception:
        logger.error(exception)
        raise
@click.command()
@click.option(
    "--channel",
    help="asset channel name. Directory name, within project model/ to store assets",
    required=True,
    type=str,
)
@click.option(
    "--name",
    help="asset filename name. File name, within project model/<channel> in which model artifact will be found",
    required=True,
    type=str,
)
@click.option(
    "--project_directory",
    "--dir",
    "-d",
    help="mldock container project.",
    required=True,
    type=click.Path(
        exists=False,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ),
)
def remove(channel, name, project_directory):
    """
    Command to remove a model asset from the mldock models manifest.
    """
    try:
        if not Path(project_directory, MLDOCK_CONFIG_NAME).exists():
            raise Exception(
                (
                    "Path '{}' was not an mldock project. "
                    "Confirm this directory is correct, otherwise "
                    "create one.".format(project_directory)
                )
            )

        mldock_manager = MLDockConfigManager(
            filepath=Path(project_directory, MLDOCK_CONFIG_NAME)
        )

        # get mldock_module_dir name
        mldock_config = mldock_manager.get_config()

        model_channels = ModelConfigManager(
            config=mldock_config.get("model", []),
            base_path=Path(project_directory, "model"),
        )
        model_channels.remove(channel=channel, filename=name)
        model_channels.write_gitignore()

        # BUG FIX: this previously called update_data_channels(data=...),
        # writing the model config into the "data" section. The sibling
        # create/update commands persist the same manager via
        # update_model_channels(models=...), so remove must do the same.
        mldock_manager.update_model_channels(models=model_channels.get_config())
        mldock_manager.write_file()
    except Exception as exception:
        logger.error(exception)
        raise
@click.command()
@click.option(
    "--channel",
    help="asset channel name. Directory name, within project model/ to store assets",
    required=True,
    type=str,
)
@click.option(
    "--name",
    help="asset filename name. File name, within project model/<channel> in which model artifact will be found",
    required=True,
    type=str,
)
@click.option(
    "--project_directory",
    "--dir",
    "-d",
    help="mldock container project.",
    required=True,
    type=click.Path(
        exists=False,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ),
)
def push(channel, name, project_directory):
    """
    Command to push a model asset from model/<channel> to its configured remote.
    """
    # NOTE: help texts previously said "data/" — copy-pasted from the data
    # commands; this command operates on the model/ directory only.
    try:
        if not Path(project_directory, MLDOCK_CONFIG_NAME).exists():
            raise Exception(
                (
                    "Path '{}' was not an mldock project. "
                    "Confirm this directory is correct, otherwise "
                    "create one.".format(project_directory)
                )
            )

        mldock_manager = MLDockConfigManager(
            filepath=Path(project_directory, MLDOCK_CONFIG_NAME)
        )

        # get mldock_module_dir name
        mldock_config = mldock_manager.get_config()

        model_channels = ModelConfigManager(
            config=mldock_config.get("model", []),
            base_path=Path(project_directory, "model"),
        )

        model = model_channels.get(channel=channel, filename=name)

        # Resolve the named remote from the user-level CLI configuration.
        config_manager = CliConfigureManager()
        remote = config_manager.remotes.get(name=model["remote"])

        file_system, fs_base_path = infer_filesystem_type(remote["path"])

        with ProgressLogger(
            group="Upload",
            text="Uploading model artifacts",
            spinner="dots",
            on_success="Successfully uploaded model artifacts",
        ) as spinner:
            upload_assets(
                file_system=file_system,
                fs_base_path=fs_base_path,
                local_path=Path(
                    project_directory, "model", model["channel"]
                ).as_posix(),
                storage_location=Path("model", model["remote_path"]).as_posix(),
                zip_artifacts=model.get("compression", None) == "zip",
            )
            spinner.stop()
    except Exception as exception:
        logger.error(exception)
        raise
@click.command()
@click.option(
    "--channel",
    help="asset channel name. Directory name, within project model/ to store assets",
    required=True,
    type=str,
)
@click.option(
    "--name",
    help="asset filename name. File name, within project model/<channel> in which model artifact will be found",
    required=True,
    type=str,
)
@click.option(
    "--project_directory",
    "--dir",
    "-d",
    help="mldock container project.",
    required=True,
    type=click.Path(
        exists=False,
        file_okay=False,
        dir_okay=True,
        writable=True,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ),
)
def pull(channel, name, project_directory):
    """
    Command to pull a model asset from its configured remote into model/<channel>.
    """
    # NOTE: help texts previously said "data/" — copy-pasted from the data
    # commands; this command operates on the model/ directory only.
    try:
        if not Path(project_directory, MLDOCK_CONFIG_NAME).exists():
            raise Exception(
                (
                    "Path '{}' was not an mldock project. "
                    "Confirm this directory is correct, otherwise "
                    "create one.".format(project_directory)
                )
            )

        mldock_manager = MLDockConfigManager(
            filepath=Path(project_directory, MLDOCK_CONFIG_NAME)
        )

        # get mldock_module_dir name
        mldock_config = mldock_manager.get_config()

        model_channels = ModelConfigManager(
            config=mldock_config.get("model", []),
            base_path=Path(project_directory, "model"),
        )

        # (local renamed from "dataset" — this manifest entry is a model asset)
        model = model_channels.get(channel=channel, filename=name)

        # Resolve the named remote from the user-level CLI configuration.
        config_manager = CliConfigureManager()
        remote = config_manager.remotes.get(name=model["remote"])

        file_system, fs_base_path = infer_filesystem_type(remote["path"])

        with ProgressLogger(
            group="Download",
            text="Downloading model artifacts",
            spinner="dots",
            on_success="Successfully downloaded model artifacts",
        ) as spinner:
            download_assets(
                file_system=file_system,
                fs_base_path=fs_base_path,
                storage_location=Path("model", model["remote_path"]).as_posix(),
                local_path=Path(
                    project_directory, "model", model["channel"]
                ).as_posix(),
            )
            spinner.stop()
    except Exception as exception:
        logger.error(exception)
        raise
def add_commands(cli_group: click.group):
    """
    add commands to cli group

    args:
        cli_group (click.group): group to attach the model subcommands to
    """
    for command in (create, update, remove, push, pull):
        cli_group.add_command(command)


add_commands(models)
|
from lib import *
# TODO: optional parameters:PFC, frame_filter_available, half_duplex_capable, statistics_counters_available, capable_1g, capable_100m, capable_10m
def Ethernet(base, size):
    """Build the access-policy Device for a Xilinx AXI Ethernet core.

    Registers from pg138 (AXI Ethernet) and pg051 (Tri-Mode Ethernet MAC)
    are registered with per-bit read/write masks.

    NOTE(review): d.read32/d.write32 appear to take (address, bit-mask,
    reset/expected value) — confirm against lib.Device before relying on it.

    Args:
        base: base address of the device's register window.
        size: size of the register window.

    Returns:
        The populated Device object.
    """
    d = Device('Ethernet', base, size)
    ETH_BASE = base
    # For the device specified in
    # https://www.xilinx.com/support/documentation/ip_documentation/axi_ethernet/v7_0/pg138-axi-ethernet.pdf and https://www.xilinx.com/support/documentation/ip_documentation/tri_mode_ethernet_mac/v8_2/pg051-tri-mode-eth-mac.pdf
    # Our documentation of the assurance case explains why we don't either
    # read or write some bit.
    # NOTE: our device is little-endian
    # We start with the pg138 ethernet device
    # Reset and Address Filter Register
    d.read32(ETH_BASE + 0x0, 0b11111111111111111101000000000001, 0)
    d.write32(ETH_BASE + 0x0, 0b00000000000000000101111111111110, 0)
    # Transmit Pause Frame Register
    d.read32(ETH_BASE + 0x4, 0b11111111111111110000000000000000, 0)
    d.write32(ETH_BASE + 0x4, 0b00000000000000001111111111111111, 0)
    # Transmit Inter Frame Gap Adjustment Register
    d.read32(ETH_BASE + 0x8, 0xffffff00, 0)
    d.write32(ETH_BASE + 0x8, 0x000000ff, 0)
    # Interrupt Status Register
    d.read32(ETH_BASE + 0xC, 0b11111111111111111111111000000000, 0)
    # Bit 7 doesn't have a stable reset value.
    d.write32(ETH_BASE + 0xC, 0b101111111, 0b001000000)
    # Interrupt Pending Register
    d.read32(ETH_BASE + 0x10, 0b11111111111111111111111000000000, 0)
    d.write32(ETH_BASE + 0x10, 0b111111111, 0)
    # Interrupt Enable Register
    d.read32(ETH_BASE + 0x14, 0b11111111111111111111111000000000, 0)
    d.write32(ETH_BASE + 0x14, 0b111111111, 0)
    # TODO: Optionally Enabled Things:
    # - Transmit/Receive VLAN Tag Register
    # - Unicast Address Word Lower Register
    # - VLAN TPID Word 0 Register
    # - VLAN TPID Word 1 Register
    # - PCS PMA TEMAC REGISTER
    # 0x034-0x1FC are reserved, so we don't touch them.
    # Now we move onto the pg051-tri-mode-eth-mac device
    # 0x1FD-0x200 aren't defined.
    # 0x200-0x364 are counters which might be nonzero even on a secure boot after a
    # soft reset.
    # 0x368-0x3FC are reserved.
    # 0x3FD-0x400 aren't defined.
    # Pause frame MAC Source Address
    d.write32(ETH_BASE + 0x400, 0xffffffff, 0xffffffff)
    # Receiver Configuration word
    # Setting this bit resets the rest of this register to defaults.
    d.write32(ETH_BASE + 0x404, 0b10000000000000000000000000000000,
              0b10000000000000000000000000000000)
    # Transmitter Configuration Word
    # Setting this bit resets the rest of this register to defaults.
    d.write32(ETH_BASE + 0x408, 0b10000000000000000000000000000000,
              0b10000000000000000000000000000000)
    # TODO: optional features
    #if PFC:
    #    # Flow Control Configuration Word
    #    d.write32(ETH_BASE + 0x40c, 0b01100110000100001111111111111111,
    #              0b01100000000100001111111111111111)
    # MAC Speed Configuration Word
    d.write32(ETH_BASE + 0x410, 0b11000000000000000000000000000000,
              0b10000000000000000000000000000000)
    # RX Max Frame Configuration Word
    d.write32(ETH_BASE + 0x414, 0b00000000000000010111111111111111,
              0b00000000000000000000011111010000)
    # TX Max Frame Configuration Word
    d.write32(ETH_BASE + 0x418, 0b00000000000000010111111111111111,
              0b00000000000000000000011111010000)
    # TODO: optionally enabled features:
    #if PFC:
    #    # Per Priority Quanta
    #    # The same register format is repeated 7 times.
    #    for idx in range(7):
    #        d.write32(ETH_BASE + 0x480 + (idx * 4), 0xffffffff,
    #                  (0xff00 << 16) | 0xffff)
    #    # Legacy Pause Refresh Register
    #    d.write32(ETH_BASE + 0x4A0, 0b11111111111111110000000000000000,
    #              0xFF00 << 16)
    # ID Register
    # You will likely need to change this value, since it measures the patch-
    # level of the ethernet device.
    #d.read32(ETH_BASE + 0x4F8, 0b11111111111111110000000011111111, 42)
    # Abilty Register
    #d.read32(
    #    ETH_BASE + 0x4FC, 0b00000000000000010000011100000111, (int(PFC) << 16)
    #    | (int(frame_filter_available) << 10) | (int(half_duplex_capable) << 9)
    #    | (int(statistics_counters_available) << 8) | ((int(capable_1g) << 2)))
    # MDIO Configuration Registers
    # MDIO Setup Word
    # TODO: finish up the remainder of the optionally-enabled and configuration-dependent features
    # Interrupt Controller
    # We can't verify the status of interrupts, but we can clear existing interrupts and make sure they're enabled.
    d.write32(ETH_BASE + 0x620, 0xffffff, 0)
    d.write32(ETH_BASE + 0x630, 0xffffff, 0)
    # TODO: more optional features
    # - frame-filter configuration
    # - AVB endpoint
    # - RTC configuration
    return d
|
# -*- coding:utf-8 -*-
"""
rule based query generation
------------
book person
"""
from refo import Star, Any
from inference.basic_inference import W, Rule, KeywordRule
from inference.basic_inference import SPARQL_PREFIX, SPARQL_ASK_TEM, SPARQL_COUNT_TEM, SPARQL_SELECT_TEM, SPARQL_SELECT_TEM_FD, SPARQL_COUNT_TEM_FD
from inference.basic_inference import pos_person, pos_book_or_movie, pos_number, person_entity, book_or_movie_entity, number_entity
from inference.basic_inference import BookPropertyValueSet
import re
# Trigger-word predicates: each alternation accepts English and Chinese
# surface forms of the same concept; W(...) wraps one token predicate.
book = (W('book') | W('书籍') | W('书') | W('图书'))
author = (W('write') | W('wrote') | W('writes') | W('写作') | W('写了') | W('写过') | W('author'))
translator = (W('translate') | W('translates') | W('翻译') | W('translator'))
image_url = (W('image') | W('poster') | W('picture') | W('cover') | W('海报') | W('图片') | W('封面'))
gender = (W('sex') | W('gender') | W('性别'))
birthday = (W('when') + W('born') | W('birthday') | W('birth') + W('date') | W('出生日期') | W("出生时间") | W('生日') | W('时间') + W('出生'))
birthplace = (W('birthplace') | W('where') + W('born') | W('出生地') | W('地点') + W('出生'))
other_name = (W('alternate') | W('other') + W('name') | W('nickname') | W('Chinese') + W('name') | W('English') + W('name') | W('其他名字') | W('其他名称') | W('别名') | W('中文名') | W('英文名'))
introduction = (W('introduction') | W('who') | W('简介') | W('自我介绍') | W('介绍') | W("是") + W("谁"))
detail_information = (W('detail') + W('information') | W('详细信息') | W('详细介绍'))
book_person_info = (gender | birthday | birthplace | introduction)
category = (W("category") | W("genre") | W("类型") | W("种类"))
# BUG FIX: "多少" was a bare string in this alternation (missing W(...)),
# which breaks predicate composition with `|`.
several = (W("several") | W("how") + W("many") | W("多少") | W("几部"))
higher = (W("higher") | W("大于") | W("高于"))
lower = (W("lower") | W("小于") | W("低于"))
compare = (higher | lower)
"""
SPARQL template
"""
class QuestionSet:
    """Builds SPARQL queries (Douban + DBpedia variants) from matched rule words."""

    def __init__(self):
        pass

    @staticmethod
    def has_book_person_info(word_objects):
        """
        author's basic information
        :return: SPARQL query string, or None when no rule/person matched
        """
        keyword = None
        keyword_douban = None
        keyword_db = None
        for r in basic_book_person_fd:
            keyword = r.apply(word_objects)
            if keyword is not None:
                # The keyword rule yields "<douban_prop> <db_prop>" separated by
                # spaces; re.split with a capture group keeps the separator, so
                # pop value / separator / value.
                keyword_split = re.split("( )+", keyword)
                keyword_db = keyword_split.pop()
                keyword_split.pop()
                keyword_douban = keyword_split.pop()
                break
        # BUG FIX: if no KeywordRule matched, keyword_douban/keyword_db were
        # previously left unbound and the f-string below raised
        # UnboundLocalError; bail out cleanly instead.
        if keyword_douban is None or keyword_db is None:
            return None
        select = u"?x"
        sparql = None
        for w in word_objects:
            if w.pos == pos_person:
                e_douban = u"?p :book_person_name '{person}'.\n" \
                           u"?p {keyword} ?x.".format(person=w.token, keyword=keyword_douban)
                # Split camel-cased tokens back into words for DBpedia labels.
                e_db = u"?p rdfs:label '{person}'@en.\n" \
                       u"?m dbo:author ?p.\n" \
                       u"?p {keyword} ?x".format(person=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token), keyword=keyword_db)
                sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
                                                     select=select,
                                                     expression_douban=e_douban,
                                                     expression_db=e_db)
                break
        return sparql

    @staticmethod
    def has_authored_in(word_objects):
        """
        Who writes what books (e.g. <NAME>)
        :param word_objects:
        :return: SPARQL query string, or None when no person was found
        """
        select = u"?x"
        sparql = None
        for w in word_objects:
            if w.pos == pos_person:
                e_douban = u"?p :book_person_name '{person}'." \
                           u"?p :has_authored_in ?b." \
                           u"?b :book_info_name ?x".format(person=w.token)
                e_db = u"?p rdfs:label '{person}'@en.\n" \
                       u"?m dbo:author ?p.\n" \
                       u"?m foaf:name ?x".format(person=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
                sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
                                                     select=select,
                                                     expression_douban=e_douban,
                                                     expression_db=e_db)
                break
        return sparql

    @staticmethod
    def has_translated_in(word_objects):
        """
        who translate what books (e.g. <NAME>)
        :param word_objects:
        :return: SPARQL query string, or None when no person was found
        """
        select = u"?x"
        sparql = None
        for w in word_objects:
            if w.pos == pos_person:
                e_douban = u"?p :book_person_name '{person}'." \
                           u"?p :has_translated_in ?b." \
                           u"?b :book_info_name ?x".format(person=w.token)
                e_db = u"?p rdfs:label '{person}'@en.\n" \
                       u"?m dbo:translator ?p.\n" \
                       u"?m foaf:name ?x".format(person=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
                sparql = SPARQL_SELECT_TEM_FD.format(prefix=SPARQL_PREFIX,
                                                     select=select,
                                                     expression_douban=e_douban,
                                                     expression_db=e_db)
                break
        return sparql

    @staticmethod
    def has_write_quantity_book(word_objects):
        """
        How many books does an author write
        :param word_objects:
        :return: SPARQL COUNT query string, or None when no person was found
        """
        select = u"?x"
        sparql = None
        for w in word_objects:
            if w.pos == pos_person:
                e_db = u"?p rdfs:label '{person}'@en.\n" \
                       u"?m dbo:author ?p.\n" \
                       u"?m foaf:name ?x".format(person=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
                e_douban = u"?p :book_person_name '{person}'." \
                           u"?p :has_authored_in ?b.\n" \
                           u"?b :book_info_name ?x".format(person=w.token)
                sparql = SPARQL_COUNT_TEM_FD.format(prefix=SPARQL_PREFIX,
                                                    select=select,
                                                    expression_douban=e_douban,
                                                    expression_db=e_db)
                break
        return sparql

    @staticmethod
    def has_translate_quantity_book(word_objects):
        """
        How many books does an author translate
        :param word_objects:
        :return: SPARQL COUNT query string, or None when no person was found
        """
        select = u"?x"
        sparql = None
        for w in word_objects:
            if w.pos == pos_person:
                e_db = u"?p rdfs:label '{person}'@en.\n" \
                       u"?m dbo:translator ?p.\n" \
                       u"?m foaf:name ?x".format(person=re.sub(r"(\w)([A-Z])", r"\1 \2", w.token))
                e_douban = u"?p :book_person_name '{person}'." \
                           u"?p :has_translated_in ?b.\n" \
                           u"?b :book_info_name ?x".format(person=w.token)
                sparql = SPARQL_COUNT_TEM_FD.format(prefix=SPARQL_PREFIX,
                                                    select=select,
                                                    expression_douban=e_douban,
                                                    expression_db=e_db)
                break
        return sparql
# Top-level question rules: a person entity, any filler, a trigger predicate,
# any filler. NOTE(review): condition_num presumably weights rule specificity
# during matching — confirm in Rule's implementation.
book_person_rules_fd = [
    Rule(condition_num=7, condition=person_entity + Star(Any(), greedy=False) + book_person_info + Star(Any(), greedy=False), action=QuestionSet.has_book_person_info),
    Rule(condition_num=7, condition=person_entity + Star(Any(), greedy=False) + author + Star(Any(), greedy=False), action=QuestionSet.has_authored_in),
    Rule(condition_num=7, condition=person_entity + Star(Any(), greedy=False) + translator + Star(Any(), greedy=False), action=QuestionSet.has_translated_in)
]
# Property-keyword rules consumed by QuestionSet.has_book_person_info; each
# action returns the "<douban_prop> <db_prop>" keyword pair for one property.
basic_book_person_fd = [
    KeywordRule(condition=person_entity + Star(Any(), greedy=False) + gender + Star(Any(), greedy=False), action=BookPropertyValueSet.return_book_person_gender_value_FD),
    KeywordRule(condition=person_entity + Star(Any(), greedy=False) + birthday + Star(Any(), greedy=False), action=BookPropertyValueSet.return_book_person_birthday_value_FD),
    KeywordRule(condition=person_entity + Star(Any(), greedy=False) + birthplace + Star(Any(), greedy=False), action=BookPropertyValueSet.return_book_person_birthplace_value_FD),
    KeywordRule(condition=person_entity + Star(Any(), greedy=False) + introduction + Star(Any(), greedy=False), action=BookPropertyValueSet.return_book_person_introduction_value_FD)
]
r"""
Keywords manipulation
==================================================================================================
This object contains a list of unique keywords (terms of interest).
Regular expressions recipes
---------------------------------------------------------------------------------------------------
The following code exemplifies some common cases using regular expressions.
>>> keywords = Keywords('111')
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five') is None
True
* Partial match.
>>> keywords = Keywords('hre')
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'hre'
* **Word whole only**. `r'\b'` represents word boundaries.
>>> keywords = Keywords(r'\btwo\b', use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
>>> keywords = Keywords(r"\b(TWO)\b", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
* **Case sensitive**.
>>> keywords = Keywords(r'\btwo\b', ignore_case=False, use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
>>> keywords = Keywords(r"\bTWO\b", ignore_case=False, use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one TWO three four five')
'TWO'
>>> keywords = Keywords(r"\bTWO\b", ignore_case=False, use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five') is None
True
* **A word followed by other word**.
>>> keywords = Keywords(r'\btwo\Wthree\b', ignore_case=False, use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three'
* **Multiple white spaces**.
>>> keywords = Keywords(r"two\W+three", ignore_case=False, use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three'
* **A list of keywords**.
>>> keywords = Keywords([r"xxx", r"two", r"yyy"])
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
* **Adjacent terms but the order is unimportant**.
>>> keywords = Keywords(r"\bthree\W+two\b|\btwo\W+three\b", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three'
* **Near words**.
Two words (`'two'`, `'four'`) separated by any other.
>>> keywords = Keywords(r"\btwo\W+\w+\W+four\b", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three four'
Two words (`'two'`, `'five'`) separated by one, two or three unspecified words.
>>> keywords = Keywords(r"\btwo\W+(?:\w+\W+){1,3}?five", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three four five'
* **Or operator**.
>>> keywords = Keywords(r"123|two", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
* **And operator**. One word followed by other at any word distance.
>>> keywords = Keywords(r"\btwo\W+(?:\w+\W+)+?five", use_re=True)
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two three four five'
Functions in this module
---------------------------------------------------------------------------------------------------
"""
import json
import re
import string
import pandas as pd
class Keywords:
"""Creates a Keywords object used to find, extract or remove terms of interest from a string."""
    def __init__(
        self, keywords=None, sep=None, ignore_case=True, full_match=False, use_re=False
    ):
        """Creates a keywords object.

        Args:
            keywords (string, list of strings, pandas.Series, techminer.Keywords):
                set of keywords to add; None creates an empty object.
            sep (character): separator character in string lists.
            ignore_case (bool): ignore string case when matching.
            full_match (bool): anchor patterns so only whole words match.
            use_re (bool): keywords are interpreted as regular expressions
                (otherwise they are escaped and matched literally).

        Returns:
            Keywords object
        """
        self._ignore_case = ignore_case  # compile patterns with re.I when True
        self._full_match = full_match  # wrap patterns in ^...$ when True
        self._keywords = None  # sorted, de-duplicated keyword strings
        self._patterns = None  # compiled regexes; stays None until compile()
        self._use_re = use_re  # keywords are raw regex source when True
        self.add_keywords(keywords=keywords, sep=sep)
    @property
    def keywords(self):
        """Sorted list of stored keyword strings (None if nothing was added)."""
        return self._keywords
def __repr__(self):
"""String representation of the object.
Examples
----------------------------------------------------------------------------------------------
>>> Keywords(['Big data', 'neural networks']) # doctest: +NORMALIZE_WHITESPACE
[
"Big data",
"neural networks"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
"""
text = json.dumps(self._keywords, indent=2, sort_keys=True)
text += "\nignore_case={}, full_match={}, use_re={}, compiled={}".format(
self._ignore_case.__repr__(),
self._full_match.__repr__(),
self._use_re.__repr__(),
self._patterns is not None,
)
return text
def __str__(self):
return self.__repr__()
def add_keywords(self, keywords, sep=None):
"""Adds new keywords x to list of current keywords.
Args:
keywords (string, list of strings, techminer.Keywords): new keywords to be added.
sep (character): separator character in string lists.
Returns:
Nothing
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords()
>>> keywords = keywords.add_keywords('ann')
>>> keywords
[
"ann"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
>>> keywords = keywords.add_keywords('RNN')
>>> keywords
[
"RNN",
"ann"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
>>> keywords = keywords.add_keywords(['deep learning', 'fuzzy'])
>>> keywords
[
"RNN",
"ann",
"deep learning",
"fuzzy"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
>>> other_keywords_list = Keywords().add_keywords(['a', 'b', 'c'])
>>> keywords = keywords.add_keywords(other_keywords_list)
>>> keywords
[
"RNN",
"a",
"ann",
"b",
"c",
"deep learning",
"fuzzy"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
"""
if keywords is None:
return
if isinstance(keywords, str):
keywords = [keywords]
if isinstance(keywords, Keywords):
keywords = keywords._keywords
if isinstance(keywords, pd.Series):
keywords = keywords.tolist()
if sep is not None:
keywords = [
z.strip()
for y in keywords
if y is not None
for z in y.split(sep)
if z.strip() != ""
]
else:
keywords = [
y.strip() for y in keywords if y is not None and y.strip() != ""
]
if self._keywords is None:
self._keywords = sorted(list(set(keywords)))
else:
keywords.extend(self._keywords)
self._keywords = sorted(list(set(keywords)))
self._patterns = None
return self
def __len__(self):
"""Returns the number of keywords.
Examples
----------------------------------------------------------------------------------------------
>>> len(Keywords(['Big data', 'neural networks'])) # doctest: +NORMALIZE_WHITESPACE
2
"""
return len(self._keywords)
def compile(self):
"""Compiles regular expressions.
Examples
----------------------------------------------------------------------------------------------
>>> x = Keywords(['Big data', 'neural networks'])
>>> x
[
"Big data",
"neural networks"
]
ignore_case=True, full_match=False, use_re=False, compiled=False
>>> x = x.compile()
>>> x
[
"Big data",
"neural networks"
]
ignore_case=True, full_match=False, use_re=False, compiled=True
"""
patterns = self._keywords
if self._use_re is False:
patterns = [re.escape(pattern) for pattern in patterns]
if self._full_match is True:
patterns = ["^" + pattern + "$" for pattern in patterns]
if self._ignore_case is True:
self._patterns = [re.compile(pattern, re.I) for pattern in patterns]
else:
self._patterns = [re.compile(pattern) for pattern in patterns]
return self
def extract_from_text(self, x, sep=";"):
r"""Returns a new string with the keywords in string x matching the list of
keywords used to fit the model.
The funcion allows the extraction of complex patterns using regular expresions (regex).
Detail information about regex sintax in Python can be obtained at
https://docs.python.org/3/library/re.html#re-syntax.
Args:
x (string): A string object.
Returns:
String.
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords([r"xxx", r"two", r"yyy"])
>>> keywords = keywords.compile()
>>> keywords.extract_from_text('one two three four five')
'two'
"""
if x is None or not isinstance(x, str):
return None
result = []
if sep is None:
for pattern in self._patterns:
match = pattern.search(x)
if match is not None:
result.append(match[0])
return result
else:
for pattern in self._patterns:
for word in x.split(";"):
match = pattern.search(word)
if match is not None:
result.append(match[0])
if len(result):
return sep.join(sorted(list(set(result))))
return None
def __contains__(self, x):
"""Implements in operator.
Examples
----------------------------------------------------------------------------------------------
>>> x = ['Big data', 'neural networks']
>>> 'Big data' in Keywords(x).compile() # doctest: +NORMALIZE_WHITESPACE
True
>>> 'big data' in Keywords(x).compile() # doctest: +NORMALIZE_WHITESPACE
True
>>> 'deep learning' in Keywords(x).compile() # doctest: +NORMALIZE_WHITESPACE
False
>>> 'big data' in Keywords(x, ignore_case=False).compile() # doctest: +NORMALIZE_WHITESPACE
False
"""
if self._patterns is None:
self.compile()
if self.extract_from_text(x) is None:
return False
return True
def remove_from_text(self, x):
"""Returns a string removing the strings that match a
list of keywords from x.
Args:
x (string): A string object.
Returns:
String.
Examples
----------------------------------------------------------------------------------------------
>>> Keywords('aaa').compile().remove_from_text('1 aaa 2')
'1 2'
>>> Keywords('aaa').compile().remove_from_text('1 2')
'1 2'
>>> Keywords('aaa').compile().remove_from_text('1 aaa 2 1 2')
'1 2 1 2'
>>> Keywords(['aaa', 'bbb']).compile().remove_from_text('1 aaa bbb 2 1 aaa 2')
'1 2 1 2'
"""
if x is None:
return None
for pattern in self._patterns:
x = pattern.sub(repl="", string=x)
return x
def transform(self, x, sep=None):
"""Creates a new Keywords object by applying the current Keywords to x.
Args:
x (string): A string object.
sep (str): character separator.
Examples
----------------------------------------------------------------------------------------------
>>> x = ['11', '111', '11 11 ', 'a', 'b', 'c']
>>> keywords = Keywords('1.*', use_re=True)
>>> keywords = keywords.compile()
>>> keywords.transform(x)
[
"11",
"11 11",
"111"
]
ignore_case=True, full_match=False, use_re=True, compiled=False
"""
if sep is not None:
x = [
z.strip()
for y in x
if y is not None
for z in y.split(sep)
if z.strip() != ""
]
else:
x = [y.strip() for y in x if y is not None and y.strip() != ""]
x = [self.extract_from_text(w) for w in x]
return Keywords(
keywords=x,
ignore_case=self._ignore_case,
full_match=self._full_match,
use_re=self._use_re,
)
def tolist(self):
"""Converts keywords to list.
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords([r"xxx", r"two", r"yyy"])
>>> keywords.tolist()
['two', 'xxx', 'yyy']
"""
return self._keywords.copy()
def __add__(self, other):
keywords = set(self._keywords + other._keywords)
ignore_case = self._ignore_case or other._ignore_case
full_match = self._full_match or other._full_match
use_re = self._use_re or other._use_re
x = Keywords(
keywords, ignore_case=ignore_case, full_match=full_match, use_re=use_re
)
#
# NLP
#
def extract_after_first(self, x):
"""Returns the string from the first ocurrence of the keyword to the end of string x.
Args:
x : string
Returns:
String
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords('aaa')
>>> keywords = keywords.compile()
>>> keywords.extract_after_first('1 aaa 4 aaa 5')
'aaa 4 aaa 5'
>>> keywords = Keywords('bbb')
>>> keywords = keywords.compile()
>>> keywords.extract_after_first('1 aaa 4 aaa 5')
"""
for pattern in self._patterns:
z = pattern.search(x)
if z:
return x[z.start() :]
return None
def extract_after_last(self, x):
"""Returns the string from last ocurrence of a keyword to the end of string x.
Args:
x: string
Returns:
String
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords('aaa')
>>> keywords = keywords.compile()
>>> keywords.extract_after_last('1 aaa 4 aaa 5')
'aaa 5'
"""
for pattern in self._patterns:
z = pattern.findall(x)
result = x
for w in z[:-1]:
y = pattern.search(result)
result = result[y.end() :]
y = pattern.search(result)
return result[y.start() :]
return None
def extract_nearby(self, x, n_phrases=0):
"""Extracts the words of string x in the proximity of the terms matching
the keywords list.
Args:
x (string): A string object.
n_phrases (integer): number of phrases around term.
Returns:
String.
Examples
----------------------------------------------------------------------------------------------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'f': ['1. 2. 3. 4. 5. 6.',
... 'aaa. 1. 2. 3. 4. 5.',
... '1. aaa. 2. 3. 4. 5.',
... '1. 2. 3. aaa. 4. 5.',
... '1. 2. 3. 4. aaa. 5.',
... '1. 2. 3. 4. 5. aaa.',
... 'bbb. 1. 2. 3. 4. 5.',
... '1. 2. 3. 4. 5. bbb.',
... '1. 2. 3. ccc. 4. 5.'],
... })
>>> df
f
0 1. 2. 3. 4. 5. 6.
1 aaa. 1. 2. 3. 4. 5.
2 1. aaa. 2. 3. 4. 5.
3 1. 2. 3. aaa. 4. 5.
4 1. 2. 3. 4. aaa. 5.
5 1. 2. 3. 4. 5. aaa.
6 bbb. 1. 2. 3. 4. 5.
7 1. 2. 3. 4. 5. bbb.
8 1. 2. 3. ccc. 4. 5.
>>> keywords = Keywords(['aaa', 'bbb', 'ccc'], use_re=True)
>>> keywords = keywords.compile()
>>> df.f.map(lambda x: keywords.extract_nearby(x, n_phrases=2)) # doctest: +NORMALIZE_WHITESPACE
0 None
1 aaa. 1. 2.
2 aaa. 2. 3.
3 2. 3. aaa. 4. 5.
4 3. 4. aaa.
5 4. 5. aaa.
6 bbb. 1. 2.
7 4. 5. bbb.
8 2. 3. ccc. 4. 5.
Name: f, dtype: object
"""
result = []
x = x.split(".")
x = [w.strip() for w in x]
x = [w for w in x if w != ""]
for index, phrase in enumerate(x):
for pattern in self._patterns:
z = pattern.findall(phrase)
if len(z):
if n_phrases != 0:
#
# Left side
#
pos = index - n_phrases
if pos >= 0:
result.extend(x[pos:index])
#
# Current phrase
#
result.append(phrase)
#
# Right side
#
pos = index + n_phrases
if pos < len(x):
result.extend(x[index + 1 : pos + 1])
else:
#
# Only the current phrase
#
result.append(phrase)
#
if len(result):
return ". ".join(result) + "."
return None
def extract_until_first(self, x):
"""Returns the string from begining of x to the first ocurrence of a keyword.
Args:
x: string
Returns:
String
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords('aaa')
>>> keywords = keywords.compile()
>>> keywords.extract_until_first('1 aaa 4 aaa 5')
'1 aaa'
"""
for pattern in self._patterns:
z = pattern.search(x)
if z:
return x[: z.end()]
return None
def extract_until_last(self, x):
"""Returns the string from begining of x to the last ocurrence of a keyword.
Args:
x: string
Returns:
String
Examples
----------------------------------------------------------------------------------------------
>>> keywords = Keywords('aaa')
>>> keywords = keywords.compile()
>>> keywords.extract_until_last('1 aaa 4 aaa 5')
'1 aaa 4 aaa'
"""
for pattern in self._patterns:
z = list(pattern.finditer(x))
if z:
return x[0 : z[-1].end(0)]
return None
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains the class Bounded_Manager, which manages a cloud"""
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Development"
import random
from .manager import Manager
from ..utils import split_list
class Bounded_Manager(Manager):
    """Simulates a manager for a DDOS attack.

    This Manager class uses a bounded shuffling algorithm: it maintains a
    running guess of the number of attackers and each round shuffles the
    attacked users into a bucket count derived from that guess.
    """

    # This manager variant can be run in simulations.
    runnable = True
    # Not an algorithm reproduced from the paper.
    paper = False

    def detect_and_shuffle(self, *args):
        """One round of the Bounded Manager algorithm."""
        if self.num_attackers_guess is None:
            # Start attacker guess with # attacked buckets
            self.num_attackers_guess = len(self.attacked_buckets)
        # If number of attacked buckets is greater than guess
        if len(self.attacked_buckets) > self.num_attackers_guess:
            # Move attackers into # attacked_buckets - guess
            self.case_1()
        # Number of buckets < attacker guess * 3
        elif len(self.used_buckets) < self.num_attackers_guess * 3:
            # Move attackers into # attacked buckets + 1
            self.case_2()
        # Number of buckets >= attacker_guess * 3
        else:
            # Combine every other non attacked bucket by reputation
            self.case_3()
        # Count # of times each bucket was not attacked
        self._incriment_buckets()

    def case_1(self):
        """When number of attacked buckets is greater than guess:
        move attackers into # attacked_buckets - guess buckets."""
        # New buckets = total # attackers - old guess
        new_bucket_amnt = len(self.attacked_buckets)
        new_bucket_amnt -= self.num_attackers_guess
        # Shuffle attacked buckets with new amnt
        self._shuffle_attacked_buckets(new_bucket_amnt)
        # Reset attacker guess
        self.num_attackers_guess = len(self.attacked_buckets)

    def case_2(self):
        """Number of buckets < attacker guess * 3:
        move attackers into # attacked buckets + 1."""
        # Shuffle attacked users into one more bucket than currently attacked
        self._shuffle_attacked_buckets(len(self.attacked_buckets) + 1)

    def case_3(self):
        """Number of buckets >= attacker_guess * 3:
        combine non attacked buckets by reputation,
        add one to attacked buckets."""
        # Sorts buckets by reputation (turns survived without being attacked)
        sorted_buckets = list(sorted(self.non_attacked_buckets,
                                     key=lambda x: x.turns_not_attacked))
        # For every other bucket
        for i in range(0, len(sorted_buckets), 2):
            try:
                # Combine the two buckets
                users = sorted_buckets[i].users
                users += sorted_buckets[i + 1].users
                sorted_buckets[i].users = users
                for user in users:
                    user.bucket = sorted_buckets[i]
                self.remove_bucket(sorted_buckets[i + 1])
            # last bucket
            except IndexError:
                # Odd # of buckets, just keep the final full bucket as-is
                # NOTE: This should prob be changed
                # NOTE: Should evenly divide out amongst all buckets
                # NOTE: rather than having one last full bucket
                pass
        # Add one bucket to attackers and reorder
        self._shuffle_attacked_buckets(len(self.attacked_buckets) + 1)

    def _shuffle_attacked_buckets(self, new_bucket_amnt):
        """Detects/Moves attackers into new_bucket_amnt buckets and shuffles."""
        # Get rid of attackers if they are the only one in the bucket first
        new_attacked_buckets = [x for x in self.attacked_buckets if len(x) > 1]
        new_bucket_amnt = self._remove_attackers(new_attacked_buckets,
                                                 new_bucket_amnt)
        # Checking to make sure we didn't remove all attackers
        if len(self.attacked_buckets) > 0 and new_bucket_amnt > 0:
            # Can't have more buckets than attacked users
            if new_bucket_amnt > len(self.attacked_users):
                new_bucket_amnt = len(self.attacked_users)
            users = self.attacked_users
            random.shuffle(users)
            # Clear out buckets
            for bucket in self.attacked_buckets:
                self.remove_bucket(bucket)
            for user_chunk in split_list(users, new_bucket_amnt):
                self.get_new_bucket().reinit(user_chunk)

    def _remove_attackers(self, new_attacked_buckets, new_bucket_amnt):
        """Removes attackers if they are the only one in the bucket.

        Returns the (possibly reduced) number of buckets to shuffle into.
        """
        old_num_buckets = len(self.attacked_buckets)
        # Removes attacked buckets and their attackers if bucket len is 1
        self.remove_attackers()
        # BUGFIX: the original computed len(attacked) - old_num, which is
        # never positive after removals, so the guess was never adjusted.
        diff = old_num_buckets - len(self.attacked_buckets)
        # If we removed attackers
        if diff > 0:
            # Shrink guess and target bucket count by the buckets removed
            self.num_attackers_guess -= diff
            new_bucket_amnt -= diff
        return new_bucket_amnt

    def _incriment_buckets(self):
        """Increments buckets' # of turns not attacked in a row.

        NOTE(review): method name keeps the original 'incriment' spelling
        in case subclasses or callers elsewhere reference it.
        """
        for bucket in self.used_buckets.values():
            if bucket.attacked:
                bucket.turns_not_attacked = 0
            else:
                bucket.turns_not_attacked += 1
|
import pandas as pd
import numpy as np
from modele_autoregresyjne.sarima_functions import sarima_forecast, grid_search
from preprocessing_danych.dataset_config import train_index, val_index, test_index
from LSTM.train_lstm.lstm_functions import preprocesing_data
import re
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error as mse
import tensorflow as tf
def results_from_logs(file_path):
    """Parse one training log and return a DataFrame of per-model metrics.

    Recognized lines: 'Dumped tool data for kernel_stats.pb to <run dir>'
    (gives the model name/normalize function) and 'RMSE_train ...' /
    'RMSE_val ...' (give the scores). Assumes the log contains one pair of
    RMSE lines per dumped run, in the same order.

    Args:
        file_path (str): path to the log file.

    Returns:
        pandas.DataFrame with columns RMSE_val, RMSE_train,
        normalize_function, model_name, model_type, window_len, scaller,
        rnn_cells and extra_hidden_layer.
    """
    # Hoisted: compile the number-matching regex once instead of per line.
    number_re = re.compile(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?")
    names = []
    rmse_val = []
    rmse_train = []
    # BUGFIX: use a context manager so the log file is always closed
    # (the original opened it and never closed it).
    with open(file_path) as f:
        for line in f:
            if line.startswith('Dumped tool data for kernel_stats.pb to '):
                name = line.split('Dumped tool data for kernel_stats.pb to ')[1]
                names.append(name.split('/train/')[0])
            elif line.startswith('RMSE_train'):
                rmse_train.append(number_re.findall(line)[0])
            elif line.startswith('RMSE_val'):
                rmse_val.append(number_re.findall(line)[0])
    results = pd.DataFrame()
    results['name'] = names
    results['RMSE_val'] = rmse_val
    results['RMSE_train'] = rmse_train
    # 'logs_<fn>/<model_name>/...' -> normalize function and model name.
    results['normalize_function'] = [name.split('/')[0].replace('logs_', '') for name in results['name']]
    results['model_name'] = [name.split('/')[1] for name in results['name']]
    results['RMSE_val'] = pd.to_numeric(results['RMSE_val'])
    results['RMSE_train'] = pd.to_numeric(results['RMSE_train'])
    # Model name encodes hyper-parameters as '-'-separated fields.
    results['model_type'] = [s_.split('-')[0] for s_ in results['model_name']]
    results['window_len'] = [s_.split('-')[1] for s_ in results['model_name']]
    results['window_len'] = results['window_len'].astype(int)
    results['scaller'] = [s_.split('-')[3] for s_ in results['model_name']]
    results['rnn_cells'] = [s_.split('-')[5] for s_ in results['model_name']]
    results['rnn_cells'] = results['rnn_cells'].astype(int)
    # NOTE(review): assumes runs alternate with/without the extra hidden
    # layer so odd rows get flag 1 - confirm against the training script.
    results['extra_hidden_layer'] = np.where(results.index % 2 == 1, 1, 0)
    return results.drop('name', axis=1)
# Load the preprocessed TGE gas spot prices and build naive-baseline RMSE
# scores (forecast = "no change", i.e. diff/pct_change of 0) for the
# train/val/test splits. Model results are later filtered against these.
spot_df = pd.read_pickle("data/tge_spot_preprocessed.p")
df_ = spot_df.diff().dropna()
df_test = df_.loc[test_index[0]:test_index[1]]
n_test = len(df_test)
score_df = spot_df.copy()
score_df['diff'] = spot_df['TGEgasDA'].diff()
score_df['pct_change'] = spot_df['TGEgasDA'].pct_change()
# Zero reference column: RMSE against it equals the RMS of diff/pct_change.
score_df['reference'] = 0
score_df.dropna(inplace=True)
# Train split; [90:-1] drops the warm-up window and the boundary row.
df_score_ = score_df.loc[train_index[0]:val_index[0]][90:-1]
train_score_pct = mse(df_score_['pct_change'], df_score_['reference'])**(1/2)
train_score_diff = mse(df_score_['diff'], df_score_['reference'])**(1/2)
# Bare tuple expression: only displays in a notebook/REPL, no effect here.
train_score_pct, train_score_diff
df_score_ = score_df.loc[val_index[0]:val_index[1]]
val_score_pct = mse(df_score_['pct_change'], df_score_['reference'])**(1/2)
val_score_diff = mse(df_score_['diff'], df_score_['reference'])**(1/2)
val_score_pct, val_score_diff
df_score_ = score_df.loc[test_index[0]:test_index[1]]
test_score_pct = mse(df_score_['pct_change'], df_score_['reference'])**(1/2)
test_score_diff = mse(df_score_['diff'], df_score_['reference'])**(1/2)
test_score_pct, test_score_diff
# Training logs to aggregate, one per LSTM architecture.
files = ['LSTM/LSTM2_RNN_results/lstm_2warst_rnn.log',
         'LSTM/LSTM2_results/lstm_2warstw.log',
         'LSTM/LSTM3_results/lstm_3warstw.log',
         'LSTM/LSTM1_results/lstm_1warstw.log']
# Parse every log and stack the per-model metrics into one frame.
# BUGFIX: DataFrame.append was deprecated and removed in pandas 2.0;
# collect the frames and concatenate once instead.
frames = []
for file_ in files:
    print(file_)
    frames.append(results_from_logs(file_))
results_all = pd.concat(frames)
# results_all.to_csv('Documentation/LSTM_results.csv')
# results_all.read_csv('Documentation/LSTM_results.csv')
# Keep only the five best models (by validation RMSE) that also beat the
# naive no-change baselines on both train and validation.
best_pct_df = results_all.query(f"normalize_function == 'pct_change' & RMSE_train < {train_score_pct} & RMSE_val < {val_score_pct}").sort_values('RMSE_val').head(5)
best_diff_df = results_all.query(f"normalize_function == 'diff' & RMSE_train < {train_score_diff} & RMSE_val < {val_score_diff}").sort_values('RMSE_val').head(5)
# for k, row in best_pct_df[:5].iterrows():
# sequence_size = row['window_len']
# model_file = f"LSTM/{row['model_type']}_results/models_{row['normalize_function']}/{row['model_name']}"
# model = tf.keras.models.load_model(model_file)
# data_set = spot_df.pct_change() if (row['normalize_function'] == 'pct_change') else spot_df.diff()
# data_set.dropna(inplace=True)
# scaler = StandardScaler() if (row['scaller'] == 'STD') else MinMaxScaler(feature_range=(0,1))
# df_scalled = pd.DataFrame(scaler.fit_transform(data_set), index=data_set.index, columns=['TGEgasDA'])
# df_scalled.dropna(inplace=True)
# X, y, idxes = preprocesing_data(df_scalled, sequence_size)
# split_test_idx = idxes.astype(str).to_list().index(val_index[0])
# test_X, test_y = X[split_test_idx:], y[split_test_idx:]
# pred_y = scaler.inverse_transform(model.predict(test_X))
# df_pred = pd.DataFrame(pred_y, index=spot_df.loc[val_index[0]:].index)
# column_name = row['normalize_function']
# df_pred.columns = ['forecast_'+column_name]
# df_pred['forecast_'+column_name].to_pickle(f"forecast/{row['normalize_function']}/{row['model_name']}.p",protocol=2)
# for k, row in best_diff_df[:5].iterrows():
# sequence_size = row['window_len']
# model_file = f"LSTM/{row['model_type']}_results/models_{row['normalize_function']}/{row['model_name']}"
# model = tf.keras.models.load_model(model_file)
# data_set = spot_df.pct_change() if (row['normalize_function'] == 'pct_change') else spot_df.diff()
# data_set.dropna(inplace=True)
# scaler = StandardScaler() if (row['scaller'] == 'STD') else MinMaxScaler(feature_range=(0,1))
# df_scalled = pd.DataFrame(scaler.fit_transform(data_set), index=data_set.index, columns=['TGEgasDA'])
# df_scalled.dropna(inplace=True)
# X, y, idxes = preprocesing_data(df_scalled, sequence_size)
# split_test_idx = idxes.astype(str).to_list().index(val_index[0])
# test_X, test_y = X[split_test_idx:], y[split_test_idx:]
# pred_y = scaler.inverse_transform(model.predict(test_X))
# df_pred = pd.DataFrame(pred_y, index=spot_df.loc[val_index[0]:].index)
# column_name = row['normalize_function']
# df_pred.columns = ['forecast_'+column_name]
# df_pred['forecast_'+column_name].to_pickle(f"forecast/{row['normalize_function']}/{row['model_name']}.p",protocol=2)
#################### ploting
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
plt.style.use('ggplot')
def sarima_forecast_plots(df, title, ylabels, xlabel, residuals=True, save_file=''):
    """Plot actual prices against validation/test forecasts, optionally with
    a residuals panel, and either save to ``<save_file>.png`` or show.

    Args:
        df: frame with columns TGEgasDA, forecast_val, forecast_test and
            (when ``residuals``) residuals_val / residuals_test.
        title: figure title.
        ylabels: (series ylabel, residuals ylabel).
        xlabel: shared x-axis label.
        residuals (bool): add the residuals panel below the series.
        save_file (str): save path without extension; '' shows interactively.
    """
    if residuals:
        fig, (ax_series, ax_residuals) = plt.subplots(2, 1, figsize=(20, 6), sharex=True, gridspec_kw={'height_ratios': [3, 1]})
    else:
        fig, ax_series = plt.subplots(figsize=(20, 5))
    fig.suptitle(title, fontsize=20)
    ax_series.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
    ax_series.xaxis.set_minor_formatter(mdates.DateFormatter('%Y-%m'))
    # Pad the x-range by two weeks on each side.
    ax_series.set_xlim(df.index.min()-pd.Timedelta('14d'), df.index.max()+pd.Timedelta('14d'))
    ax_series.set_ylabel(ylabels[0], fontsize=15)
    if residuals:
        ax_residuals.set_ylabel(ylabels[1], fontsize=10)
        ax_residuals.set_xlabel(xlabel, fontsize=15)
    else:
        ax_series.set_xlabel(xlabel, fontsize=15)
    plt.xticks(rotation=45)
    ax_series.plot(df.index, df['TGEgasDA'], label='cena rzeczywista', c='darkblue', linewidth=3, alpha=0.4)
    ax_series.plot(df.index, df['forecast_val'], label='prognoza - zbiór walidacyjny', linestyle='--', c='gold')
    ax_series.plot(df.index, df['forecast_test'], label='prognoza - zbiór testowy', linestyle='--', c='darkorange')
    if residuals:
        # BUGFIX: label must be a string - the original passed a one-element
        # list, which renders a malformed legend entry.
        ax_residuals.fill_between(df.index, df['residuals_val'], label='forecast_val', color='gold')
        ax_residuals.fill_between(df.index, df['residuals_test'], label='forecast_test', color='darkorange')
    ax_series.legend(loc='upper right', prop={"size":15})
    if save_file != '':
        plt.savefig(f"{save_file}.png", bbox_inches="tight")
        # BUGFIX: close the figure after saving - this function is called in
        # loops and unclosed figures accumulate memory.
        plt.close(fig)
    else:
        plt.show()
# For each shortlisted diff-normalized model: rebuild the absolute price
# forecast from the predicted first differences and save full/test plots.
for a, row in best_diff_df[:5].iterrows():
    df_forecast = pd.read_pickle(f"forecast/{row['normalize_function']}/{row['model_name']}.p")
    # File/title suffix depends on whether the extra hidden layer was used.
    if (row['extra_hidden_layer'] == 1):
        file_name = f"{row['model_name'].rsplit('-',1)[0]}-HL-{row['normalize_function']}"
    else:
        file_name = f"{row['model_name'].rsplit('-',1)[0]}_{row['normalize_function']}"
    if (row['extra_hidden_layer'] == 1):
        title = f"Prognoza TGEgasDA modelu {row['model_name'].rsplit('-',1)[0]}_HL"
    else:
        title = f"Prognoza TGEgasDA modelu {row['model_name'].rsplit('-',1)[0]}"
    df_joined = spot_df.join(df_forecast)
    # Undo differencing: price forecast = predicted diff + previous price.
    df_joined['forecast'] = df_joined['forecast_diff'] + df_joined['TGEgasDA'].shift()
    df_joined['residuals'] = (df_joined['TGEgasDA'] - df_joined['forecast']).abs()
    # Split the forecast/residual columns by validation vs test window.
    df_joined['forecast_val'] = df_joined.loc[val_index[0] : val_index[1], 'forecast']
    df_joined['residuals_val'] = df_joined.loc[val_index[0] : val_index[1], 'residuals']
    df_joined['forecast_test'] = df_joined.loc[test_index[0] : test_index[1], 'forecast']
    df_joined['residuals_test'] = df_joined.loc[test_index[0] : test_index[1], 'residuals']
    # No-op outside a notebook; kept from the original.
    df_joined.head()
    sarima_forecast_plots(df_joined,
                          title,
                          ['Cena [PLN/MWh]', 'Błąd bezwzględny [PLN/MWh]'],
                          'Dzień dostawy kontraktu',
                          save_file=f"Documentation/{file_name}_full",
                          residuals=False)
    sarima_forecast_plots(df_joined[~df_joined['forecast'].isna()],
                          title,
                          ['Cena [PLN/MWh]', 'Błąd bezwzględny [PLN/MWh]'],
                          'Dzień dostawy kontraktu',
                          save_file=f"Documentation/{file_name}_test")
#pct_change
# Same as the diff loop above, but for pct_change-normalized models:
# price forecast = (1 + predicted pct change) * previous price.
for a, row in best_pct_df[:5].iterrows():
    df_forecast = pd.read_pickle(f"forecast/{row['normalize_function']}/{row['model_name']}.p")
    # File/title suffix depends on whether the extra hidden layer was used.
    if (row['extra_hidden_layer'] == 1):
        file_name = f"{row['model_name'].rsplit('-',1)[0]}-HL-{row['normalize_function']}"
    else:
        file_name = f"{row['model_name'].rsplit('-',1)[0]}_{row['normalize_function']}"
    if (row['extra_hidden_layer'] == 1):
        title = f"Prognoza TGEgasDA modelu {row['model_name'].rsplit('-',1)[0]}_HL"
    else:
        title = f"Prognoza TGEgasDA modelu {row['model_name'].rsplit('-',1)[0]}"
    df_joined = spot_df.join(df_forecast)
    # Undo the pct_change normalization.
    df_joined['forecast'] = (1 + df_joined['forecast_pct_change']) * df_joined['TGEgasDA'].shift()
    df_joined['residuals'] = (df_joined['TGEgasDA'] - df_joined['forecast']).abs()
    # Split the forecast/residual columns by validation vs test window.
    df_joined['forecast_val'] = df_joined.loc[val_index[0] : val_index[1], 'forecast']
    df_joined['residuals_val'] = df_joined.loc[val_index[0] : val_index[1], 'residuals']
    df_joined['forecast_test'] = df_joined.loc[test_index[0] : test_index[1], 'forecast']
    df_joined['residuals_test'] = df_joined.loc[test_index[0] : test_index[1], 'residuals']
    # No-op outside a notebook; kept from the original.
    df_joined.head()
    sarima_forecast_plots(df_joined,
                          title,
                          ['Cena [PLN/MWh]', 'Błąd bezwzględny [PLN/MWh]'],
                          'Dzień dostawy kontraktu',
                          save_file=f"Documentation/{file_name}_full",
                          residuals=False)
    sarima_forecast_plots(df_joined[~df_joined['forecast'].isna()],
                          title,
                          ['Cena [PLN/MWh]', 'Błąd bezwzględny [PLN/MWh]'],
                          'Dzień dostawy kontraktu',
                          save_file=f"Documentation/{file_name}_test")
|
<gh_stars>1-10
# utils
import os
import numpy as np
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
#########
# Utils #
#########
def convert_arg_line_to_args(arg_line):
    """Yield each whitespace-separated token of one argument-file line.

    Suitable as argparse's ``convert_arg_line_to_args`` hook.
    """
    yield from arg_line.split()
def save_args(args, filename):
    """Write every attribute of ``args`` as a 'name: value' line to ``filename``."""
    with open(filename, 'w') as out:
        lines = ('{}: {}\n'.format(name, getattr(args, name)) for name in vars(args))
        out.writelines(lines)
def write_to_log(txt_filename, msg):
    """Append ``msg`` as a single line to the log file ``txt_filename``."""
    with open(txt_filename, 'a') as log_file:
        log_file.write('{}\n'.format(msg))
def makedir(dirpath):
    """Create ``dirpath`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of the original check-then-create, which
    could raise FileExistsError if another process created the directory
    between the existence check and makedirs.
    """
    os.makedirs(dirpath, exist_ok=True)
def make_dir_from_list(dirpath_list):
    """Create every directory in ``dirpath_list``, skipping ones that exist."""
    for path in dirpath_list:
        # Inlined from makedir(): create only when missing.
        if not os.path.exists(path):
            os.makedirs(path)
def load_checkpoint(fpath, model):
    """Load the 'model' state dict from ``fpath`` into ``model`` and return it.

    Strips a leading 'module.' (DataParallel prefix) from parameter names
    before loading so checkpoints from wrapped models load into bare ones.
    """
    state = torch.load(fpath, map_location='cpu')['model']
    cleaned = {
        (name.replace('module.', '') if name.startswith('module.') else name): tensor
        for name, tensor in state.items()
    }
    model.load_state_dict(cleaned)
    return model
######################
# Data preprocessing #
######################
def split_data_array(data_array):
    """Split a frame sequence into (middle reference frame, remaining frames)."""
    ref_idx = len(data_array) // 2
    neighbours = [frame for idx, frame in enumerate(data_array) if idx != ref_idx]
    return data_array[ref_idx], neighbours
def data_preprocess(data_array, cur_batch_size):
    """Split a multi-view batch and compute neighbour-to-reference poses.

    Returns (ref_dat, nghbr_dats, nghbr_poses, is_valid), where nghbr_poses
    is (batch, num_views, 4, 4) and is_valid zeroes out any view pair whose
    extrinsics (or resulting relative pose) contain NaNs.

    NOTE(review): assumes each dat['extM'] is a CPU torch tensor of shape
    (batch, 4, 4) - np.linalg.inv is applied to one directly and the result
    fed to torch.from_numpy; confirm against the dataset loader.
    """
    # 1. Split data array
    ref_dat, nghbr_dats = split_data_array(data_array)
    num_views = len(nghbr_dats)
    # 2. Obtain pose
    nghbr_poses = torch.zeros((cur_batch_size, num_views, 4, 4))
    is_valid = torch.ones((cur_batch_size, num_views), dtype=torch.int)
    ref_extM = ref_dat['extM'] # batch_size X 4 X 4
    nghbr_extMs = [nghbr_dat['extM'] for nghbr_dat in nghbr_dats] # list of (batch_size X 4 X 4)
    for i in range(cur_batch_size):
        ext_ref = ref_extM[i, :, :]
        # A NaN reference extrinsic invalidates every view for this sample.
        if torch.isnan(ext_ref.min()):
            is_valid[i, :] = 0
        else:
            for j in range(num_views):
                ext_nghbr = nghbr_extMs[j][i, :, :]
                if torch.isnan(ext_nghbr.min()):
                    is_valid[i, j] = 0
                else:
                    # Relative pose: nghbr extrinsic composed with the
                    # inverse of the reference extrinsic.
                    nghbr_pose = ext_nghbr.mm(torch.from_numpy(np.linalg.inv(ext_ref)))
                    if torch.isnan(nghbr_pose.min()):
                        is_valid[i, j] = 0
                    else:
                        nghbr_poses[i, j, :, :] = nghbr_pose
    return ref_dat, nghbr_dats, nghbr_poses, is_valid
##############
# Evaluation #
##############
def compute_depth_errors(gt, pred, var=None):
    """Compute standard monocular-depth error metrics between gt and pred.

    Args:
        gt: ground-truth depths (positive values, np.ndarray).
        pred: predicted depths, same shape as gt.
        var: optional predicted variance for the Gaussian NLL metric.

    Returns:
        dict of scalars: threshold accuracies a1-a3, absolute/relative/
        squared errors, RMSE variants, scale-invariant log error (silog),
        inverse-depth RMSE and NLL (0.0 when var is None).
    """
    thresh = np.maximum((gt / pred), (pred / gt))
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean()
    a3 = (thresh < 1.25 ** 3).mean()
    abs_diff = np.mean(np.abs(gt - pred))
    abs_rel = np.mean(np.abs(gt - pred) / gt)
    sq_rel = np.mean(((gt - pred) ** 2) / gt)
    rmse = np.sqrt(np.mean((gt - pred) ** 2))
    rmse_log = np.sqrt(np.mean((np.log(gt) - np.log(pred)) ** 2))
    err = np.log(pred) - np.log(gt)
    silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
    log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
    irmse = np.sqrt(np.mean((1 / gt - 1 / pred) ** 2))
    if var is not None:
        # BUGFIX: floor the variance into a copy; the original wrote the
        # clip back into the caller's array (var[var < 1e-6] = 1e-6).
        var = np.maximum(var, 1e-6)
        nll = np.mean(0.5 * (np.log(var) + np.log(2 * np.pi)
                             + (np.square(gt - pred) / var)))
    else:
        nll = 0.0
    return dict(a1=a1, a2=a2, a3=a3,
                abs_diff=abs_diff,
                abs_rel=abs_rel, sq_rel=sq_rel,
                rmse=rmse, log_10=log_10, irmse=irmse,
                rmse_log=rmse_log, silog=silog,
                nll=nll)
class RunningAverage:
    """Incrementally maintained arithmetic mean of appended values."""

    def __init__(self):
        self.avg = 0
        self.count = 0

    def append(self, value):
        """Fold one more value into the running mean."""
        new_count = self.count + 1
        self.avg = (value + self.count * self.avg) / new_count
        self.count = new_count

    def get_value(self):
        """Return the current mean (0 before any value is appended)."""
        return self.avg
class RunningAverageDict:
    """Dict of RunningAverage accumulators keyed by metric name."""

    def __init__(self):
        # Lazily initialized on the first update().
        self._dict = None

    def update(self, new_dict):
        """Fold one metrics dict into the running averages.

        BUGFIX: the original only created accumulators for the keys seen in
        the very first update, so a later dict with a new key raised
        KeyError; missing keys are now created on demand.
        """
        if self._dict is None:
            self._dict = dict()
        for key, value in new_dict.items():
            if key not in self._dict:
                self._dict[key] = RunningAverage()
            self._dict[key].append(value)

    def get_value(self):
        """Return {key: current mean} for every tracked metric."""
        return {key: value.get_value() for key, value in self._dict.items()}
def log_metrics(txt_path, metrics, first_line):
    """Print evaluation metrics and append the same lines to ``txt_path``.

    Args:
        txt_path: text file to append to.
        metrics: dict with the standard depth-metric keys (abs_rel, ...).
        first_line: header line identifying this evaluation.
    """
    # Build the header/value lines once instead of duplicating the format
    # string and key order between print and file output (DRY).
    keys = ('abs_rel', 'abs_diff', 'sq_rel', 'rmse', 'rmse_log', 'irmse',
            'log_10', 'silog', 'a1', 'a2', 'a3', 'nll')
    header = "abs_rel abs_diff sq_rel rmse rmse_log irmse log_10 silog a1 a2 a3 NLL"
    values = " ".join("%.4f" % metrics[k] for k in keys)
    print('{}'.format(first_line))
    print(header)
    print(values)
    with open(txt_path, 'a') as f:
        f.write('{}\n'.format(first_line))
        f.write(header + "\n")
        f.write(values + "\n\n")
#################
# Visualization #
#################
# ImageNet channel statistics used by the standard torchvision normalization.
__imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
def unnormalize(img_in):
    """Undo ImageNet normalization on an (H, W, C) image and return uint8 [0, 255]."""
    restored = np.zeros(img_in.shape)
    for ch, (mean, std) in enumerate(zip(__imagenet_stats['mean'],
                                         __imagenet_stats['std'])):
        restored[:, :, ch] = img_in[:, :, ch] * std + mean
    return (restored * 255).astype(np.uint8)
# visualize during training (DNET)
# visualize during training (DNET)
def visualize_D(args, img, gt_dmap, gt_dmap_mask, out, total_iter):
    """Save DNET diagnostics (image, GT/pred depth, error, stdev) as JPGs.

    Files are written under args.exp_vis_dir, named by total_iter.
    NOTE(review): gt_dmap_mask is accepted but unused here.
    """
    # Color ranges differ by dataset (indoor vs outdoor depth scale).
    if args.dataset_name == 'scannet':
        d_max = 5.0
        e_max = 0.5
    else:
        d_max = 60.0
        e_max = 3.0
    # Network output packs depth and variance along the channel dim.
    pred_dmap, pred_var = torch.split(out, 1, dim=1)  # (B, 1, H, W)
    pred_stdev = torch.sqrt(pred_var)
    # Only the first sample of the batch is visualized.
    img = img.detach().cpu().permute(0, 2, 3, 1).numpy()[0, ...]  # (H, W, 3)
    gt_dmap = gt_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    pred_dmap = pred_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    pred_stdev = pred_stdev.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    # save image
    target_path = '%s/%08d_img.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, unnormalize(img))
    # gt dmap
    target_path = '%s/%08d_gt_dmap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, gt_dmap, vmin=0.0, vmax=d_max, cmap='jet')
    # pred dmap
    target_path = '%s/%08d_pred_dmap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, pred_dmap, vmin=0.0, vmax=d_max, cmap='jet')
    # pred emap: absolute error, zeroed where GT is outside the valid range
    pred_emap = np.abs(pred_dmap - gt_dmap)
    pred_emap[gt_dmap < args.min_depth] = 0.0
    pred_emap[gt_dmap > args.max_depth] = 0.0
    target_path = '%s/%08d_pred_emap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, pred_emap, vmin=0.0, vmax=e_max, cmap='Reds')
    # pred stdev
    target_path = '%s/%08d_pred_stdev.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, pred_stdev, vmin=0.0, vmax=e_max, cmap='Reds')
# visualize during training (FNET)
# visualize during training (FNET)
def visualize_F(args, img, gt_dmap, gt_dmap_mask, pred_dmap, total_iter):
    """Save FNET diagnostics (image, GT/pred depth, error map) as JPGs.

    Files are written under args.exp_vis_dir, named by total_iter.
    NOTE(review): gt_dmap_mask is accepted but unused here.
    """
    # Color ranges differ by dataset (indoor vs outdoor depth scale).
    if args.dataset_name == 'scannet':
        d_max = 5.0
        e_max = 0.5
    else:
        d_max = 60.0
        e_max = 3.0
    # upsample the (lower-resolution) prediction to the image size
    pred_dmap = F.interpolate(pred_dmap, size=[img.shape[2], img.shape[3]], mode='nearest')
    # to numpy array; only the first sample of the batch is visualized
    img = img.detach().cpu().permute(0, 2, 3, 1).numpy()[0, ...]  # (H, W, 3)
    gt_dmap = gt_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    pred_dmap = pred_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    # save image
    target_path = '%s/%08d_img.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, unnormalize(img))
    # gt dmap
    target_path = '%s/%08d_gt_dmap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, gt_dmap, vmin=0.0, vmax=d_max, cmap='jet')
    # pred dmap
    target_path = '%s/%08d_pred_dmap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, pred_dmap, vmin=0.0, vmax=d_max, cmap='jet')
    # pred emap: absolute error, zeroed where GT is outside the valid range
    pred_emap = np.abs(pred_dmap - gt_dmap)
    pred_emap[gt_dmap < args.min_depth] = 0.0
    pred_emap[gt_dmap > args.max_depth] = 0.0
    target_path = '%s/%08d_pred_emap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, pred_emap, vmin=0.0, vmax=e_max, cmap='Reds')
# visualize during training (MAGNET)
# visualize during training (MAGNET)
def visualize_MaG(args, img, gt_dmap, gt_dmap_mask, pred_list, total_iter):
    """Save MAGNET diagnostics for every refinement iteration as JPGs.

    pred_list holds one (B, 2, H, W) depth+stdev tensor per iteration;
    files are written under args.exp_vis_dir, named by total_iter and the
    iteration index. NOTE(review): gt_dmap_mask is accepted but unused.
    """
    # Color ranges differ by dataset (indoor vs outdoor depth scale).
    if args.dataset_name == 'nyu' or args.dataset_name == 'scannet':
        d_max = 5.0
        e_max = 0.5
    else:
        d_max = 60.0
        e_max = 3.0
    # Only the first sample of the batch is visualized.
    img = img.detach().cpu().permute(0, 2, 3, 1).numpy()[0, ...]  # (H, W, 3)
    gt_dmap = gt_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
    # save image
    target_path = '%s/%08d_img.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, unnormalize(img))
    # gt dmap
    target_path = '%s/%08d_gt_dmap.jpg' % (args.exp_vis_dir, total_iter)
    plt.imsave(target_path, gt_dmap, vmin=0.0, vmax=d_max, cmap='jet')
    # One depth/error/stdev triple per refinement iteration.
    for i in range(len(pred_list)):
        pred_dmap, pred_stdev = torch.split(pred_list[i], 1, dim=1)  # (B, 1, H, W)
        pred_dmap = pred_dmap.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
        pred_stdev = pred_stdev.detach().cpu().permute(0, 2, 3, 1).numpy()[0, :, :, 0]  # (H, W)
        # pred dmap
        target_path = '%s/%08d_pred_dmap_iter%02d.jpg' % (args.exp_vis_dir, total_iter, i)
        plt.imsave(target_path, pred_dmap, vmin=0.0, vmax=d_max, cmap='jet')
        # pred emap: absolute error, zeroed where GT is outside valid range
        pred_emap = np.abs(pred_dmap - gt_dmap)
        pred_emap[gt_dmap < args.min_depth] = 0.0
        pred_emap[gt_dmap > args.max_depth] = 0.0
        target_path = '%s/%08d_pred_emap_iter%02d.jpg' % (args.exp_vis_dir, total_iter, i)
        plt.imsave(target_path, pred_emap, vmin=0.0, vmax=e_max, cmap='Reds')
        # pred stdev
        target_path = '%s/%08d_pred_stdev_iter%02d.jpg' % (args.exp_vis_dir, total_iter, i)
        plt.imsave(target_path, pred_stdev, vmin=0.0, vmax=e_max, cmap='Reds')
|
<reponame>Young-Excavator/meta_LSM
""" Utility functions. """
import numpy as np
import os
import random
import tensorflow as tf
import pandas as pd
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def normalize(inp, activation, reuse, scope):
    """Apply the normalization selected by FLAGS.norm, then the activation.

    FLAGS.norm must be 'batch_norm', 'layer_norm' or the string 'None'
    (activation only).

    Raises:
        ValueError: for an unrecognized FLAGS.norm value. The original fell
        through and silently returned None, which surfaced only as an
        obscure error downstream.
    """
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
    raise ValueError('Unknown FLAGS.norm value: {!r}'.format(FLAGS.norm))
## Loss functions
def mse(pred, label):
    """Mean squared error between flattened prediction and label tensors."""
    flat_pred = tf.reshape(pred, [-1])
    flat_label = tf.reshape(label, [-1])
    return tf.reduce_mean(tf.square(flat_pred - flat_label))
def xent(pred, label):
    """Softmax cross-entropy loss normalized by the batch size.

    Note - with tf version <=0.12, this loss has incorrect 2nd derivatives.
    """
    batch_size = tf.to_float(tf.shape(label)[0])
    # Mind the normalization: divide by the batch size.
    return tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=label) / batch_size
# def xent(pred, label):
# # Note - with tf version <=0.12, this loss has incorrect 2nd derivatives
# return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=label))
def tasksbatch_generator(data, batch_size, num_samples, dim_input, dim_output):
    """Generate one batch of tasks with num_samples samples each.

    Args:
        data: list of tasks; each task is a list of (features, label) pairs
            with label 1 = landslide, otherwise non-landslide.
        batch_size: number of tasks per batch.
        num_samples: samples drawn (as a contiguous window) from each task.
        dim_input / dim_output: feature and one-hot label dimensions.

    Returns:
        (inputs [batch, num_samples, dim_input],
         one-hot labels [batch, num_samples, dim_output],
         per-task sample counts as float32 array)
    """
    init_inputs = np.zeros([batch_size, num_samples, dim_input], dtype=np.float32)
    labels = np.zeros([batch_size, num_samples, dim_output], dtype=np.float32)
    np.random.shuffle(data)
    # BUGFIX: +1 so the last valid window can be selected and so a dataset
    # with exactly batch_size tasks does not crash (np.random.randint
    # raises ValueError on an empty range).
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    batch_tasks = data[start_index:(start_index + batch_size)]
    cnt_sample = [len(task) for task in batch_tasks]
    for i in range(batch_size):
        np.random.shuffle(batch_tasks[i])
        # Same off-by-one fix for the per-task sample window.
        start_index1 = np.random.randint(0, len(batch_tasks[i]) - num_samples + 1)
        task_samples = batch_tasks[i][start_index1:(start_index1 + num_samples)]
        for j in range(num_samples):
            init_inputs[i][j] = task_samples[j][0]
            if task_samples[j][1] == 1:
                labels[i][j][0] = 1  # landslide
            else:
                labels[i][j][1] = 1  # non-landslide
    return init_inputs, labels, np.array(cnt_sample).astype(np.float32)
# for each task
def sample_generator(one_task, dim_input, dim_output):
    """Turn one task's samples into a (1, N, ...) input/label batch.

    ``one_task`` is a list of [attributes, label] pairs; it is shuffled in
    place.  Labels are one-hot encoded: column 0 for label == 1, column 1
    otherwise.
    """
    np.random.shuffle(one_task)
    total = len(one_task)
    init_inputs = np.zeros([1, total, dim_input], dtype=np.float32)
    labels = np.zeros([1, total, dim_output], dtype=np.float32)
    for idx in range(total):
        sample = one_task[idx]
        init_inputs[0][idx] = sample[0]
        # Column 0 marks the positive class, column 1 the negative one.
        target_col = 0 if sample[1] == 1 else 1
        labels[0][idx][target_col] = 1
    return init_inputs, labels
# for each region (e.g., FJ&FL)
def sample_generator_(tasks, dim_input, dim_output, num_samples=5):
    """Build one (inputs, labels) batch by pooling rows across tasks.

    Each task is a sequence of rows whose first ``dim_input`` entries are
    attributes and whose last entry is the binary label (1 = positive).

    Args:
        tasks: list of per-task row collections; empty tasks after the first
            are skipped.
        dim_input: number of attribute columns per row.
        dim_output: number of one-hot label columns.
        num_samples: how many pooled rows to emit.  Generalized from the
            previously hard-coded value; defaults to 5 for compatibility.

    Returns:
        (init_inputs, labels) with shapes (1, num_samples, dim_input) and
        (1, num_samples, dim_output).
    """
    all_samples = np.array(tasks[0])
    for extra in tasks[1:]:
        if len(extra) > 0:
            all_samples = np.vstack((all_samples, np.array(extra)))
    init_inputs = np.zeros([1, num_samples, dim_input], dtype=np.float32)
    labels = np.zeros([1, num_samples, dim_output], dtype=np.float32)
    for i in range(num_samples):
        # Row layout: attributes ... , label (last column).
        init_inputs[0][i] = all_samples[i][:-1]
        if all_samples[i][-1] == 1:
            labels[0][i][0] = 1
        else:
            labels[0][i][1] = 1
    return init_inputs, labels
def meta_train_test(fj_tasks, fl_tasks, mode=0):
    """Split FJ/FL region tasks into meta-train and meta-test sets.

    Tasks with more than ``FLAGS.num_samples_each_task`` samples are
    "eligible" for training; tasks with 11..num_samples_each_task samples
    are kept for K=10-shot testing; smaller ones are residual.

    Modes:
        0 -- FJ only: 3/4 of eligible FJ tasks train, rest + small FJ test.
        1 -- train on eligible FJ, test on FL tasks with > 10 samples.
        2 -- train on 3/4 eligible FJ + all eligible FL, test on rest of FJ.
        3 -- train on eligible FJ + half of eligible FL, test on rest of FL.

    Returns (train_tasks, test_tasks); implicitly returns None for any
    other mode value.
    """
    test1_fj_tasks, test1_fl_tasks, resd_tasks, one_test_tasks = [], [], [], []
    _train, _test = [], []
    # np.random.shuffle(tasks)
    if mode==0:
        elig_tasks = []
        for i in range(len(fj_tasks)):
            if len(fj_tasks[i]) > FLAGS.num_samples_each_task:
                elig_tasks.append(fj_tasks[i])
            elif len(fj_tasks[i]) > 10: # set 10 to test K=10-shot learning
                test1_fj_tasks.append(fj_tasks[i])
            else:
                resd_tasks.append(fj_tasks[i])
        _train = elig_tasks[:int(len(elig_tasks) / 4 * 3)]
        _test = elig_tasks[int(len(elig_tasks) / 4 * 3):] + test1_fj_tasks
        for i in range(len(resd_tasks)): # resd_tasks are currently unused
            one_test_tasks.extend(resd_tasks[i])
        return _train, _test
    if mode==1:
        for i in range(len(fj_tasks)):
            if len(fj_tasks[i]) > FLAGS.num_samples_each_task:
                _train.append(fj_tasks[i])
        for i in range(len(fl_tasks)):
            if len(fl_tasks[i]) > 10:
                _test.append(fl_tasks[i])
        return _train, _test
    if mode==2 or mode==3:
        elig_fj_tasks, elig_fl_tasks = [], []
        for i in range(len(fj_tasks)):
            if len(fj_tasks[i]) > FLAGS.num_samples_each_task:
                elig_fj_tasks.append(fj_tasks[i])
            elif len(fj_tasks[i]) > 10:
                test1_fj_tasks.append(fj_tasks[i])
        for i in range(len(fl_tasks)):
            if len(fl_tasks[i]) > FLAGS.num_samples_each_task:
                elig_fl_tasks.append(fl_tasks[i])
            elif len(fl_tasks[i]) > 10:
                test1_fl_tasks.append(fl_tasks[i])
        if mode==2:
            _train = elig_fj_tasks[:int(len(elig_fj_tasks) / 4 * 3)] + elig_fl_tasks
            _test = elig_fj_tasks[int(len(elig_fj_tasks) / 4 * 3):] + test1_fj_tasks
            return _train, _test
        elif mode==3:
            # NOTE(review): the train slice below is bounded by
            # len(elig_fj_tasks) while the test slice uses len(elig_fl_tasks);
            # looks like a copy-paste slip ("half of FL" was probably
            # intended) -- confirm before changing.
            _train = elig_fj_tasks + elig_fl_tasks[:int(len(elig_fj_tasks) / 2)]
            _test = elig_fl_tasks[int(len(elig_fl_tasks) / 2):] + test1_fl_tasks
            return _train, _test
    # _test.extend(resid_tasks)
def save_tasks(tasks):
    """Persist tasks to an Excel workbook, one sheet per task.

    Each row is a sample: the attribute vector followed by its label.
    Output path: ./seg_output/<FLAGS.str_region>_tasks.xlsx
    (Original docstring said "save to csv" in Chinese; the output is xlsx.)
    """
    writer = pd.ExcelWriter('./seg_output/' + FLAGS.str_region + '_tasks.xlsx')
    for i in range(len(tasks)):
        task_sampels = []
        for j in range(len(tasks[i])):
            # Flatten [attributes, label] into one row: attrs..., label.
            attr_lb = np.append(tasks[i][j][0], tasks[i][j][1])
            task_sampels.append(attr_lb)
        data_df = pd.DataFrame(task_sampels)
        data_df.to_excel(writer, 'task_'+str(i), float_format='%.5f', header=False, index=False)
    # NOTE(review): ExcelWriter.save() is deprecated/removed in newer pandas
    # (close() suffices there) -- confirm against the pinned pandas version.
    writer.save()
    writer.close()
def read_tasks(file):
    """Load tasks back from the workbook written by ``save_tasks``.

    Each sheet is one task; rows hold FLAGS.dim_input attribute columns
    followed by one label column.  Returns a list with one entry per sheet,
    each a list of [attributes, label] pairs (float32).
    """
    f = pd.ExcelFile(file)
    tasks = [[] for i in range(len(f.sheet_names))]
    k = 0
    for sheetname in f.sheet_names:
        # NOTE(review): passing a bare int to ``usecols`` relies on
        # deprecated pandas behavior -- verify with the pinned version.
        attr = pd.read_excel(file, usecols=FLAGS.dim_input-1, sheet_name=sheetname).values.astype(np.float32)
        label = pd.read_excel(file, usecols=[FLAGS.dim_input], sheet_name=sheetname).values.reshape((-1,)).astype(np.float32)
        for j in range(np.shape(attr)[0]):
            tasks[k].append([attr[j], label[j]])
        k += 1
    return tasks
def savepts_fortask(clusters, file):
    """Write each cluster's pixels to its own sheet of an Excel workbook."""
    writer = pd.ExcelWriter(file)
    for count, cluster in enumerate(clusters):
        pts = list(cluster.pixels)
        pd.DataFrame(pts).to_excel(writer, 'task_'+str(count),
                                   float_format='%.5f', header=False,
                                   index=False)
    writer.save()
    writer.close()
def read_pts(file):
    """Load every sheet of an Excel workbook as a float32 array."""
    workbook = pd.ExcelFile(file)
    return [
        pd.read_excel(file, sheet_name=name).values.astype(np.float32)
        for name in workbook.sheet_names
    ]
|
<filename>_broken/caffe-segnet-stuff/backend/find_segnet_caffe.py
#!/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import platform
import sys
import ubelt as ub
from os.path import expanduser, join, exists, abspath # NOQA
from pysseg.util import gpu_util
from pysseg import getLogger
logger = getLogger(__name__)
print = logger.info
def get_segnet_caffe_python_root():
    """
    Returns the directory containing the segnet pycaffe module.
    TODO:
        generalize me
    """
    root = join(get_segnet_caffe_root(), 'python')
    if not exists(root):
        raise RuntimeError('python_segnet_caffe_root does not exist')
    print('python_caffe_root exists = {!r}'.format(root))
    return root
def get_segnet_caffe_root():
    """
    Locate the root of the segnet caffe checkout.

    Checks $CAFFE_SEGNET_ROOT first (when set), then a list of conventional
    checkout locations, returning the first path that exists.
    TODO:
        generalize me
    """
    candidates = [
        '~/code/caffe-segnet-cudnn5',
        '~/sseg/caffe-segnet',
        'caffe-segnet',
    ]
    env_root = os.environ.get('CAFFE_SEGNET_ROOT')
    if env_root is not None:
        # The environment override takes priority over the defaults.
        candidates.insert(0, env_root)
    for candidate in candidates:
        candidate = expanduser(candidate)
        if exists(candidate):
            print('caffe_root exists = {!r}'.format(candidate))
            return candidate
    raise RuntimeError('segnet-caffe-root does not exist')
def find_segnet_caffe_bin():
    """Return the path to the caffe executable.

    Prefers $CAFFE_SEGNET_BIN when it points at an existing path, otherwise
    falls back to build/tools/caffe under the segnet caffe root.
    """
    env_bin = os.environ.get('CAFFE_SEGNET_BIN')
    if env_bin is not None and exists(env_bin):
        return env_bin
    caffe_bin = join(get_segnet_caffe_root(), 'build/tools/caffe')
    if not exists(caffe_bin):
        raise IOError('Please write a better caffe bin finder')
    return caffe_bin
def import_module_from_fpath(module_fpath):
    """
    imports module from a file path
    Args:
        module_fpath (str): path to a .py file or a package directory
            (a directory is resolved to its __init__.py)
    Returns:
        module: module
    Example:
        >>> module_fpath = '/path/to/a/python_module'
        >>> module = import_module_from_fpath(module_fpath)
        >>> print('module = {!r}'.format(module))
    """
    from os.path import basename, splitext, isdir, join, exists, dirname, split
    if isdir(module_fpath):
        # Package directory: import via its __init__.py.
        module_fpath = join(module_fpath, '__init__.py')
    print('importing module_fpath = {!r}'.format(module_fpath))
    if not exists(module_fpath):
        raise ImportError('module_fpath={!r} does not exist'.format(
            module_fpath))
    python_version = platform.python_version()
    modname = splitext(basename(module_fpath))[0]
    if modname == '__init__':
        # Use the package directory name as the module name.
        modname = split(dirname(module_fpath))[1]
    if python_version.startswith('2.7'):
        # Python 2: the (long-deprecated) imp module.
        import imp
        module = imp.load_source(modname, module_fpath)
    elif python_version.startswith('3'):
        # NOTE(review): loader.load_module() is deprecated on modern Python 3
        # in favor of importlib.util.spec_from_file_location + exec_module --
        # confirm the supported interpreter range before modernizing.
        import importlib.machinery
        loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
        module = loader.load_module()
    else:
        raise AssertionError('invalid python version={!r}'.format(
            python_version))
    return module
# Cached pycaffe module; populated on first import_segnet_caffe() call.
CAFFE_SEGNET_MODULE = None
def import_segnet_caffe(gpu_num=ub.NoParam):
    """
    Import (and cache) the segnet pycaffe module, configuring CPU/GPU mode.

    ``gpu_num`` semantics: ub.NoParam (default) means "pick automatically" --
    an unused GPU on first import, or whatever the cached module already
    uses; None forces CPU mode; an int selects that GPU device.

    Example:
        from pysseg.backend.find_segnet_caffe import get_segnet_caffe_python_root, PYTHONPATH_CONTEXT
        from pysseg.backend import find_segnet_caffe
        find_segnet_caffe.CAFFE_SEGNET_MODULE
        find_segnet_caffe.CAFFE_SEGNET_MODULE = None
        caffe = find_segnet_caffe.import_segnet_caffe()
    """
    global CAFFE_SEGNET_MODULE
    # TODO: should rename caffe modulename to segnet-caffe
    if gpu_num is ub.NoParam:
        if CAFFE_SEGNET_MODULE is None:
            gpu_num = gpu_util.find_unused_gpu()
        else:
            # Keep whatever device the cached module was configured with.
            gpu_num = CAFFE_SEGNET_MODULE.GPU_NUM
    # Method 1
    if CAFFE_SEGNET_MODULE is None:
        print('Attempting to load segnet-caffe module')
        caffe_root = get_segnet_caffe_python_root()
        # sys.path.insert(0, caffe_root)
        # if 'caffe' in sys.modules:
        #     del sys.modules['caffe']
        # for key in list(sys.modules.keys()):
        #     if key.startswith('caffe.'):
        #         del sys.modules[key]
        # Import with the segnet checkout temporarily first on sys.path so
        # its 'caffe' package wins over any other installed caffe.
        with PYTHONPATH_CONTEXT(caffe_root):
            import caffe
        CAFFE_SEGNET_MODULE = caffe
        # does pycaffe expose flags describing if it was built with GPU support?
        # ...probably not :(
        if gpu_num is None:
            print('setting caffe mode to CPU')
            CAFFE_SEGNET_MODULE.set_mode_cpu()
            CAFFE_SEGNET_MODULE.GPU_NUM = gpu_num
        else:
            print('setting caffe mode to GPU {}'.format(gpu_num))
            CAFFE_SEGNET_MODULE.set_mode_gpu()
            CAFFE_SEGNET_MODULE.set_device(gpu_num)
            CAFFE_SEGNET_MODULE.GPU_NUM = gpu_num
    else:
        pass
        # print('Return previous segnet-caffe module')
    # Reconfigure the cached module if a different device was requested.
    if CAFFE_SEGNET_MODULE.GPU_NUM != gpu_num:
        if gpu_num is None:
            print('setting caffe mode to CPU')
            CAFFE_SEGNET_MODULE.set_mode_cpu()
            CAFFE_SEGNET_MODULE.GPU_NUM = gpu_num
        else:
            print('setting caffe mode to GPU {}'.format(gpu_num))
            CAFFE_SEGNET_MODULE.set_mode_gpu()
            CAFFE_SEGNET_MODULE.set_device(gpu_num)
            CAFFE_SEGNET_MODULE.GPU_NUM = gpu_num
    # Method 2
    # pycaffe_fpath = join(get_segnet_caffe_python_root(), 'caffe')
    # caffe = import_module_from_fpath(pycaffe_fpath)
    return CAFFE_SEGNET_MODULE
# METHOD 1
# Change this to the absolute directoy to SegNet Caffe
# caffe_path = join(get_segnet_caffe_root(), 'python')
# sys.path.insert(0, caffe_path)
# try:
# import caffe
# except ImportError:
# print('Caffe was not found in caffe_path = {!r}'.format(caffe_path))
# raise
class PYTHONPATH_CONTEXT(object):
    """
    Temporarily make ``path`` the highest-priority entry on sys.path.

    On exit the entry is removed again; if some code inside the context
    changed the head of sys.path, a RuntimeError is raised rather than
    silently removing the wrong entry.
    """
    def __init__(self, path):
        self.path = path
    def __enter__(self):
        # Prepend so imports inside the context resolve against `path` first.
        sys.path.insert(0, self.path)
    def __exit__(self, type_, value, trace):
        head = sys.path[0]
        if head != self.path:
            raise RuntimeError('PYTHONPATH was changed inside this context')
        del sys.path[0]
        if trace is not None:
            # return False on error
            return False  # nocover
|
<filename>Plots/Contours/NCL_color_1.py<gh_stars>1-10
"""
NCL_color_1.py
===============
This script illustrates the following concepts:
- Drawing a horizonal color bar
- Adjusting a colorbar position relative to plot axes
- Recreating a default NCL colormap
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/color_1.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/color_1_lg.png
Note:
This may not be the best colormap to interpret the information, but was included here in order to
demonstrate how to recreate the original NCL colormap. For more information on colormap choices, see the
Colors examples in the GeoCAT-examples documentation.
"""
###############################################################################
# Import packages:
import cartopy.crs as ccrs
import geocat.datafiles as gdf
import geocat.viz.util as gvutil
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from geocat.viz import cmaps as gvcmaps
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarray
ds = xr.open_dataset(gdf.get("netcdf_files/uv300.nc")).isel(time=1)
###############################################################################
# Plot:
# Generate figure and set its size in (width, height)
fig = plt.figure(figsize=(10, 8))
# Generate axes using Cartopy to draw coastlines
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(linewidth=0.5, alpha=0.6)
# Use geocat.viz.util convenience function to set axes limits & tick values
gvutil.set_axes_limits_and_ticks(ax,
xlim=(-180, 180),
ylim=(-90, 90),
xticks=np.linspace(-180, 180, 13),
yticks=np.linspace(-90, 90, 7))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax, labelsize=10)
# Use geocat.viz.util convenience function to make latitude, longitude tick labels
gvutil.add_lat_lon_ticklabels(ax)
# Import the default color map
newcmp = gvcmaps.ncl_default
# Define contour levels
levels = np.arange(-16, 48, 4)
# Define dictionary for kwargs
kwargs = dict(
levels=levels,
xticks=np.arange(-180, 181, 30), # nice x ticks
yticks=np.arange(-90, 91, 30), # nice y ticks
add_colorbar=False, # allow for colorbar specification later
transform=ccrs.PlateCarree(), # ds projection
)
# Contouf-plot U data (for filled contours)
fillplot = ds.U.plot.contourf(ax=ax, cmap=newcmp, **kwargs)
# Create horizonal color bar
# By changing the kwarg `pad`, the colorbar can be moved closer to or farther away from
# the axis parallel to it.
# `pad` defaults to 0.15 for horizontal colorbars
fig.colorbar(fillplot,
orientation="horizontal",
ticks=np.arange(-12, 44, 4),
label='',
shrink=0.75,
pad=0.11)
# Plot line contours
ds.U.plot.contour(ax=ax,
colors='black',
alpha=0.8,
linewidths=0.4,
linestyles='solid',
add_labels=False,
levels=levels,
transform=ccrs.PlateCarree())
# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.
gvutil.set_titles_and_labels(ax,
maintitle="Default Color",
lefttitle=ds.U.long_name,
lefttitlefontsize=16,
righttitle=ds.U.units,
righttitlefontsize=16,
xlabel="",
ylabel="")
# Show the plot
plt.show()
|
<reponame>rahulgupta9202/ColossalAI<filename>colossalai/communication/p2p.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def _communicate(tensor_send_next=None,
                 tensor_send_prev=None,
                 recv_prev=False,
                 recv_next=False,
                 recv_prev_shape=None,
                 recv_next_shape=None,
                 prev_rank=None,
                 next_rank=None,
                 up_group=None,
                 down_group=None,
                 dtype=None):
    """
    Adapted from megatron.p2p_communication.
    Communicate tensors between stages. Used as helper method in other
    communication methods that are used in pipeline schedule.
    Takes the following arguments:
        tensor_send_next: tensor to send to next rank (no tensor sent if
                          set to None).
        tensor_send_prev: tensor to send to prev rank (no tensor sent if
                          set to None).
        recv_prev: boolean for whether tensor should be received from
                   previous rank.
        recv_next: boolean for whether tensor should be received from
                   next rank.
        recv_prev_shape / recv_next_shape: shapes for the receive buffers
                   (required when the matching recv flag is set).
        prev_rank / next_rank: global ranks of the adjacent stages; looked
                   up from gpc when omitted.
        up_group / down_group: process groups toward the previous / next
                   stage; looked up from gpc when omitted.
        dtype: dtype of the receive buffers.
    Returns:
        (tensor_recv_prev, tensor_recv_next)
    """
    # Create placeholder tensors for receive in forward and backward directions
    # if needed.
    tensor_recv_prev = None
    tensor_recv_next = None
    if recv_prev:
        assert recv_prev_shape is not None
        # requires_grad=True so the received activation participates in autograd.
        tensor_recv_prev = torch.empty(recv_prev_shape,
                                       requires_grad=True,
                                       device=get_current_device(),
                                       dtype=dtype)
    if recv_next:
        assert recv_next_shape is not None
        tensor_recv_next = torch.empty(recv_next_shape,
                                       requires_grad=True,
                                       device=get_current_device(),
                                       dtype=dtype)
    # Resolve adjacent ranks / groups lazily, only when actually communicating
    # in that direction.
    if tensor_send_prev is not None or recv_prev:
        if prev_rank is None:
            prev_rank = gpc.get_prev_global_rank(
                ParallelMode.PIPELINE)
        if up_group is None:
            up_group = gpc.get_group(ParallelMode.PIPELINE_PREV)
    if tensor_send_next is not None or recv_next:
        if next_rank is None:
            next_rank = gpc.get_next_global_rank(
                ParallelMode.PIPELINE)
        if down_group is None:
            down_group = gpc.get_group(ParallelMode.PIPELINE_NEXT)
    # rank = dist.get_rank()
    rank = gpc.get_global_rank()
    # NOTE(review): point-to-point transfer is emulated with async broadcasts
    # inside the small up/down groups (src = sender's global rank) --
    # presumably each group contains exactly the two adjacent stages; verify
    # against the gpc group construction.
    ops = []
    if tensor_send_prev is not None:
        send_prev_op = dist.broadcast(tensor_send_prev,
                                      src=rank,
                                      group=up_group,
                                      async_op=True)
        ops.append(send_prev_op)
    if tensor_recv_prev is not None:
        recv_prev_op = dist.broadcast(tensor_recv_prev,
                                      src=prev_rank,
                                      group=up_group,
                                      async_op=True)
        ops.append(recv_prev_op)
    if tensor_recv_next is not None:
        recv_next_op = dist.broadcast(tensor_recv_next,
                                      src=next_rank,
                                      group=down_group,
                                      async_op=True)
        ops.append(recv_next_op)
    if tensor_send_next is not None:
        send_next_op = dist.broadcast(tensor_send_next,
                                      src=rank,
                                      group=down_group,
                                      async_op=True)
        ops.append(send_next_op)
    # Block until every queued transfer has completed.
    for req in ops:
        req.wait()
    # To protect against race condition when using batch_isend_irecv().
    torch.cuda.synchronize()
    return tensor_recv_prev, tensor_recv_next
def recv_forward(input_tensor_shape, prev_rank=None, up_group=None):
    """Receive the forward-pass input tensor from the previous pipeline stage.

    :param input_tensor_shape: shape of the tensor to receive
    :param prev_rank: global rank of the sending stage
    :param up_group: process group containing the previous stage
    :type input_tensor_shape: torch.Size
    :type prev_rank: int, optional
    :type up_group: ProcessGroup, optional
    :return: the received input tensor, or None on the first stage
    :rtype: Tensor
    """
    if gpc.is_first_rank(ParallelMode.PIPELINE):
        # The first stage has no predecessor to receive from.
        return None
    input_tensor, _ = _communicate(recv_prev=True,
                                   recv_prev_shape=input_tensor_shape,
                                   prev_rank=prev_rank,
                                   up_group=up_group)
    return input_tensor
def recv_backward(output_grad_shape, next_rank=None, down_group=None):
    """Receive the output-gradient tensor from the next pipeline stage.

    :param output_grad_shape: shape of the tensor to receive
    :param next_rank: global rank of the sending stage
    :param down_group: process group containing the next stage
    :type output_grad_shape: torch.Size
    :type next_rank: int, optional
    :type down_group: ProcessGroup, optional
    :return: the received gradient tensor, or None on the last stage
    :rtype: Tensor
    """
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        # The last stage has no successor to receive gradients from.
        return None
    _, output_tensor_grad = _communicate(recv_next=True,
                                         recv_next_shape=output_grad_shape,
                                         next_rank=next_rank,
                                         down_group=down_group)
    return output_tensor_grad
def send_forward(output_tensor,
                 next_rank=None,
                 down_group=None):
    """Send the stage's output tensor to the next pipeline stage.

    :param output_tensor: tensor to send
    :param next_rank: global rank of the receiving stage
    :param down_group: process group containing the next stage
    :type output_tensor: Tensor
    :type next_rank: int, optional
    :type down_group: ProcessGroup, optional
    """
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        # Nothing downstream of the last stage.
        return
    _communicate(tensor_send_next=output_tensor,
                 next_rank=next_rank,
                 down_group=down_group)
def send_backward(input_tensor_grad,
                  prev_rank=None,
                  up_group=None):
    """Send the input-gradient tensor to the previous pipeline stage.

    :param input_tensor_grad: tensor to send
    :param prev_rank: global rank of the receiving stage
    :param up_group: process group containing the previous stage
    :type input_tensor_grad: Tensor
    :type prev_rank: int, optional
    :type up_group: ProcessGroup, optional
    """
    if gpc.is_first_rank(ParallelMode.PIPELINE):
        # Nothing upstream of the first stage.
        return
    _communicate(tensor_send_prev=input_tensor_grad,
                 prev_rank=prev_rank,
                 up_group=up_group)
def send_forward_recv_backward(output_tensor,
                               output_grad_shape,
                               recv_next=True,
                               next_rank=None,
                               down_group=None):
    """Batched operation: send the output tensor to the next stage while
    receiving the output-gradient tensor from that same stage.

    :param output_tensor: tensor to send
    :param output_grad_shape: shape of the tensor to receive
    :type output_tensor: Tensor
    :type output_grad_shape: torch.Size
    :return: the received gradient tensor, or None on the last stage
    :rtype: Tensor
    """
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        # No successor: nothing to exchange with.
        return None
    _, output_tensor_grad = _communicate(tensor_send_next=output_tensor,
                                         recv_next=recv_next,
                                         recv_next_shape=output_grad_shape,
                                         next_rank=next_rank,
                                         down_group=down_group)
    return output_tensor_grad
def send_backward_recv_forward(input_tensor_grad,
                               input_tensor_shape,
                               recv_prev=True,
                               prev_rank=None,
                               up_group=None):
    """Batched operation: send the input-gradient tensor to the previous
    stage while receiving the next input tensor from that same stage.

    :param input_tensor_grad: tensor to send
    :param input_tensor_shape: shape of the tensor to receive
    :type input_tensor_grad: Tensor
    :type input_tensor_shape: torch.Size
    :return: the received input tensor, or None on the first stage
    :rtype: Tensor
    """
    if gpc.is_first_rank(ParallelMode.PIPELINE):
        # No predecessor: nothing to exchange with.
        return None
    input_tensor, _ = _communicate(tensor_send_prev=input_tensor_grad,
                                   recv_prev=recv_prev,
                                   recv_prev_shape=input_tensor_shape,
                                   prev_rank=prev_rank,
                                   up_group=up_group)
    return input_tensor
def send_forward_recv_forward(output_tensor,
                              input_tensor_shape,
                              recv_prev=True,
                              prev_rank=None,
                              next_rank=None,
                              up_group=None,
                              down_group=None):
    """Batched operation: pass the output tensor downstream while pulling
    the next input tensor from upstream.

    :param output_tensor: tensor to send to the next stage
    :param input_tensor_shape: shape of the tensor received from the previous stage
    :type output_tensor: Tensor
    :type input_tensor_shape: torch.Size
    :return: the received input tensor
    :rtype: Tensor
    """
    result = _communicate(tensor_send_next=output_tensor,
                          recv_prev=recv_prev,
                          recv_prev_shape=input_tensor_shape,
                          prev_rank=prev_rank,
                          next_rank=next_rank,
                          up_group=up_group,
                          down_group=down_group)
    # _communicate returns (recv_prev, recv_next); only the former is wanted.
    return result[0]
def send_backward_recv_backward(input_tensor_grad,
                                output_grad_shape,
                                recv_next=True,
                                prev_rank=None,
                                next_rank=None,
                                up_group=None,
                                down_group=None):
    """Batched operation: pass the input gradient upstream while pulling
    the output gradient from downstream.

    :param input_tensor_grad: tensor to send to the previous stage
    :param output_grad_shape: shape of the tensor received from the next stage
    :type input_tensor_grad: Tensor
    :type output_grad_shape: torch.Size
    :return: the received output-gradient tensor
    :rtype: Tensor
    """
    result = _communicate(tensor_send_prev=input_tensor_grad,
                          recv_next=recv_next,
                          recv_next_shape=output_grad_shape,
                          prev_rank=prev_rank,
                          next_rank=next_rank,
                          up_group=up_group,
                          down_group=down_group)
    # _communicate returns (recv_prev, recv_next); only the latter is wanted.
    return result[1]
def send_forward_backward_recv_forward_backward(output_tensor,
                                                input_tensor_grad,
                                                input_tensor_shape,
                                                output_grad_shape,
                                                recv_prev=True,
                                                recv_next=True,
                                                prev_rank=None,
                                                next_rank=None,
                                                up_group=None,
                                                down_group=None):
    """Batched operation in both directions at once: send the output tensor
    downstream and the input gradient upstream, while receiving the next
    input tensor from upstream and the output gradient from downstream.

    :param output_tensor: tensor sent to the next stage
    :param input_tensor_grad: tensor sent to the previous stage
    :param input_tensor_shape: shape of the tensor received from the previous stage
    :param output_grad_shape: shape of the tensor received from the next stage
    :type output_tensor: Tensor
    :type input_tensor_grad: Tensor
    :type input_tensor_shape: torch.Size
    :type output_grad_shape: torch.Size
    :return: (received input tensor, received output-gradient tensor)
    :rtype: (Tensor, Tensor)
    """
    received = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=input_tensor_grad,
        recv_prev=recv_prev,
        recv_next=recv_next,
        recv_prev_shape=input_tensor_shape,
        recv_next_shape=output_grad_shape,
        prev_rank=prev_rank,
        next_rank=next_rank,
        up_group=up_group,
        down_group=down_group)
    input_tensor, output_tensor_grad = received
    return input_tensor, output_tensor_grad
|
#!/usr/bin/python3
# File: test_review.py
# Authors: <NAME> - <NAME>
# email(s): <<EMAIL>>
# <<EMAIL>>
"""
This Module Defines Unittest for models/review.py.
Unittest classes:
TestReview_instantiation
TestReview_save
TestReview_to_dict
"""
import os
import models
import unittest
from datetime import datetime
from time import sleep
from models.review import Review
class TestReview_instantiation(unittest.TestCase):
    """Unittests for testing instantiation of the Review class.

    Covers default construction, registration with storage, public
    attribute types, id/timestamp uniqueness, __str__ output, and
    kwargs-based (de)serialization constructors.
    """
    def test_no_args_instantiates(self):
        self.assertEqual(Review, type(Review()))
    def test_new_instance_stored_in_objects(self):
        # Instantiation must register the object with the storage engine.
        self.assertIn(Review(), models.storage.all().values())
    def test_id_is_public_str(self):
        self.assertEqual(str, type(Review().id))
    def test_created_at_is_public_datetime(self):
        self.assertEqual(datetime, type(Review().created_at))
    def test_updated_at_is_public_datetime(self):
        self.assertEqual(datetime, type(Review().updated_at))
    def test_place_id_is_public_class_attribute(self):
        # place_id is a class attribute: visible via dir(), absent from
        # the instance __dict__ until assigned.
        rv = Review()
        self.assertEqual(str, type(Review.place_id))
        self.assertIn("place_id", dir(rv))
        self.assertNotIn("place_id", rv.__dict__)
    def test_user_id_is_public_class_attribute(self):
        rv = Review()
        self.assertEqual(str, type(Review.user_id))
        self.assertIn("user_id", dir(rv))
        self.assertNotIn("user_id", rv.__dict__)
    def test_text_is_public_class_attribute(self):
        rv = Review()
        self.assertEqual(str, type(Review.text))
        self.assertIn("text", dir(rv))
        self.assertNotIn("text", rv.__dict__)
    def test_two_reviews_unique_ids(self):
        rv1 = Review()
        rv2 = Review()
        self.assertNotEqual(rv1.id, rv2.id)
    def test_two_reviews_different_created_at(self):
        rv1 = Review()
        sleep(0.05)
        rv2 = Review()
        self.assertLess(rv1.created_at, rv2.created_at)
    def test_two_reviews_different_updated_at(self):
        rv1 = Review()
        sleep(0.05)
        rv2 = Review()
        self.assertLess(rv1.updated_at, rv2.updated_at)
    def test_str_representation(self):
        dt = datetime.today()
        dt_repr = repr(dt)
        rv = Review()
        rv.id = "123456789"
        rv.created_at = rv.updated_at = dt
        rvstr = rv.__str__()
        self.assertIn("[Review] (123456789)", rvstr)
        self.assertIn("'id': '123456789'", rvstr)
        self.assertIn("'created_at': " + dt_repr, rvstr)
        self.assertIn("'updated_at': " + dt_repr, rvstr)
    def test_args_unused(self):
        # Positional args must be ignored by __init__.
        rv = Review(None)
        self.assertNotIn(None, rv.__dict__.values())
    def test_instantiation_with_kwargs(self):
        # ISO-format datetime strings in kwargs must be parsed back.
        dt = datetime.today()
        dt_iso = dt.isoformat()
        rv = Review(id="678", created_at=dt_iso, updated_at=dt_iso)
        self.assertEqual(rv.id, "678")
        self.assertEqual(rv.created_at, dt)
        self.assertEqual(rv.updated_at, dt)
    def test_instantiation_with_None_kwargs(self):
        with self.assertRaises(TypeError):
            Review(id=None, created_at=None, updated_at=None)
class TestReview_save(unittest.TestCase):
    """Unittests for testing save method of the Review class.

    setUp/tearDown shelve any pre-existing file.json so save() writes to a
    clean file, then restore it afterwards.
    """
    def setUp(self):
        # Fix: this was decorated with @classmethod while taking ``self``;
        # unittest's fixture protocol expects a plain instance method
        # (matching tearDown below), so the decorator is removed.
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
    def tearDown(self):
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass
    def test_one_save(self):
        # save() must advance updated_at.
        rv = Review()
        sleep(0.05)
        first_updated_at = rv.updated_at
        rv.save()
        self.assertLess(first_updated_at, rv.updated_at)
    def test_two_saves(self):
        rv = Review()
        sleep(0.05)
        first_updated_at = rv.updated_at
        rv.save()
        second_updated_at = rv.updated_at
        self.assertLess(first_updated_at, second_updated_at)
        sleep(0.05)
        rv.save()
        self.assertLess(second_updated_at, rv.updated_at)
    def test_save_with_arg(self):
        rv = Review()
        with self.assertRaises(TypeError):
            rv.save(None)
    def test_save_updates_file(self):
        # save() must serialize the object into file.json under "Review.<id>".
        rv = Review()
        rv.save()
        rvid = "Review." + rv.id
        with open("file.json", "r") as f:
            self.assertIn(rvid, f.read())
class TestReview_to_dict(unittest.TestCase):
    """Unittests for testing to_dict method of the Review class."""
    def test_to_dict_type(self):
        # Fix: was assertTrue(dict, type(...)), which always passes because
        # the first argument (dict) is truthy; assertEqual checks the type.
        self.assertEqual(dict, type(Review().to_dict()))
    def test_to_dict_contains_correct_keys(self):
        rv = Review()
        self.assertIn("id", rv.to_dict())
        self.assertIn("created_at", rv.to_dict())
        self.assertIn("updated_at", rv.to_dict())
        self.assertIn("__class__", rv.to_dict())
    def test_to_dict_contains_added_attributes(self):
        # Dynamically added instance attributes must be serialized too.
        rv = Review()
        rv.middle_name = "Holberton"
        rv.my_number = 98
        self.assertEqual("Holberton", rv.middle_name)
        self.assertIn("my_number", rv.to_dict())
    def test_to_dict_datetime_attributes_are_strs(self):
        rv = Review()
        rv_dict = rv.to_dict()
        self.assertEqual(str, type(rv_dict["id"]))
        self.assertEqual(str, type(rv_dict["created_at"]))
        self.assertEqual(str, type(rv_dict["updated_at"]))
    def test_to_dict_output(self):
        dt = datetime.today()
        rv = Review()
        rv.id = "123456789"
        rv.created_at = rv.updated_at = dt
        tdict = {
            'id': '123456789',
            '__class__': 'Review',
            'created_at': dt.isoformat(),
            'updated_at': dt.isoformat(),
        }
        self.assertDictEqual(rv.to_dict(), tdict)
    def test_contrast_to_dict_dunder_dict(self):
        # to_dict() adds __class__/ISO timestamps, so it differs from __dict__.
        rv = Review()
        self.assertNotEqual(rv.to_dict(), rv.__dict__)
    def test_to_dict_with_arg(self):
        rv = Review()
        with self.assertRaises(TypeError):
            rv.to_dict(None)
if __name__ == "__main__":
    unittest.main()
|
<reponame>xingjianleng/cogent3
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def pog_traceback(pogs, aligned_positions):
    """Merge two child POGs along a traceback of aligned position pairs.

    ``aligned_positions`` is a sequence of (pos0, pos1) pairs where either
    element may be None (a gap in that dimension).  Positions skipped
    between consecutive pairs are emitted as old-gap columns.
    """
    consumed = [0, 0]
    builder = POGBuilder(pogs)
    for pair in aligned_positions:
        assert len(pair) == 2
        for dim, pos in enumerate(pair):
            if pos is None:
                continue
            # Emit any positions in this dimension skipped since last time.
            builder.add_skipped(dim, consumed[dim], pos)
            consumed[dim] = pos + 1
        builder.add_aligned(pair)
    # Flush the unconsumed tails of both children.
    for dim in (0, 1):
        builder.add_skipped(dim, consumed[dim], len(pogs[dim]))
    return builder.get_pog()
class POGBuilder(object):
    """Accumulates aligned/gap columns for a pair of child POGs and emits
    the merged parent POG via get_pog().

    State strings: 'm' = match column, 'x'/'y' = gap in dim 0 / dim 1;
    uppercase marks a *new* gap (old_gap=False), lowercase an old one.
    """
    def __init__(self, children):
        self.children = children
        # remap[dim]: child position -> column index in the merged alignment.
        self.remap = [{} for _ in children]
        # NOTE(review): `started`, `last`, `result` and `origins` are written
        # below but never read by get_pog() -- they appear to be vestigial;
        # confirm before removing.
        self.started = [False, False]
        self.last = [None, None]
        self.result = [[]]
        self.origins = [[]]
        self.aligned_positions = []
        self.states = []
    def add_skipped(self, dim, start, end, old_gap=True):
        # Emit one gap column per skipped position in dimension `dim`.
        for p in range(start, end):
            fp = [None, None]
            fp[dim] = p
            fp = tuple(fp)
            self.add_aligned(fp, old_gap=old_gap)
    def add_aligned(self, posn, old_gap=False):
        # Record one alignment column; `posn` is (pos0, pos1) with None
        # marking a gap in that dimension.
        pre_merged = set()
        assert len(posn) == 2
        for (dim, pos) in enumerate(posn):
            if pos is None:
                continue
            self.remap[dim][pos] = len(self.aligned_positions)
            self.last[dim] = pos
        self.result.append(pre_merged)
        self.aligned_positions.append(posn)
        if None not in posn:
            state = "m"
        elif posn[0] is None:
            state = "x"
        else:
            state = "y"
        if not old_gap:
            # Uppercase marks gaps newly introduced by this alignment.
            state = state.upper()
        self.states.append(state)
    def get_pog(self):
        jumps = []
        gapmap = {}
        ingap = False
        # Build a list of gaps (ie: segments of X or Y state) in
        # the alignment and a dict which maps from seq posn to the
        # start of the surrounding gap.  The sentinel "." closes a
        # trailing gap.
        for (i, state) in enumerate(self.states + ["."]):
            gap = state in "XYxy"
            if gap and not ingap:
                start = i
                ingap = True
            elif ingap and not gap:
                jumps.append((start, i))
                ingap = False
            if ingap:
                gapmap[i] = start
        # in case of tail gap
        for (dim, child) in enumerate(self.children):
            pos = len(child)
            self.remap[dim][pos] = len(self.aligned_positions)
        # Keep only those child gaps which sit entirely within a gap
        # in this alignment
        child_jumps = []
        for (dim, pog) in enumerate(self.children):
            r = self.remap[dim]
            for (i, j) in pog.jumps:
                (i, j) = (r[i], r[j])
                if i in gapmap and j in gapmap and gapmap[i] == gapmap[j]:
                    child_jumps.append((i, j))
        pog = POG(len(self.aligned_positions), jumps, child_jumps)
        pog.aligned_positions = self.aligned_positions
        pog.states = "".join(self.states)
        return pog
class POG(object):
    """A representation of the indel positions in a pairwise alignment, ie:
    those segments of the consensus sequence which may be inserts and so absent
    from the common ancestor. Nearly equivalent to a generic Partial Order
    Graph.
    Indels are represented as tuples of
    (1st posn in indel, 1st posn after indel)
    Two lists of indels are kept, one for indels in the alignment, and one
    for indels in its two children in case they are also alignments.
    This data structure largely inspired by:
    <NAME>, <NAME>. 2005. An algorithm for progressive multiple
    alignment of sequences with insertions. PNAS 102:10557-10562
    """
    def __init__(self, length, jumps, child_jumps):
        self.jumps = jumps
        self.child_jumps = child_jumps
        # all_jumps must stay sorted by end position; as_list_of_pred_lists
        # relies on that ordering.
        self.all_jumps = self.jumps + self.child_jumps
        self.all_jumps.sort(key=lambda i_j: i_j[1])
        self.length = length
        # Sanity-check every jump lies within [0, length] and is forward.
        for (i, j) in self.all_jumps:
            assert i <= j, (length, jumps, child_jumps)
            assert 0 <= i <= length, (length, jumps, child_jumps)
            assert 0 <= j <= length, (length, jumps, child_jumps)
    def traceback(self, other, aligned_positions):
        # Merge this POG with `other` along an alignment traceback.
        return pog_traceback([self, other], aligned_positions)
    def as_list_of_pred_lists(self):
        """A representation of the POG as a list of predecessor positions,
        a simple way to represent DAGs eg: [], [0], [1] would be a simple
        sequence of length 3. Extra start and end positions are added, so
        the length is len(self)+2 and the positions are all offset by 1"""
        result = [[]]
        # First the regular, linear sequence relationships
        for i in range(self.length + 1):
            pre = [i]
            result.append(pre)
        # Then add in the indel jumps. Given an indel from i to j
        # j could have been ajacent to one of i's predecessors in
        # the ancestral sequence. This depends on all_jumps being sorted
        # by j.
        for (i, j) in self.all_jumps:
            if i == j:
                continue
            assert i < j
            result[j + 1].extend(result[i + 1])
        return result
    def get_aligned_positions(self):
        # Set externally (eg: by POGBuilder.get_pog), not in __init__.
        return self.aligned_positions
    def get_full_aligned_positions(self):
        return self.aligned_positions
    def __len__(self):
        return self.length
    def midlinks(self):
        # for the hirchberg algorithm: links usable to split the problem
        # at the midpoint, ie: the midpoint itself plus jumps spanning it.
        half = self.length // 2
        jumps = [(i, j) for (i, j) in self.all_jumps if i <= half and j >= half]
        return [(half, half)] + jumps
    def __getitem__(self, index):
        # POGs need to be sliceable for the hirchberg algorithm.
        if index.start is None:
            start = 0
        else:
            start = index.start
        if index.stop is None:
            end = self.length
        else:
            end = index.stop
        assert end >= start, (start, end, index, self.length)
        def moved(i, j):
            # Clamp a jump into [start, end] and shift to slice coordinates.
            i2 = max(min(i, end), start) - start
            j2 = max(min(j, end), start) - start
            return (i2, j2)
        jumps = [moved(i, j) for (i, j) in self.jumps if i < end or j > start]
        cjumps = [moved(i, j) for (i, j) in self.child_jumps if i < end or j > start]
        return POG(end - start, jumps, cjumps)
    def backward(self):
        # Switches predecessors / successors
        # POGs need to be reversable for the hirchberg algorithm.
        length = self.length
        jumps = [(length - j, length - i) for (i, j) in self.jumps]
        cjumps = [(length - j, length - i) for (i, j) in self.child_jumps]
        return POG(length, jumps, cjumps)
    def write_to_dot(self, dot):
        # Emit the DAG in graphviz dot format to the file-like `dot`.
        pred_sets = self.as_list_of_pred_lists()
        print("digraph POG {", file=dot)
        for (i, preds) in enumerate(pred_sets):
            # print i, preds
            for pred in preds:
                print("  ", (f"node{pred} -> node{i}"), file=dot)
            if i == 0:
                label = "START"
            elif i == len(pred_sets) - 1:
                label = "END"
            else:
                label = str(i)
            print("  ", (f"node{i}"), f'[label="{label}"]', file=dot)
        print("}", file=dot)
        print("", file=dot)
class LeafPOG(POG):
    """The POG for a known sequence contains no indels."""

    def __init__(self, length):
        self.length = length
        self.all_jumps = []
        self.jumps = []
        # Fix: the inherited POG.__getitem__ (used by the Hirschberg
        # slicing) reads self.child_jumps, which was never set here, so
        # slicing a LeafPOG raised AttributeError.  A leaf has no child
        # alignments, hence the empty list.
        self.child_jumps = []

    def as_list_of_pred_lists(self):
        # NOTE(review): each interior entry here is [[i]] (a nested list),
        # whereas POG.as_list_of_pred_lists produces flat [i] entries --
        # confirm which shape downstream consumers expect before changing.
        pog = [[[i]] for i in range(self.length)]
        return [[]] + pog + [[len(pog)]]

    def __len__(self):
        return self.length

    def backward(self):
        # A leaf has no indels, so reversing it is just a fresh leaf of
        # the same length.
        return LeafPOG(self.length)
def leaf2pog(leaf):
    """Build a LeafPOG spanning the whole of *leaf* (anything with a length)."""
    return LeafPOG(len(leaf))
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from __future__ import unicode_literals
import re
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
from docutils import nodes, transforms
# -- Project information -----------------------------------------------------
project = 'Turbinia'
copyright = '2020, Google Inc'
author = 'Turbinia maintainers'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
    'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
    'recommonmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
    '**': [
        'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
        'searchbox.html'
    ]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'turbiniadoc'
# Logo displayed in the header/sidebar of the RTD theme.
html_logo = "images/turbinia-logo.jpg"
class ProcessLink(transforms.Transform):
    """Transform definition to parse .md references to internal pages."""

    default_priority = 1000

    def find_replace(self, node):
        """Parses URIs containing .md and replaces them with their HTML page."""
        if isinstance(node, nodes.reference) and 'refuri' in node:
            uri = node['refuri']
            node['refuri'] = uri[:-3] + '.html' if uri.endswith('.md') else uri
        return node

    def traverse(self, node):
        """Traverse the document tree rooted at node.

        node : docutil node
            current root node to traverse
        """
        self.find_replace(node)
        for child in node.children:
            self.traverse(child)

    # pylint: disable=arguments-differ,attribute-defined-outside-init
    # this was taken from GRR's config file for documentation
    def apply(self):
        self.current_level = 0
        self.traverse(self.document)
def setup(app):
    """Add custom parsers to Sphinx generation."""
    # Disable recommonmark's own auto doc refs; ProcessLink handles .md links.
    app.add_config_value('recommonmark_config', {'enable_auto_doc_ref': False}, True)
    app.add_transform(AutoStructify)
    app.add_transform(ProcessLink)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from itertools import chain
from pants.backend.java.dependency_inference.rules import (
JavaInferredDependencies,
JavaInferredDependenciesAndExportsRequest,
)
from pants.backend.java.dependency_inference.rules import rules as java_dep_inference_rules
from pants.backend.java.target_types import JavaFieldSet, JavaGeneratorFieldSet, JavaSourceField
from pants.core.util_rules.archive import ZipBinary
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import EMPTY_DIGEST, CreateDigest, Digest, Directory, MergeDigests, Snapshot
from pants.engine.process import BashBinary, FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import SourcesField
from pants.engine.unions import UnionMembership, UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.compile import (
ClasspathEntry,
ClasspathEntryRequest,
CompileResult,
FallibleClasspathEntry,
)
from pants.jvm.compile import rules as jvm_compile_rules
from pants.jvm.jdk_rules import JdkSetup
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
class CompileJavaSourceRequest(ClasspathEntryRequest):
    """Request to build a classpath entry by compiling a coarsened set of
    Java source (or source-generator) targets with javac."""
    field_sets = (JavaFieldSet, JavaGeneratorFieldSet)
@rule(desc="Compile with javac")
async def compile_java_source(
    bash: BashBinary,
    jdk_setup: JdkSetup,
    zip_binary: ZipBinary,
    union_membership: UnionMembership,
    request: CompileJavaSourceRequest,
) -> FallibleClasspathEntry:
    """Compile one coarsened component of Java sources and jar the output.

    Pipeline: resolve the direct dependency classpaths -> re-run dependency
    inference to learn which dependencies are `export`ed -> gather the
    component's sources -> run javac -> zip the classfiles into a jar ->
    merge exported dependency entries into the result.  Failures at any
    stage are reported via FallibleClasspathEntry rather than raised.
    """
    # Request the component's direct dependency classpath, and additionally any prerequisite.
    classpath_entry_requests = [
        *((request.prerequisite,) if request.prerequisite else ()),
        *(
            ClasspathEntryRequest.for_targets(
                union_membership, component=coarsened_dep, resolve=request.resolve
            )
            for coarsened_dep in request.component.dependencies
        ),
    ]
    # if_all_succeeded returns None when any dependency failed to compile;
    # in that case this component is reported as DEPENDENCY_FAILED.
    direct_dependency_classpath_entries = FallibleClasspathEntry.if_all_succeeded(
        await MultiGet(
            Get(FallibleClasspathEntry, ClasspathEntryRequest, cpe)
            for cpe in classpath_entry_requests
        )
    )
    if direct_dependency_classpath_entries is None:
        return FallibleClasspathEntry(
            description=str(request.component),
            result=CompileResult.DEPENDENCY_FAILED,
            output=None,
            exit_code=1,
        )
    # Capture just the `ClasspathEntry` objects that are listed as `export` types by source analysis
    deps_to_classpath_entries = dict(
        zip(request.component.dependencies, direct_dependency_classpath_entries or ())
    )
    # Re-request inferred dependencies to get a list of export dependency addresses
    inferred_dependencies = await MultiGet(
        Get(
            JavaInferredDependencies,
            JavaInferredDependenciesAndExportsRequest(tgt[JavaSourceField]),
        )
        for tgt in request.component.members
        if JavaFieldSet.is_applicable(tgt)
    )
    flat_exports = {export for i in inferred_dependencies for export in i.exports}
    # Dependencies whose members are exported must be re-exposed on this
    # component's resulting classpath entry (merged in at the end).
    export_classpath_entries = [
        classpath_entry
        for coarsened_target, classpath_entry in deps_to_classpath_entries.items()
        if any(m.address in flat_exports for m in coarsened_target.members)
    ]
    # Then collect the component's sources.
    component_members_with_sources = tuple(
        t for t in request.component.members if t.has_field(SourcesField)
    )
    component_members_and_source_files = zip(
        component_members_with_sources,
        await MultiGet(
            Get(
                SourceFiles,
                SourceFilesRequest(
                    (t.get(SourcesField),),
                    for_sources_types=(JavaSourceField,),
                    enable_codegen=True,
                ),
            )
            for t in component_members_with_sources
        ),
    )
    component_members_and_java_source_files = [
        (target, sources)
        for target, sources in component_members_and_source_files
        if sources.snapshot.digest != EMPTY_DIGEST
    ]
    if not component_members_and_java_source_files:
        # Is a generator, and so exports all of its direct deps.
        exported_digest = await Get(
            Digest, MergeDigests(cpe.digest for cpe in direct_dependency_classpath_entries)
        )
        classpath_entry = ClasspathEntry.merge(exported_digest, direct_dependency_classpath_entries)
        return FallibleClasspathEntry(
            description=str(request.component),
            result=CompileResult.SUCCEEDED,
            output=classpath_entry,
            exit_code=0,
        )
    # Output directory for the javac-produced classfiles.
    dest_dir = "classfiles"
    dest_dir_digest = await Get(
        Digest,
        CreateDigest([Directory(dest_dir)]),
    )
    merged_digest = await Get(
        Digest,
        MergeDigests(
            (
                dest_dir_digest,
                *(
                    sources.snapshot.digest
                    for _, sources in component_members_and_java_source_files
                ),
            )
        ),
    )
    # Dependencies are mounted as immutable inputs under a stable prefix so
    # the classpath string is cacheable across sandboxes.
    usercp = "__cp"
    user_classpath = Classpath(direct_dependency_classpath_entries)
    classpath_arg = ":".join(user_classpath.root_immutable_inputs_args(prefix=usercp))
    immutable_input_digests = {
        **jdk_setup.immutable_input_digests,
        **dict(user_classpath.root_immutable_inputs(prefix=usercp)),
    }
    # Compile.
    compile_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                *jdk_setup.args(bash, [f"{jdk_setup.java_home}/lib/tools.jar"]),
                "com.sun.tools.javac.Main",
                *(("-cp", classpath_arg) if classpath_arg else ()),
                "-d",
                dest_dir,
                *sorted(
                    chain.from_iterable(
                        sources.snapshot.files
                        for _, sources in component_members_and_java_source_files
                    )
                ),
            ],
            input_digest=merged_digest,
            immutable_input_digests=immutable_input_digests,
            use_nailgun=jdk_setup.immutable_input_digests.keys(),
            append_only_caches=jdk_setup.append_only_caches,
            env=jdk_setup.env,
            output_directories=(dest_dir,),
            description=f"Compile {request.component} with javac",
            level=LogLevel.DEBUG,
        ),
    )
    if compile_result.exit_code != 0:
        return FallibleClasspathEntry.from_fallible_process_result(
            str(request.component),
            compile_result,
            None,
        )
    # Jar.
    # NB: We jar up the outputs in a separate process because the nailgun runner cannot support
    # invoking via a `bash` wrapper (since the trailing portion of the command is executed by
    # the nailgun server). We might be able to resolve this in the future via a Javac wrapper shim.
    output_snapshot = await Get(Snapshot, Digest, compile_result.output_digest)
    output_file = f"{request.component.representative.address.path_safe_spec}.javac.jar"
    output_files: tuple[str, ...] = (output_file,)
    if output_snapshot.files:
        jar_result = await Get(
            ProcessResult,
            Process(
                argv=[
                    bash.path,
                    "-c",
                    " ".join(
                        ["cd", dest_dir, ";", zip_binary.path, "-r", f"../{output_file}", "."]
                    ),
                ],
                input_digest=compile_result.output_digest,
                output_files=output_files,
                description=f"Capture outputs of {request.component} for javac",
                level=LogLevel.TRACE,
            ),
        )
        jar_output_digest = jar_result.output_digest
    else:
        # If there was no output, then do not create a jar file. This may occur, for example, when compiling
        # a `package-info.java` in a single partition.
        output_files = ()
        jar_output_digest = EMPTY_DIGEST
    output_classpath = ClasspathEntry(
        jar_output_digest, output_files, direct_dependency_classpath_entries
    )
    # Merge any exported dependency entries into this component's entry so
    # consumers see exported symbols on their compile classpath.
    if export_classpath_entries:
        merged_export_digest = await Get(
            Digest,
            MergeDigests((output_classpath.digest, *(i.digest for i in export_classpath_entries))),
        )
        merged_classpath = ClasspathEntry.merge(
            merged_export_digest, (output_classpath, *export_classpath_entries)
        )
        output_classpath = merged_classpath
    return FallibleClasspathEntry.from_fallible_process_result(
        str(request.component),
        compile_result,
        output_classpath,
    )
def rules():
    """All engine rules needed for javac-based compilation of Java sources."""
    collected = list(collect_rules())
    collected.extend(java_dep_inference_rules())
    collected.extend(jvm_compile_rules())
    collected.append(UnionRule(ClasspathEntryRequest, CompileJavaSourceRequest))
    return collected
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import nested_scopes
#import findspark
#findspark.init()
# In[2]:
import re
import os
import pandas
pandas.set_option('display.max_rows', None)
# In[3]:
import threading
import collections
import gzip
# In[4]:
import pyspark
import pyspark.sql
from pyspark.sql import SparkSession
from pyspark.sql.types import (StructType, StructField, DateType,
TimestampType, StringType, LongType, IntegerType, DoubleType,FloatType)
from pyspark.sql.functions import to_date, floor
from pyspark.ml.feature import StringIndexer, VectorAssembler, OneHotEncoder
from pyspark.ml import Pipeline
from pyspark.sql.functions import lit
import time, timeit
from pyspark.storagelevel import StorageLevel
from pyspark.sql.window import Window
from pyspark.sql.functions import rank, col
# In[5]:
from pyspark.ml import Pipeline
import pandas
import numpy as np
# In[6]:
import math
from functools import reduce
import json
# In[7]:
from pyspark.sql.types import *
from pyspark.sql import functions as F
from datetime import date
# In[8]:
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/yuzhou/.m2/repository/ml/dmlc/xgboost4j-spark_2.11/distr_opt/xgboost4j-spark_2.11-distr_opt.jar,/home/yuzhou/.m2/repository/ml/dmlc/xgboost4j_2.11/distr_opt/xgboost4j_2.11-distr_opt.jar pyspark-shell'
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/yuzhou/.m2/repository/ml/dmlc/xgboost4j-spark_2.11/1.0.0-SNAPSHOT/xgboost4j-spark_2.11-1.0.0-SNAPSHOT.jar,/home/yuzhou/.m2/repository/ml/dmlc/xgboost4j_2.11/1.0.0-SNAPSHOT/xgboost4j_2.11-1.0.0-SNAPSHOT.jar pyspark-shell'
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/yuzhou/.m2/repository/ml/dmlc/xgboost4j-spark/0.82/xgboost4j-spark-0.82.jar,/home/yuzhou/.m2/repository/ml/dmlc/xgboost4j/0.82/xgboost4j-0.82.jar pyspark-shell'
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/yuzhou/.m2/repository/ml/dmlc/xgboost4j-spark_2.11/1.0.0-SNAPSHOT-master-pr-4824/xgboost4j-spark_2.11-1.0.0-SNAPSHOT-master-pr-4824.jar,/home/yuzhou/.m2/repository/ml/dmlc/xgboost4j_2.11/1.0.0-SNAPSHOT-master-pr-4824/xgboost4j_2.11-1.0.0-SNAPSHOT-master-pr-4824.jar pyspark-shell'
os.environ['PYSPARK_SUBMIT_ARGS'] = '--master yarn --jars /home/xgboost/.m2/repository/ml/dmlc/xgboost4j-spark_2.12/1.1.0-SNAPSHOT/xgboost4j-spark_2.12-1.1.0-SNAPSHOT.jar,/home/xgboost/.m2/repository/ml/dmlc/xgboost4j_2.12/1.1.0-SNAPSHOT/xgboost4j_2.12-1.1.0-SNAPSHOT.jar --conf "spark.executor.extraLibraryPath=/home/xgboost/install/OneCCL/oneccl/build/_install/lib" pyspark-shell'
#os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/xgboost/.m2/repository/ml/dmlc/xgboost4j/0.82/xgboost4j-0.82.jar,/home/xgboost/.m2/repository/ml/dmlc/xgboost4j/1.0.0-SNAPSHOT/xgboost4j_2.12-1.0.0-SNAPSHOT.jar pyspark-shell'
#/home/xgboost/.m2/repository/ml/dmlc/xgboost4j/0.82/xgboost4j-0.82.jar
#Dev
#os.environ['OMP_NUM_THREADS'] = '72'
# In[22]:
# Seed for reproducible random splits.
RANDOM_SEED = 42
# Hostnames of the worker nodes this benchmark may run against.
clients = ["sr243"]
# In[9]:
# To run on multiple nodes:
'''executors_per_node = 7
nodes=len(clients)
cores_per_executor=8
task_per_core=8
'''
# In[10]:
# to test:
nodes=1
executors_per_node = 2
cores_per_executor = 1
task_per_core = 1
# cache_size (GB of off-heap cache) and total_size (MB of memory budget per
# node) drive the per-executor memory settings derived below.
cache_size=50
total_size=340000
print('executor per node: {:d}\nparallelism: {:d}\nmemory: {:d}m\noffheap:{:d}m'.format(executors_per_node,nodes*executors_per_node*cores_per_executor*task_per_core,int(math.floor(nodes*total_size/(nodes*executors_per_node)))-1024-int(math.floor(cache_size*1024/(nodes*executors_per_node))),int(math.floor(cache_size*1024/(nodes*executors_per_node)))))
# In[11]:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
# In[40]:
# Spark configuration: memory/parallelism values are derived from the
# sizing constants above; the CCL_* environment and java options wire up
# OneCCL for distributed XGBoost training.
conf = SparkConf()\
    .set('spark.default.parallelism', '{:d}'.format(nodes*executors_per_node*cores_per_executor*task_per_core))\
    .set('spark.executor.instances', '{:d}'.format(executors_per_node*nodes))\
    .set('spark.files.maxPartitionBytes', '256m')\
    .set('spark.app.name', 'pyspark_final-xgboost-0.90-DMLC')\
    .set('spark.rdd.compress', 'False')\
    .set('spark.serializer','org.apache.spark.serializer.KryoSerializer')\
    .set('spark.executor.cores','{:d}'.format(cores_per_executor))\
    .set('spark.executor.memory', '{:d}m'.format(int(math.floor(nodes*total_size/(nodes*executors_per_node)))-1024-int(math.floor(cache_size*1024/(nodes*executors_per_node)))))\
    .set('spark.task.cpus','{:d}'.format(cores_per_executor))\
    .set('spark.driver.memory','24g')\
    .set('spark.memory.offHeap.enabled','True')\
    .set('spark.memory.offHeap.size','{:d}m'.format(int(math.floor(cache_size*1024/(nodes*executors_per_node)))))\
    .set('spark.executor.memoryOverhead','{:d}m'.format(int(math.floor(cache_size*1024/(nodes*executors_per_node)))+3000))\
    .set('spark.sql.join.preferSortMergeJoin','False')\
    .set('spark.memory.storageFraction','0.5')\
    .set('spark.executor.extraJavaOptions','-XX:+UseParallelGC -XX:+UseParallelOldGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -DCCL_ATL_TRANSPORT=ofi -DCCL_WORLD_SIZE=1 -DCCL_PM_TYPE=resizable -DCCL_KVS_IP_EXCHANGE=env -DCCL_KVS_IP_PORT=10.0.0.143_9877 -DWORK_DIR=/home/xgboost/install/OneCCL/oneccl/build/_install/env -DCCL_ROOT=/home/xgboost/install/OneCCL/oneccl/build/_install -DI_MPI_ROOT=/home/xgboost/install/OneCCL/oneccl/build/_install -DCCL_ATL_TRANSPORT_PATH=/home/xgboost/install/OneCCL/oneccl/build/_install/lib -DFI_PROVIDER_PATH=/home/xgboost/install/OneCCL/oneccl/build/_install/lib/prov')\
    .set('spark.driver.maxResultSize', 0)\
    .set('spark.eventLog.dir', '/home/yuzhou/spark_local')\
    .set('spark.executor.extraLibraryPath', '/home/xgboost/install/OneCCL/oneccl/build/_install/lib')\
    .set('spark.driver.extraClassPath', '/home/xgboost/.m2/repository/ml/dmlc/xgboost4j_2.12/1.1.0-SNAPSHOT/xgboost4j_2.12-1.1.0-SNAPSHOT.jar')\
    .set('spark.executor.extraClassPath', '/home/xgboost/.m2/repository/ml/dmlc/xgboost4j_2.12/1.1.0-SNAPSHOT/xgboost4j_2.12-1.1.0-SNAPSHOT.jar')\
    .setExecutorEnv('CCL_PM_TYPE','resizable')\
    .setExecutorEnv('CCL_ATL_TRANSPORT','ofi')\
    .setExecutorEnv('CCL_KVS_IP_EXCHANGE','env')\
    .setExecutorEnv('CCL_KVS_IP_PORT','10.0.0.143_9877')\
    .setExecutorEnv('CCL_ROOT','/home/xgboost/install/OneCCL/oneccl/build/_install')\
    .setExecutorEnv('CCL_WORLD_SIZE',1)\
    .setExecutorEnv('I_MPI_ROOT','/home/xgboost/install/OneCCL/oneccl/build/_install')\
    .setExecutorEnv('CCL_ATL_TRANSPORT_PATH','/home/xgboost/install/OneCCL/oneccl/build/_install/lib')\
    .setExecutorEnv('FI_PROVIDER_PATH','/home/xgboost/install/OneCCL/oneccl/build/_install/lib/prov')
# Disabled earlier variant of the configuration, kept for reference.
'''
conf = SparkConf() .set('spark.default.parallelism',
f'{nodes*executors_per_node*cores_per_executor*task_per_core}')
.set('spark.executor.instances', '{:d}'.format(executors_per_node*nodes))
.set('spark.files.maxPartitionBytes', '256m') .set('spark.app.name',
'pyspark_final-xgboost-0.90-DMLC') .set('spark.rdd.compress', 'False')
.set('spark.serializer','org.apache.spark.serializer.KryoSerializer')
.set('spark.executor.cores','{:d}'.format(cores_per_executor))
.set('spark.executor.memory',
'{:d}m'.format(int(math.floor(nodes*total_size/(nodes*executors_per_node)))-1024-int(math.floor(cache_size*1024/(nodes*executors_per_node)))))
.set('spark.task.cpus',f'{cores_per_executor}')
.set('spark.driver.memory','24g') .set('spark.memory.offHeap.enabled','True')
.set('spark.memory.offHeap.size','{:d}m'.format(int(math.floor(cache_size*1024/(nodes*executors_per_node)))))
.set('spark.executor.memoryOverhead','{:d}m'.format(int(math.floor(cache_size*1024/(nodes*executors_per_node)))+3000))
.set('spark.sql.join.preferSortMergeJoin','False')
.set('spark.memory.storageFraction','0.5')
.set('spark.executor.extraJavaOptions', '-XX:+UseParallelGC
-XX:+UseParallelOldGC -verbose:gc -XX:+PrintGCDetails
-XX:+PrintGCTimeStamps') .set('spark.driver.maxResultSize', 0)
.set('spark.eventLog.dir', '/home/yuzhou/spark_local')
'''
#.set('spark.kryoserializer.buffer.max', '2048m')
#spark.driver.maxResultSize it was "3g". Now it is unlimited
# In[42]:
#sc.stop()
#sc = SparkContext(conf=conf,master='yarn')
# To run on local node, single node distributed mode:
sc = SparkContext(conf=conf,master='yarn')
sc.setLogLevel('INFO')
spark = SQLContext(sc)
# Ship the sparkxgb Python wrapper to the executors.
sc.addPyFile('/home/xgboost/install/xgb/sparkxgb_0.83.zip')
# Give the YARN executors a moment to come up before submitting work.
time.sleep(10)
# In[ ]:
# loading and splitting data:
# replication != 1 so it is NOT guaranteed that each partition receives the same data
#df = spark.read.format('parquet').load('hdfs://sr507/user/yuzhou/xgboost_36_files.parquet')
#df = spark.read.format('parquet').load('hdfs://sr507/user/yuzhou/xgboost_3.5G.parquet') # was using this one
# replication=1 so it is guaranteed that each partition receives the same data
#df = spark.read.format('parquet').load('hdfs://sr507/user/yuzhou/xgboost_36_files_1rep.parquet')
# For comparison agains Rabit single-node without Spark:
df = spark.read.format('parquet').load('hdfs://10.1.0.143:9000//smallin/inputData/')
#For PR4824 (09/09/19):
#df = spark.read.format('parquet').load('hdfs://sr507/user/yuzhou/xgboost_36_files.parquet')
#print("Input DF numPartitions = {:d}".format(df.rdd.getNumPartitions()))
# One partition per task slot so XGBoost workers line up with partitions.
df = df.coalesce(executors_per_node*nodes*cores_per_executor)
#print(df.count())
#224 partitions
print('Completed data loading.')
# Disabled train/test split path, kept for reference.
'''
(trainingData, testData) = df.randomSplit([0.9, 0.1], seed = RANDOM_SEED)
trainingData=trainingData.coalesce(executors_per_node*nodes*cores_per_executor)
print('Completed coalesce in training data.')
testData=testData.coalesce(executors_per_node*nodes*cores_per_executor)
print('Completed coalesce in test data.')
trainingData.cache()
testData.cache()
print('trainingData count:', trainingData.count())
print('testData count:', testData.count())
######
print('Completed data spliting.')
# In[ ]:
(tr2, te2) = testData.randomSplit([0.9999, 0.0001], seed = RANDOM_SEED)
tr2=tr2.coalesce(executors_per_node*nodes*cores_per_executor)
print('te2 data count:', te2.count())
'''
# In[ ]:
from sparkxgb import XGBoostClassifier
# # Save and load model
# In[33]:
def run_train_orig(train_data):
    """Train an XGBoostClassifier on *train_data* with the original
    hard-coded mortgage-delinquency parameters, print the wall time, and
    return the fitted model.
    """
    # Use timeit.default_timer() rather than the bare time(): at module scope
    # ``time`` is first bound to the *module* (``import time, timeit``) and is
    # only rebound to a callable by the later ``from time import time``, so a
    # call made before that import executed raised TypeError.  This also
    # matches run_train()'s timing style.
    t1 = timeit.default_timer()
    xgboost = XGBoostClassifier(
        featuresCol="features",
        labelCol="delinquency_12",
        numRound=100,
        maxDepth=8,
        maxLeaves=256,
        alpha=0.9,
        eta=0.1,
        gamma=0.1,
        subsample=1.0,
        reg_lambda=1.0,
        scalePosWeight=2.0,
        minChildWeight=30.0,
        treeMethod='hist',
        objective='reg:linear', #squarederror', #if xgboost v0.82 needs to use 'reg:linear'
        growPolicy='lossguide', #depthwise
        numWorkers=executors_per_node*nodes*cores_per_executor,
        nthread=1,
        #evalMetric='logloss' # mconrado added that to test. The log loss is only defined for two or more labels.
    )
    model = xgboost.fit(train_data)
    t2 = timeit.default_timer()
    print(f"Training total time: {t2-t1}")
    return model
def run_train(train_data, params):
    """Fit an XGBoostClassifier built from *params* on *train_data*.

    Returns (fitted_model, training_time_seconds).
    """
    start = timeit.default_timer()
    classifier = XGBoostClassifier(**params)
    fitted_model = classifier.fit(train_data)
    elapsed = timeit.default_timer() - start
    return fitted_model, elapsed
import sklearn.metrics as metrics
from sklearn.metrics import auc
def run_predict(model, test_data):
    """Score *test_data* with *model* and compute the ROC AUC.

    Returns (auc, predict_time, conversion_auccalculation_time), with the
    last two measured in seconds.
    """
    # timeit.default_timer() avoids depending on the later
    # ``from time import time`` rebinding of the module-level name
    # (``import time`` makes ``time`` a module here); see run_train_orig.
    t1 = timeit.default_timer()
    results = model.transform(test_data)
    t2 = timeit.default_timer()
    predict_time = t2 - t1
    t1 = timeit.default_timer()
    preds = results.select(results.prediction).collect()
    Y_test = results.select(results.delinquency_12).collect()
    # Named roc_auc so it no longer shadows the sklearn.metrics.auc import.
    roc_auc = metrics.roc_auc_score(Y_test, preds)
    t2 = timeit.default_timer()
    #print(f"Conversion data + AUC calculation time: {t2-t1}")
    conversion_auccalculation_time = t2 - t1
    return roc_auc, predict_time, conversion_auccalculation_time
def calc_print_results(spent_time, preds, Y, msg='Results'):
    """Print error rate, ROC AUC and elapsed time for a set of predictions."""
    if (preds > 1).any():
        print('W: It seems predicted values are probabilities. Please convert them.')
    error_rate = 1 - metrics.accuracy_score(Y, preds)
    roc_auc = metrics.roc_auc_score(Y, preds)
    print(f'{msg}: \t\t {error_rate} \t {roc_auc} \t {spent_time}')
from sklearn.preprocessing import LabelEncoder
from time import time
def reeval_saved_model(X, Y, model_path, msg='Results'):
    """Reload a saved booster from *model_path* and re-score (X, Y).

    NOTE(review): ``xgb`` is never imported in this module (only sparkxgb's
    XGBoostClassifier is), so calling this function raises NameError unless
    ``import xgboost as xgb`` is added -- confirm before use.
    """
    loaded_model = xgb.XGBClassifier()
    booster = xgb.Booster()
    booster.load_model(model_path)
    loaded_model._Booster = booster
    # Rebuild the label encoder the sklearn wrapper normally creates in fit().
    loaded_model._le = LabelEncoder().fit(np.unique(Y))
    #preds_proba = loaded_model.predict_proba(X)
    t1 = time()
    preds = loaded_model.predict(X)
    calc_print_results(time()-t1, preds, Y, msg)
    print('Reevaluation of saved model completed:', model_path)
def save_model(model, model_path):
    """Persist the trained model's native booster in XGBoost binary format."""
    model.nativeBooster.saveModel(model_path)
def save_model_txt(model, model_path):
    """Write the model's tree dump to *model_path* as plain text, one dump
    segment per line."""
    dump = model.nativeBooster.getModelDump('', False, 'text')
    with open(model_path, 'w+') as out:
        out.writelines(segment + '\n' for segment in dump)
# Convert pyspark.sql.dataframe.DataFrame TO numpy array
def convert_dfspark_2_nparray(dfspark):
    """Pull the 'features' and 'delinquency_12' columns of a Spark DataFrame
    into dense numpy arrays (collects everything onto the driver)."""
    features_pdf = dfspark.select('features').toPandas()
    labels_pdf = dfspark.select('delinquency_12').toPandas()
    X_ndarray = np.array(features_pdf['features'].tolist())
    print('Instance:', X_ndarray[0])
    Y_ndarray = np.array(labels_pdf['delinquency_12'].tolist())
    print('Label:', Y_ndarray[0])
    print('Size of Y_ndarray:', Y_ndarray.size)
    return X_ndarray, Y_ndarray
# In[35]:
#X_ndarray, Y_ndarray = convert_dfspark_2_nparray(tr2)
# In[ ]:
#sc.setLogLevel('ALL')
# Wall clock for the whole train loop below.
overall_start_time = timeit.default_timer()
# XGBoost training parameters for the mortgage-delinquency binary task.
params = {'featuresCol': "features",
          'labelCol': "delinquency_12",
          'numRound': 100,
          'maxDepth': 8,
          'maxLeaves': 256,
          'alpha': 0.9,
          'eta': 0.1,
          'gamma': 0.1,
          'subsample': 1.0,
          'reg_lambda': 1.0,
          'scalePosWeight': 2.0,
          'minChildWeight': 30.0,
          'treeMethod': 'hist',
          'objective': 'reg:squarederror', #if xgboost v0.82 needs to use 'reg:linear'. If >= 0.9, uses squarederror
          'growPolicy': 'lossguide', #depthwise
          'numWorkers': executors_per_node*nodes,
#          'nthread': task_per_core
          'nthread':1
#          'verbosity': 3
          }
print("XGBoost Parameters: \n", params)
# Train nRuns times and report the aggregate wall time.
nRuns = 1
for i in range(nRuns):  # range(nRuns) instead of range(0, nRuns)
    model, train_time = run_train(df, params)  #trainingData, params
    print('Completed training the model. Time(sec): ', train_time)
    # Save model as binary format -- disabled evaluation/saving path, kept
    # for reference (uses the split trainingData/testData variant above).
    '''
    model_path = f"/home/yuzhou/notebook/mconrado/results/spark_1node_xgb_trainingData_testData_1scp1thread_sep27_{i}.modelbin"
    model_path_txt = f"/home/yuzhou/notebook/mconrado/results/spark_1node_xgb_trainingData_testData_28cpu_1ex28cor_determiFalse_oct1_{i}.txt"
    # save_model(model, model_path)
    save_model_txt(model, model_path_txt)
    print('Saving model step completed.')
    auc, predict_time, conversion_auccalculation_time = run_predict(model, testData)
    #reeval_saved_model(X_ndarray, Y_ndarray, model_path, 'Using different data to train and test')
    print(auc, ",", train_time, ",", predict_time)
    print("AUC, training time, prediction time")
    '''
print('Overall time for {} runs: {}'.format(nRuns, timeit.default_timer() - overall_start_time))
|
import requests
import json
import pandas as pd
import numpy as np
from requests.auth import HTTPBasicAuth
from datetime import datetime
import time
import re
import sys
import numpy as np
# Seconds to wait before retrying after a connection error or timeout.
delay_conn = 60
# (client_id, client_secret) OAuth pairs rotated between requests to spread
# the GitHub API rate limit across several registered applications.
clients = [('your_clinet_id_1', 'your_clinet_secret_1'),
           ('your_clinet_id_2', 'your_clinet_secret_2'),
           ('...', '...')]
clients_number = len(clients)
client_index = 0
headers = {}
# Opt in to the starfox preview media type (team-discussions API preview).
headers['Accept'] = 'application/vnd.github.starfox-preview+json'
# URLs that answered OK but not HTTP 200; kept for post-run inspection.
unnormal_requests = []
def get_data_pages(req_url):
    """Fetch every page of a paginated GitHub API listing.

    Follows the ``page`` query parameter until an empty or short (<100 item)
    page is returned, sleeping through GitHub's rate-limit window when the
    remaining quota drops below 10.

    Returns (items, number_of_pages) on success, or (False, 0) when the API
    answers with a failing / non-200 response (non-200 OK responses are
    recorded in the module-level ``unnormal_requests`` list).
    """
    page_number = 1
    resp_list = []
    # Retry budget for connection problems.  The previous version reset this
    # on every page and, on failure, recursed into get_data_pages() while
    # discarding the recursive result and then continuing the loop --
    # duplicating pages.  A single loop-level budget with `continue` retries
    # the same page instead.
    number_of_tries = 10
    while True:
        try:
            r = requests.get(req_url + "&page=" + str(page_number), headers=headers)
        except requests.exceptions.Timeout as e:
            print("-------timeout-------")
            print(e)
            number_of_tries -= 1
            if not number_of_tries:
                sys.exit(1)
            time.sleep(delay_conn)
            continue  # retry the same page
        except requests.ConnectionError as e:
            print("-------connection error-------")
            print(e)
            number_of_tries -= 1
            if not number_of_tries:
                sys.exit(1)
            time.sleep(delay_conn)
            continue  # retry the same page
        if not r.ok:
            j = json.loads(r.text or r.content)
            print('\n---'+str(r))
            print('\n---'+str(j['message']))
            return False, 0
        if r.status_code != 200:
            unnormal_requests.append(req_url)
            return False, 0
        result = json.loads(r.text or r.content)
        resp_list += result
        # An empty or short page means we have reached the last page.
        if not result:
            break
        if len(result) < 100:
            break
        page_number += 1
        # check for max limit: sleep until the reset time when the quota is
        # nearly exhausted.  The headers may be absent, hence the guard.
        try:
            if int(r.headers["X-RateLimit-Remaining"]) < 10:
                print("limit exceeded!!!!!!!!!!!!")
                delay = float(r.headers["X-RateLimit-Reset"]) - time.mktime(time.localtime())
                print('sleeping for '+str(delay)+' seconds...')
                print("current time:" + str(datetime.now()))
                time.sleep(int(delay))
        except (KeyError):
            pass
    return resp_list, page_number
# Load the list of "owner/repo" identifiers to scrape.
with open("repo_addresses.txt") as f:
    repo_addresses = json.loads(f.read())
repos_dict = {}
counter = 0
start_time = datetime.now()
for repo_address in repo_addresses:
    print(repo_address)
    repo_name = repo_address.split('/')[1]
    client_id, client_secret = clients[client_index]
    req = f"https://api.github.com/repos/{repo_address}/contributors?per_page=100&client_id={client_id}&client_secret={client_secret}"
    cntrb_obj, num_of_pages = get_data_pages(req)
    if cntrb_obj:
        counter += num_of_pages
        # Rotate through the registered OAuth clients to spread the load.
        client_index += 1
        if(client_index == clients_number):
            client_index = 0
        # Each client allows 5000 requests/hour; once the combined budget
        # is spent, sleep out the remainder of the hour.
        if counter == 5000*clients_number:
            end_time = datetime.now()
            duration = (end_time - start_time).seconds
            if(duration < 3600):
                time.sleep(3600 - duration)
                print(f"sleep for {3600 - duration} seconds")
            counter = 0
            start_time = datetime.now()
        # Fix: this map was previously built outside the `if cntrb_obj:`
        # guard, so a failed request -- get_data_pages() returns (False, 0)
        # -- raised "TypeError: 'bool' object is not iterable".  Failed
        # repos are now simply skipped.
        d = dict()
        for i in cntrb_obj:
            d[i["login"]] = i["contributions"]
        repos_dict[repo_name] = d
with open(f"data/repo_cntrb.txt", 'w') as f:
    f.write(json.dumps(repos_dict , indent = 4))
print("---finished---")
print(unnormal_requests)
<gh_stars>0
from bs4 import BeautifulSoup
import requests
from googlesearch import search
import whois_script
import geolocation_script
import re
# all us states
# Space-less forms: matched against geolocation output that has had its
# spaces stripped (see contact_func_solo / contact_func_all).
us_states = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'NewHampshire', 'NewJersey', 'NewMexico', 'NewYork', 'NorthCarolina', 'NorthDakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'RhodeIsland', 'SouthCarolina', 'SouthDakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'WestVirginia', 'Wisconsin', 'Wyoming']
# Display forms, index-aligned with us_states for the reverse lookup.
us_states_with_spaces = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']
def getURL(companyName, State):
    """Google for the company's contact page and return the top result URL.

    Builds the query "<company> <state> contact company", prints the query
    and each hit, and returns the first of (up to) five results.
    """
    query = ' '.join([companyName, State, "contact", "company"])
    print(query)
    found = []
    for hit in search(query, num_results=5):
        print(hit)
        found.append(hit)
    # First hit is assumed to be the company's own contact page.
    return found[0]
def getTenDigits(numbers):
    """Return the first candidate string containing exactly ten decimal digits.

    Phone-number candidates with any other digit count are skipped; if none
    qualifies, a human-readable sentinel message is returned.
    """
    decimal_digits = "1234567890"
    for candidate in numbers:
        digit_count = sum(1 for ch in candidate if ch in decimal_digits)
        if digit_count == 10:
            return candidate
    return "Unable to determine number."
def contact_func_solo(address):
    """Find a contact phone number for the owner of IP/domain *address*.

    Resolves the owner via WHOIS, their US state via geolocation, googles
    for the owner's contact page, and scrapes a ten-digit phone number from
    its raw HTML. Returns "<number> - <url>".
    """
    owner = whois_script.whois_func(address)
    location = geolocation_script.geolocation_func(address)
    location = location.replace(' ', '').split(',')
    # Pick whichever location component is a US state name.
    # NOTE(review): .pop() raises KeyError if no component matches a state —
    # presumably geolocation_func always yields one; confirm.
    state = (set(location) & set(us_states)).pop()
    state = us_states_with_spaces[us_states.index(state)]
    profile_url = getURL(owner, state)
    src = requests.get(profile_url).text
    # Cleanup: the previous BeautifulSoup parse of `src` was never used (and
    # pulled in the lxml parser); the phone number is regexed from raw HTML.
    # Phone-number regex from https://stackoverflow.com/a/3868861/15164646
    match_phone = re.findall(r'((?:\+\d{2}[-\.\s]??|\d{4}[-\.\s]??)?(?:\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4}))', src)
    contact = getTenDigits(match_phone)
    return contact + " - " + profile_url
def contact_func_all(address, whois, geolocation):
    """Like contact_func_solo, but with WHOIS/geolocation results supplied.

    *whois* is the owner name, *geolocation* the comma-separated location
    string. Returns "<number> - <url>".
    """
    owner = whois
    location = geolocation
    location = location.replace(' ', '').split(',')
    # Pick whichever location component is a US state name.
    # NOTE(review): .pop() raises KeyError if no component matches a state.
    state = (set(location) & set(us_states)).pop()
    state = us_states_with_spaces[us_states.index(state)]
    profile_url = getURL(owner, state)
    src = requests.get(profile_url).text
    # Cleanup: the previous BeautifulSoup parse of `src` was never used (and
    # pulled in the lxml parser); the phone number is regexed from raw HTML.
    # Phone-number regex from https://stackoverflow.com/a/3868861/15164646
    match_phone = re.findall(r'((?:\+\d{2}[-\.\s]??|\d{4}[-\.\s]??)?(?:\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4}))', src)
    contact = getTenDigits(match_phone)
    return contact + " - " + profile_url
|
<reponame>30blay/pyamplitude<filename>pyamplitude/tests/test_amplituderestapi.py
# !/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from pyamplitude.apiresources import ProjectsHandler
from pyamplitude.amplituderestapi import AmplitudeRestApi
class Test_AmplitudeRestApi(unittest.TestCase):
    """
    General test verifying the data structure of expected responses,
    as documented at the official Amplitude Dashboard REST API reference:
    https://amplitude.zendesk.com/hc/en-us/articles/205469748-Dashboard-Rest-API-Export-Amplitude-Dashboard-Data
    Last update: October 2017
    NOTE(review): this suite predates Python 3 (it checks for `unicode`);
    run it under Python 2 or adapt the type checks before porting.
    """
    def setUp(self):
        # Note: Complete with project name, api key and secret key...
        myAPP = ProjectsHandler(project_name = '',
                                api_key = '',
                                secret_key = '')
        self.apiconector = AmplitudeRestApi(project_handler = myAPP,
                                            show_logs = True,
                                            log_query_cost = False)
    def _sanity_check(self, response):
        """ General assertion tests for each Amplitude REST request. """
        self.assertTrue(response is not None)
        self.assertTrue(isinstance(response, dict), 'error: response different from dict type')
        for element in response:
            self.assertTrue(isinstance(element, unicode),'error: response is not unicode')
    def _get_response_types(self, response):
        """ Return the type of each top-level value in the response dict. """
        types = []
        for x in response:
            types.append(type(response[x]))
        return types
    def test_get_active_and_new_user_count(self):
        result = self.apiconector.get_active_and_new_user_count(start = '20170701',
                                                                end = '20170702',
                                                                m = 'active',
                                                                interval = 1,
                                                                segment_definitions = None,
                                                                group_by = None)
        self.assertEqual(len(result), 1)
        self._sanity_check(response=result)
        expected_types = ['dict']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
    def test_get_session_length_distribution(self):
        result = self.apiconector.get_session_length_distribution(start = '20170701',
                                                                  end = '20170702')
        self._sanity_check(response=result)
        expected_keys = ['withSets', 'novaRuntime', 'novaCost', 'novaRequestDuration',
                         'wasCached', 'minSampleRate', 'timeComputed', 'cacheFreshness', 'data', 'transformationIds']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
        expected_types = ['bool','int','int','int','bool','int','int','unicode','dict','list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
    def test_get_average_session_length(self):
        result = self.apiconector.get_average_session_length(start = '20170701',
                                                             end = '20170702')
        self._sanity_check(response=result)
        expected_keys = ['withSets','novaRuntime','novaCost','novaRequestDuration',
                         'wasCached','minSampleRate','timeComputed','cacheFreshness','data', 'transformationIds']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys),sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux+1]))
        expected_types = ['bool', 'int', 'int', 'int', 'bool', 'int', 'int', 'unicode', 'dict', 'list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
    def test_get_user_composition(self):
        result = self.apiconector.get_user_composition(start = '20170701',
                                                       end = '20170702',
                                                       proper = ['country','paying'])
        self._sanity_check(response=result)
        expected_keys = ['queryIds','novaRuntime','novaCost','novaRequestDuration','wasCached','minSampleRate','timeComputed',
                         'cacheFreshness','data','transformationIds']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys),sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux+1]))
        expected_types = ['list','int','int','int','bool','int','int','unicode','dict','list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
    def test_get_events(self):
        result = self.apiconector.get_events(start = '20170701',
                                             end = '20170702',
                                             event_name = [],
                                             mode = 'totals',
                                             interval = '1')
        self._sanity_check(response=result)
        expected_keys = ['novaRuntime', 'novaCost', 'novaRequestDuration', 'wasCached', 'timeComputed', 'cacheFreshness', 'data',
                         'transformationIds']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
        expected_types = ['int', 'int', 'int', 'bool', 'int', 'unicode', 'dict', 'list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types, response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
    def test_get_event_list(self):
        result = self.apiconector.get_event_list()
        self._sanity_check(response=result)
        expected_types = ['int','int','int','bool','int','unicode','list','list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['novaRuntime', 'novaCost', 'novaRequestDuration', 'wasCached', 'timeComputed',
                        'cacheFreshness','data', 'transformationIds']
        response_keys = result.keys()
        aux = 0
        # Bug fix: this loop previously re-compared the *types* lists instead
        # of the key lists it had just built (copy-paste error).
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
    def test_get_user_activity(self):
        result = self.apiconector.get_user_activity(user='12345',offset='',limit='100')
        self._sanity_check(response=result)
        expected_types = ['dict', 'list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['userData', 'events']
        response_keys = result.keys()
        aux = 0
        # Bug fix: this loop previously re-compared the *types* lists instead
        # of the key lists it had just built (copy-paste error).
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
    def test_get_user_search(self):
        result = self.apiconector.get_user_search(user='12345')
        self._sanity_check(response=result)
        expected_types = ['list', 'unicode']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['matches', 'type']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
    def test_get_revenue_analysis(self):
        result = self.apiconector.get_revenue_analysis(start='20170701',
                                                       end='20170702',
                                                       m='total',
                                                       interval=1,
                                                       segment_definitions=None,
                                                       group_by=None)
        self._sanity_check(response=result)
        expected_types = ['list', 'int','int','int', 'bool','int','int','unicode','dict','list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['queryIds', 'novaRuntime', 'novaCost', 'novaRequestDuration', 'wasCached', 'minSampleRate',
                         'timeComputed', 'cacheFreshness', 'data', 'transformationIds']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
    def test_get_realtime_active_users(self):
        result = self.apiconector.get_realtime_active_users(interval=5)
        self._sanity_check(response=result)
        expected_types = ['type', 'dict']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['data']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
    def test_get_annotations(self):
        result = self.apiconector.get_annotations()
        self._sanity_check(response=result)
        expected_types = ['list']
        response_types = self._get_response_types(result)
        aux = 0
        for type_element in zip(expected_types,response_types):
            self.assertTrue(str(type_element[aux]) in str(type_element[aux + 1]))
        expected_keys = ['data']
        response_keys = result.keys()
        aux = 0
        for element in zip(sorted(response_keys), sorted(expected_keys)):
            self.assertTrue(str(element[aux]) == str(element[aux + 1]))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
import base64
import hashlib
import json
import requests
from Cryptodome import Random
from Cryptodome.Cipher import AES
class ProtectedTextApi:
    """Minimal client for a protectedtext.com site.

    Fetches and decrypts the site's content on construction; supports
    save(), view() and deleteSite(). Performs network I/O in __init__.
    """

    def __init__(self, site_id, passwd):
        # SHA-512 of "/<site_id>"; the site appends this hash to the content
        # before encryption so a wrong password can be detected on decrypt.
        self.siteHash = hashlib.sha512(("/" + site_id).encode("latin1")).hexdigest()
        self.pas = passwd
        self.passHash = hashlib.sha512(passwd.encode("latin1")).hexdigest()
        self.endpoint = "https://www.protectedtext.com/" + site_id
        # Network call: fetch the current encrypted site state as JSON.
        self.siteObj = (requests.get(self.endpoint + "?action=getJSON")).json()
        self.dbversion = self.siteObj["currentDBVersion"]
        self.rawtext = decrypt(self.siteObj["eContent"], self.pas.encode())
        # Strip the trailing 128-hex-char site hash appended on save.
        self.rawtext = self.rawtext[:len(self.rawtext) - 128]
    def save(self, textToSave):
        """Encrypt *textToSave* (with the site hash appended) and POST it."""
        encript = str(textToSave + self.siteHash)
        textEncrypted = encrypt(encript, self.pas.encode())
        # initHashContent proves we know the current content; currentHashContent
        # commits to the new content (see getWritePermissionProof).
        postdata = {"initHashContent": self.getWritePermissionProof(self.rawtext),
                    "currentHashContent": self.getWritePermissionProof(textToSave), "encryptedContent": textEncrypted,
                    "action": "save"}
        ret = (requests.post(self.endpoint, data=postdata, headers={
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/73.0.3683.75 Safari/537.36 "
        })).text
        self.rawtext = textToSave
        return ret
    def deleteSite(self):
        """Delete the remote site; returns the server's JSON response."""
        inithashcontent = self.getWritePermissionProof(self.rawtext)
        deleteAction = {"initHashContent": inithashcontent, "action": "delete"}
        return (requests.post(self.endpoint, deleteAction)).json()
    def view(self):
        """Return the decrypted plaintext currently held by this client."""
        return self.rawtext
    def getWritePermissionProof(self, content):
        # DB version 1 hashed only the content; later versions also mix in
        # the password hash and append the version number.
        if self.dbversion == 1:
            return hashlib.sha512(f"{content}".encode("latin1")).hexdigest()
        else:
            return str(hashlib.sha512(f"{content}{self.passHash}".encode("latin1")).hexdigest()) + f"{self.dbversion}"
BLOCK_SIZE = 16  # AES block size in bytes


def pad(data):
    """PKCS#7-pad *data* (a str) to a BLOCK_SIZE multiple; return bytes."""
    fill = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return (data + chr(fill) * fill).encode()
def unpad(data):
    """Strip PKCS#7 padding from *data* (bytes or str)."""
    # bytes indexing yields int; str indexing yields a 1-char str.
    last = data[-1]
    pad_len = last if type(last) == int else ord(last)
    return data[:-pad_len]
def bytes_to_key(data, salt, output=48):
    """Derive *output* bytes of key material (OpenSSL EVP_BytesToKey, MD5).

    Repeatedly hashes passphrase+salt until enough material is produced.
    *salt* must be exactly 8 bytes.
    """
    assert len(salt) == 8, len(salt)
    material = data + salt
    digest = hashlib.md5(material).digest()
    derived = digest
    while len(derived) < output:
        digest = hashlib.md5(digest + material).digest()
        derived += digest
    return derived[:output]
def encrypt(message, passphrase):
    """AES-256-CBC encrypt *message* in OpenSSL "Salted__" format (base64).

    A random 8-byte salt is drawn per call, so output is non-deterministic.
    """
    salt = Random.new().read(8)
    derived = bytes_to_key(passphrase, salt, 32 + 16)
    cipher = AES.new(derived[:32], AES.MODE_CBC, derived[32:])
    blob = b"Salted__" + salt + cipher.encrypt(pad(message))
    return base64.b64encode(blob).decode("utf-8")
def decrypt(encrypted, passphrase):
    """Decrypt a base64 OpenSSL "Salted__" AES-256-CBC blob to a str."""
    blob = base64.b64decode(encrypted)
    assert blob[0:8] == b"Salted__"
    salt = blob[8:16]
    derived = bytes_to_key(passphrase, salt, 32 + 16)
    cipher = AES.new(derived[:32], AES.MODE_CBC, derived[32:])
    return unpad(cipher.decrypt(blob[16:])).decode("utf-8")
class DB:
    """Thin JSON document store backed by a protectedtext.com site."""

    def __init__(self, login: str, password: str) -> None:
        self.text = None  # ProtectedTextApi session; created lazily
        self.login = login
        self.password = password

    @property
    def data(self) -> dict:
        # Re-fetch the site on every access so reads see remote updates.
        self.text = ProtectedTextApi(self.login, self.password)
        return json.loads(self.text.view())

    def save(self, data) -> None:
        # Bug fix: calling save() before .data had ever been read raised
        # AttributeError because self.text was still None.
        if self.text is None:
            self.text = ProtectedTextApi(self.login, self.password)
        self.text.save(json.dumps(data))
|
<reponame>AndreasGocht/PyOgg
import ctypes
from .pyogg_error import PyOggError
from .ogg import PYOGG_OGG_AVAIL
from .vorbis import PYOGG_VORBIS_AVAIL, PYOGG_VORBIS_FILE_AVAIL, PYOGG_VORBIS_ENC_AVAIL
from .opus import PYOGG_OPUS_AVAIL, PYOGG_OPUS_FILE_AVAIL, PYOGG_OPUS_ENC_AVAIL
from .flac import PYOGG_FLAC_AVAIL
#: PyOgg version number. Versions should comply with PEP440.
__version__ = '0.7'
# Vorbis decoding needs the Ogg container library plus both Vorbis
# libraries; otherwise export placeholder classes that raise on use so
# attribute access still works and the failure message is explicit.
if (PYOGG_OGG_AVAIL and PYOGG_VORBIS_AVAIL and PYOGG_VORBIS_FILE_AVAIL):
    # VorbisFile
    from .vorbis_file import VorbisFile
    # VorbisFileStream
    from .vorbis_file_stream import VorbisFileStream
else:
    class VorbisFile: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            raise PyOggError("The Vorbis libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
    class VorbisFileStream: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            raise PyOggError("The Vorbis libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
# Opus file support needs the Ogg container library plus Opus and OpusFile.
if (PYOGG_OGG_AVAIL and PYOGG_OPUS_AVAIL and PYOGG_OPUS_FILE_AVAIL):
    # OpusFile
    from .opus_file import OpusFile
    # OpusFileStream
    from .opus_file_stream import OpusFileStream
    from .opus_memory import OpusMemeory
    # Bug fix: the real import exposes the misspelled name "OpusMemeory",
    # while the fallback branch below defines "OpusMemory" — so the correctly
    # spelled name only existed when the libraries were *missing*. Alias both
    # spellings in both branches for a consistent, backward-compatible API.
    OpusMemory = OpusMemeory
else:
    class OpusFile: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_AVAIL:
                raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_FILE_AVAIL:
                raise PyOggError("The OpusFile library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            raise PyOggError("Unknown initialisation error")
    class OpusMemory: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_AVAIL:
                raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_FILE_AVAIL:
                raise PyOggError("The OpusFile library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            raise PyOggError("Unknown initialisation error")
    class OpusFileStream: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_AVAIL:
                raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            if not PYOGG_OPUS_FILE_AVAIL:
                raise PyOggError("The OpusFile library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            raise PyOggError("Unknown initialisation error")
    # Keep the misspelled alias available in the fallback branch too.
    OpusMemeory = OpusMemory
# The raw Opus encoder/decoder only need the Opus library itself.
if PYOGG_OPUS_AVAIL:
    # OpusEncoder
    from .opus_encoder import OpusEncoder
    # OpusBufferedEncoder
    from .opus_buffered_encoder import OpusBufferedEncoder
    # OpusDecoder
    from .opus_decoder import OpusDecoder
else:
    class OpusEncoder: # type: ignore
        def __init__(*args, **kw):
            raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
    class OpusBufferedEncoder: # type: ignore
        def __init__(*args, **kw):
            raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
    class OpusDecoder: # type: ignore
        def __init__(*args, **kw):
            raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
# Writing Ogg Opus files needs both the Ogg and Opus libraries.
if (PYOGG_OGG_AVAIL and PYOGG_OPUS_AVAIL):
    # OggOpusWriter
    from .ogg_opus_writer import OggOpusWriter
else:
    class OggOpusWriter: # type: ignore
        def __init__(*args, **kw):
            if not PYOGG_OGG_AVAIL:
                raise PyOggError("The Ogg library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
            # Bug fix: corrected "was't" -> "wasn't" in the error message.
            raise PyOggError("The Opus library wasn't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
# FLAC support only needs the FLAC library.
if PYOGG_FLAC_AVAIL:
    # FlacFile
    from .flac_file import FlacFile
    # FlacFileStream
    from .flac_file_stream import FlacFileStream
else:
    class FlacFile: # type: ignore
        def __init__(*args, **kw):
            raise PyOggError("The FLAC libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
    class FlacFileStream: # type: ignore
        def __init__(*args, **kw):
            raise PyOggError("The FLAC libraries weren't found or couldn't be loaded (maybe you're trying to use 64bit libraries with 32bit Python?)")
|
<reponame>TeddyGlass/MLpipeline<filename>src/pipeline/optimizer.py
from trainer import Trainer
from sklearn.metrics import log_loss
from utils import root_mean_squared_error
from sklearn.model_selection import StratifiedKFold, KFold
from xgboost import XGBRegressor, XGBClassifier
from lightgbm import LGBMRegressor, LGBMClassifier
from neuralnetwork import NNRegressor, NNClassifier
import numpy as np
import optuna
class Objective:
    '''
    Optuna objective: K-Fold cross-validated loss for a wrapped estimator.
    # Usage
    obj = Objective(LGBMRegressor(), X, y)
    study = optuna.create_study(
        sampler=optuna.samplers.RandomSampler(seed=123))
    study.optimize(obj, n_trials=10, n_jobs=-1)
    '''
    def __init__(self, model, x, y, n_splits, early_stopping_rounds, random_state):
        self.model = model
        # Class name of the underlying estimator, e.g. "LGBMClassifier";
        # used below to pick the search space, CV splitter and loss.
        self.model_type = type(self.model.get_model()).__name__
        self.x = x
        self.y = y
        self.n_splits = n_splits
        self.random_state = random_state
        self.early_stopping_rounds = early_stopping_rounds
    def __call__(self, trial):
        # Build the hyperparameter search space for the wrapped model family.
        # NOTE(review): if model_type matches none of LGBM/XGB/NN, self.SPACE
        # (and `cv` below) are never assigned and an AttributeError/NameError
        # follows — presumably only supported wrappers are passed; confirm.
        if 'LGBM' in self.model_type:
            self.SPACE = {
                'num_leaves': trial.suggest_int('num_leaves', 8, 31),
                'subsample': trial.suggest_uniform('subsample', 0.60, 0.80),
                'colsample_bytree': trial.suggest_uniform(
                    'colsample_bytree', 0.60, 0.80),
                'bagging_freq': trial.suggest_int(
                    'bagging_freq', 1, 51, 5),
                'min_child_weight': trial.suggest_loguniform(
                    'min_child_weight', 1, 32),
                'min_child_samples': int(trial.suggest_discrete_uniform(
                    'min_child_samples', 5, 50, 5)),
                'min_split_gain': trial.suggest_loguniform(
                    'min_split_gain', 1e-5, 1e-1),
                'learning_rate': 0.05,
                'n_estimators': 1000000,
                'random_state': 1112
            }
        elif 'XGB' in self.model_type:
            self.SPACE = {
                'subsample': trial.suggest_uniform(
                    'subsample', 0.65, 0.85),
                'colsample_bytree': trial.suggest_uniform(
                    'colsample_bytree', 0.65, 0.80),
                'gamma': trial.suggest_loguniform(
                    'gamma', 1e-8, 1.0),
                'min_child_weight': trial.suggest_loguniform(
                    'min_child_weight', 1, 32),
                'learning_rate': 0.05,
                'n_estimators': 1000000,
                'random_state': 1112
            }
        elif 'NN' in self.model_type:
            self.SPACE = {
                "input_dropout": trial.suggest_uniform(
                    "input_dropout", 0.01, 0.4),
                "hidden_layers": trial.suggest_int(
                    "hidden_layers", 1, 2),
                'hidden_units': int(trial.suggest_discrete_uniform(
                    'hidden_units', 64, 256, 64)),
                'hidden_dropout': trial.suggest_uniform(
                    'hidden_dropout', 0.01, 0.4),
                'batch_norm': trial.suggest_categorical(
                    'batch_norm', ['before_act', 'non']),
                'batch_size': int(trial.suggest_discrete_uniform(
                    'batch_size', 32, 128, 16)),
                'learning_rate': 1e-5,
                'epochs': 10000
            }
        # cross validation
        # Stratified splits for classifiers, plain K-Fold for regressors.
        if 'Classifier' in self.model_type:
            cv = StratifiedKFold(n_splits=self.n_splits, random_state=self.random_state, shuffle=True)
        elif 'Regressor' in self.model_type:
            cv = KFold(n_splits=self.n_splits, random_state=self.random_state, shuffle=True)
        # validate average loss in K-Fold CV on a set of parameters.
        LOSS = []
        for tr_idx, va_idx in cv.split(self.x, self.y):
            # Instantiate a fresh wrapped model per fold with the trial params.
            if 'LGBM' in self.model_type:
                if 'Classifier' in self.model_type:
                    model_ = Trainer(LGBMClassifier(**self.SPACE))
                elif 'Regressor' in self.model_type:
                    model_ = Trainer(LGBMRegressor(**self.SPACE))
            elif 'XGB' in self.model_type:
                if 'Classifier' in self.model_type:
                    model_ = Trainer(XGBClassifier(**self.SPACE))
                elif 'Regressor' in self.model_type:
                    model_ = Trainer(XGBRegressor(**self.SPACE))
            elif 'NN' in self.model_type:
                if 'Classifier' in self.model_type:
                    model_ = Trainer(NNClassifier(**self.SPACE))
                elif 'Regressor' in self.model_type:
                    model_ = Trainer(NNRegressor(**self.SPACE))
            model_.fit(
                self.x[tr_idx],
                self.y[tr_idx],
                self.x[va_idx],
                self.y[va_idx],
                self.early_stopping_rounds
            )
            y_pred = model_.predict(self.x[va_idx]) # best_iteration
            # Log-loss for classification, RMSE for regression.
            if 'Classifier' in self.model_type:
                loss = log_loss(self.y[va_idx], y_pred)
            elif 'Regressor' in self.model_type:
                loss = root_mean_squared_error(self.y[va_idx], y_pred)
            LOSS.append(loss)
        return np.mean(LOSS)
def optuna_search(obj, n_trials, n_jobs, random_state):
    """Run a random-sampler Optuna study over *obj*; return the best params."""
    sampler = optuna.samplers.RandomSampler(seed=random_state)
    study = optuna.create_study(sampler=sampler)
    study.optimize(obj, n_trials=n_trials, n_jobs=n_jobs)
    return study.best_params
# Module is import-only; no CLI behavior.
if __name__ == "__main__":
    pass
|
import os
import argparse
import configparser
from google_email_api.quickstart import main as google_email_service
from oauth2client.service_account import ServiceAccountCredentials as SAC
import gspread
from google_email_api.Google import Create_Service
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def init_args():
    """Parse this script's command-line options."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-cp', '--configs_path', default='./configs', type=str, help='The configs path')
    # Example of a positional multi-value argument:
    # parser.add_argument("arg1", nargs=3, type=int, help="first argument: enter three integers")
    return parser.parse_args()
def prepare_GoogleForms_api(configs_path, google_client_url='https://spreadsheets.google.com/feeds'):
    """Authorize gspread with a service account and open the configured sheet.

    Reads the service-account JSON and the spreadsheet key from files under
    *configs_path*; returns the opened gspread spreadsheet object.
    """
    Json = '{}/client_secrets.json'.format(configs_path)
    Url = [google_client_url]
    config_ini = configparser.ConfigParser()
    # Bug fix: config_ini_path was never defined (NameError at runtime).
    # Derive it from configs_path; assumes the file is named config.ini —
    # TODO confirm the actual INI filename used by this project.
    config_ini_path = '{}/config.ini'.format(configs_path)
    config_ini.read(config_ini_path, encoding="utf-8")
    GoogleSheets_key = config_ini.get('GoogleSheets', 'GoogleSheets_key')# GoogleSheets_key = '<KEY>'
    Connect = SAC.from_json_keyfile_name(Json, Url)
    GoogleSheets = gspread.authorize(Connect)
    google_form = GoogleSheets.open_by_key(GoogleSheets_key)
    return google_form
def prepare_Gmail_api(configs_path, API_NAME='gmail', API_VERSION='v1', scope='https://mail.google.com/'):
    """Build and return an authorized Gmail API service client."""
    client_secret_file = '{}/credentials.json'.format(configs_path)
    scopes = [scope]
    return Create_Service(client_secret_file, API_NAME, API_VERSION, scopes)
def create_email_message(email_address = '??@??', download_link='https://drive.google.com/???????=sharing'):
    """Build a Gmail-API message body containing *download_link*.

    Returns a dict of the form {'raw': <urlsafe-base64 str>} ready to pass
    as the body of users().messages().send().
    """
    email = MIMEMultipart()
    email['to'] = email_address
    email['subject'] = 'This is a title.'
    context = '''Welcome, \n\nTo download the file, please click on the following link:\n{0} \n\nAuthor'''.format(download_link)
    email.attach(MIMEText(context, 'plain'))
    # Bug fix: urlsafe_b64encode() requires bytes, but as_string() returns a
    # str (TypeError at runtime). Use as_bytes() and decode, since the Gmail
    # API expects the 'raw' field to be a str.
    return {'raw': base64.urlsafe_b64encode(email.as_bytes()).decode()}
if __name__ == '__main__':
    args = init_args()
    if not os.path.isdir(args.configs_path):
        # Ensure the configs directory exists; on first use also run the
        # Google email quickstart to initialize API credentials.
        os.makedirs(args.configs_path)
        google_email_service(args.configs_path)
    # connect to the google form
    google_form = prepare_GoogleForms_api(configs_path=args.configs_path)
    # get target form's sheet
    target_sheet = google_form.sheet1
    # Gmail Service args
    gmail_service = prepare_Gmail_api(configs_path=args.configs_path, API_NAME='gmail', API_VERSION='v1', scope='https://mail.google.com/')
    # Column 7 holds per-row send flags: "1" = already sent, "2" = stop marker.
    # NOTE(review): sending_flag and endpoint both read cell(2,7), and endpoint
    # (a str or None) is compared to the int 1 — looks suspect; confirm intent.
    sending_flag = target_sheet.cell(2,7).value
    endpoint = target_sheet.cell(2,7).value
    i = 2  # first data row of the sheet (row 1 is presumably the header)
    if endpoint != 1:
        target_sheet.update_cell(3,7, "2")
    # Walk rows until the "2" stop marker, emailing each address not yet
    # flagged as sent.
    while sending_flag != '2':
        email_address = target_sheet.cell(i,2).value
        download_link = 'https://drive.google.com/drive/folders/??????????=sharing'
        if sending_flag == '1':
            print('N') # row already sent — skip (NOTE(review): debug print?)
        else:
            email_message = create_email_message(email_address=email_address, download_link=download_link)
            message = gmail_service.users().messages().send(userId='me', body=email_message).execute()
            print(message)
            # Mark this row as sent.
            target_sheet.update_cell(i,7, "1")
        i+=1
        sending_flag = target_sheet.cell(i,7).value
        print (sending_flag)
<reponame>miu200521358/segmentation-kit-mmd<filename>mmd/mmd/PmxData.py
# -*- coding: utf-8 -*-
#
import _pickle as cPickle
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import math
import numpy as np
from mmd.module.MParams import BoneLinks
from mmd.module.MMath import MRect, MVector2D, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from mmd.utils.MException import SizingException # noqa
from mmd.utils.MLogger import MLogger # noqa
logger = MLogger(__name__, level=MLogger.DEBUG)
class Deform:
    """Base class for PMX vertex skinning deforms; holds the first bone index."""
    def __init__(self, index0):
        self.index0 = index0  # index of the (first) bone this vertex is bound to
class Bdef1(Deform):
    """Single-bone deform: the vertex follows one bone with full weight."""

    def __init__(self, index0):
        super().__init__(index0)

    def get_idx_list(self):
        """Return the bone indexes referenced by this deform."""
        return [self.index0]

    def __str__(self):
        return f"<Bdef1 {self.index0}>"
class Bdef2(Deform):
    """Two-bone deform: weight0 on bone index0, (1 - weight0) on index1."""

    def __init__(self, index0, index1, weight0):
        super().__init__(index0)
        self.index1 = index1
        self.weight0 = weight0

    def get_idx_list(self):
        """Return the bone indexes referenced by this deform."""
        return [self.index0, self.index1]

    def __str__(self):
        return f"<Bdef2 {self.index0}, {self.index1}, {self.weight0}>"
class Bdef4(Deform):
    """Four-bone deform: four bone indexes with four explicit weights."""

    def __init__(self, index0, index1, index2, index3, weight0, weight1, weight2, weight3):
        super().__init__(index0)
        self.index1 = index1
        self.index2 = index2
        self.index3 = index3
        self.weight0 = weight0
        self.weight1 = weight1
        self.weight2 = weight2
        self.weight3 = weight3

    def get_idx_list(self):
        """Return the bone indexes referenced by this deform."""
        return [self.index0, self.index1, self.index2, self.index3]

    def __str__(self):
        return (f"<Bdef4 {self.index0}:{self.index1}, {self.index2}:{self.index3}, "
                f"{self.weight0}:{self.weight1}, {self.weight2}:{self.weight3}>")
class Sdef(Deform):
    """Spherical deform: two weighted bones plus SDEF auxiliary points C/R0/R1."""

    def __init__(self, index0, index1, weight0, sdef_c, sdef_r0, sdef_r1):
        super().__init__(index0)
        self.index1 = index1
        self.weight0 = weight0
        self.sdef_c = sdef_c
        self.sdef_r0 = sdef_r0
        self.sdef_r1 = sdef_r1

    def get_idx_list(self):
        """Return the bone indexes referenced by this deform."""
        return [self.index0, self.index1]

    def __str__(self):
        return (f"<Sdef {self.index0}, {self.index1}, {self.weight0}, "
                f"{self.sdef_c} {self.sdef_r0} {self.sdef_r1}>")
class Qdef(Deform):
    """Quaternion deform: same payload as Sdef (two bones plus C/R0/R1)."""

    def __init__(self, index0, index1, weight0, sdef_c, sdef_r0, sdef_r1):
        self.index0 = index0
        self.index1 = index1
        self.weight0 = weight0
        self.sdef_c = sdef_c
        self.sdef_r0 = sdef_r0
        self.sdef_r1 = sdef_r1

    def get_idx_list(self):
        """Return the bone indexes referenced by this deform."""
        return [self.index0, self.index1]

    def __str__(self):
        # Bug fix: the string previously said "<Sdef ...>" (copy-paste from
        # the Sdef class), mislabeling Qdef instances in logs.
        return "<Qdef {0}, {1}, {2}, {3} {4} {5}>".format(self.index0, self.index1, self.weight0, self.sdef_c, self.sdef_r0, self.sdef_r1)
# 頂点構造 ----------------------------
class Vertex:
    """PMX vertex: position/normal/UV data plus its bone-weight (deform) record."""
    def __init__(self, index, position, normal, uv, extended_uvs, deform, edge_factor):
        self.index = index
        self.position = position
        self.normal = normal
        self.uv = uv
        self.extended_uvs = extended_uvs or []
        self.deform = deform
        self.edge_factor = edge_factor
    def __str__(self):
        return "<Vertex index:{0}, position:{1}, normal:{2}, uv:{3}, extended_uv: {4}, deform:{5}, edge:{6}".format(
            self.index, self.position, self.normal, self.uv, len(self.extended_uvs), self.deform, self.edge_factor)
    def is_deform_index(self, target_idx):
        # True if this vertex is weighted (to any degree) to bone target_idx.
        if type(self.deform) is Bdef1:
            return self.deform.index0 == target_idx
        elif type(self.deform) is Bdef2:
            return self.deform.index0 == target_idx or self.deform.index1 == target_idx
        elif type(self.deform) is Bdef4:
            return self.deform.index0 == target_idx or self.deform.index1 == target_idx \
                or self.deform.index2 == target_idx or self.deform.index3 == target_idx
        elif type(self.deform) is Sdef:
            return self.deform.index0 == target_idx or self.deform.index1 == target_idx
        elif type(self.deform) is Qdef:
            return self.deform.index0 == target_idx or self.deform.index1 == target_idx
        return False
    # Bone INDEX carrying the largest weight (translated from Japanese comment)
    def get_max_deform_index(self, head_links_indexes):
        if type(self.deform) is Bdef2 or type(self.deform) is Sdef or type(self.deform) is Qdef:
            # Two-bone deforms: prefer index0 when it dominates and is a
            # candidate; otherwise fall back to index1 if it is a candidate.
            if self.deform.weight0 >= 0.5 and self.deform.index0 in head_links_indexes.keys():
                return self.deform.index0
            else:
                if self.deform.index1 in head_links_indexes.keys():
                    return self.deform.index1
                else:
                    return self.deform.index0
        elif type(self.deform) is Bdef4:
            # Only bones whose index is in head_links_indexes (upper-body
            # chain) are considered.
            # NOTE(review): max() raises ValueError if *no* index matches —
            # presumably callers guarantee at least one; confirm.
            target_weights = []
            if self.deform.index0 in head_links_indexes.keys():
                target_weights.append(self.deform.weight0)
            if self.deform.index1 in head_links_indexes.keys():
                target_weights.append(self.deform.weight1)
            if self.deform.index2 in head_links_indexes.keys():
                target_weights.append(self.deform.weight2)
            if self.deform.index3 in head_links_indexes.keys():
                target_weights.append(self.deform.weight3)
            max_weight = max(target_weights)
            if max_weight == self.deform.weight1:
                return self.deform.index1
            elif max_weight == self.deform.weight2:
                return self.deform.index2
            elif max_weight == self.deform.weight3:
                return self.deform.index3
            else:
                return self.deform.index0
        return self.deform.index0
# 材質構造-----------------------
class Material:
    """PMX material record: colors, edge settings and texture references."""

    def __init__(self, name, english_name, diffuse_color, alpha, specular_factor, specular_color, ambient_color, flag, edge_color, edge_size, texture_index,
                 sphere_texture_index, sphere_mode, toon_sharing_flag, toon_texture_index=0, comment="", vertex_count=0):
        # Identification
        self.name = name
        self.english_name = english_name
        # Shading colors
        self.diffuse_color = diffuse_color
        self.alpha = alpha
        self.specular_color = specular_color
        self.specular_factor = specular_factor
        self.ambient_color = ambient_color
        # Draw flags and edge (outline) settings
        self.flag = flag
        self.edge_color = edge_color
        self.edge_size = edge_size
        # Texture / sphere / toon references
        self.texture_index = texture_index
        self.sphere_texture_index = sphere_texture_index
        self.sphere_mode = sphere_mode
        self.toon_sharing_flag = toon_sharing_flag
        self.toon_texture_index = toon_texture_index
        # Metadata
        self.comment = comment
        self.vertex_count = vertex_count

    def __str__(self):
        return "<Material name:{0}, english_name:{1}, diffuse_color:{2}, alpha:{3}, specular_color:{4}, " \
               "ambient_color: {5}, flag: {6}, edge_color: {7}, edge_size: {8}, texture_index: {9}, " \
               "sphere_texture_index: {10}, sphere_mode: {11}, toon_sharing_flag: {12}, " \
               "toon_texture_index: {13}, comment: {14}, vertex_count: {15}".format(
                   self.name, self.english_name, self.diffuse_color, self.alpha, self.specular_color,
                   self.ambient_color, self.flag, self.edge_color, self.edge_size, self.texture_index,
                   self.sphere_texture_index, self.sphere_mode, self.toon_sharing_flag,
                   self.toon_texture_index, self.comment, self.vertex_count)
class Ik:
    """IK settings of a bone: target, iteration count, angle limit and links."""

    def __init__(self, target_index, loop, limit_radian, link=None):
        self.target_index = target_index
        self.loop = loop
        self.limit_radian = limit_radian
        # Avoid a shared mutable default: fall back to a fresh list.
        self.link = link if link else []

    def __str__(self):
        return "<Ik target_index:{0}, loop:{1}, limit_radian:{2}, link:{3}".format(self.target_index, self.loop, self.limit_radian, self.link)
class IkLink:
    """One link bone in an IK chain, with optional angle limits."""

    def __init__(self, bone_index, limit_angle, limit_min=None, limit_max=None):
        self.bone_index = bone_index
        self.limit_angle = limit_angle
        # Fall back to zero vectors when no limits are given.
        self.limit_min = limit_min if limit_min else MVector3D()
        self.limit_max = limit_max if limit_max else MVector3D()

    def __str__(self):
        return "<IkLink bone_index:{0}, limit_angle:{1}, limit_min:{2}, limit_max:{3}".format(self.bone_index, self.limit_angle, self.limit_min, self.limit_max)
# Bone structures -----------------------
class Bone:
    """One PMX bone: placement, hierarchy, flag bits, IK settings and
    working fields filled in later by the sizing logic."""

    def __init__(self, name, english_name, position, parent_index, layer, flag, tail_position=None, tail_index=-1, effect_index=-1, effect_factor=0.0, fixed_axis=None,
                 local_x_vector=None, local_z_vector=None, external_key=-1, ik=None):
        self.name, self.english_name = name, english_name
        self.position = position
        self.parent_index, self.layer, self.flag = parent_index, layer, flag
        # Optional vector arguments fall back to zero vectors.
        self.tail_position = tail_position if tail_position else MVector3D()
        self.tail_index = tail_index
        self.effect_index, self.effect_factor = effect_index, effect_factor
        self.fixed_axis = fixed_axis if fixed_axis else MVector3D()
        self.local_x_vector = local_x_vector if local_x_vector else MVector3D()
        self.local_z_vector = local_z_vector if local_z_vector else MVector3D()
        self.external_key = external_key
        self.ik = ik
        self.index = -1
        # Turned on by the display-slot check; off by default.
        self.display = False
        # Length from the parent bone, 3D (computed later).
        self.len_3d = MVector3D()
        # Local offset.
        self.local_offset = MVector3D()
        # Global IK offset.
        self.global_ik_offset = MVector3D()
        # IK limit angles.
        self.ik_limit_min = MVector3D()
        self.ik_limit_max = MVector3D()
        # IK dot-product thresholds (general / near / far / single).
        self.dot_limit = 0
        self.dot_near_limit = 0
        self.dot_far_limit = 0
        self.dot_single_limit = 0
        # IK unit angle.
        self.degree_limit = 360
        # PMX bone flag bit masks.
        self.BONEFLAG_TAILPOS_IS_BONE = 0x0001
        self.BONEFLAG_CAN_ROTATE = 0x0002
        self.BONEFLAG_CAN_TRANSLATE = 0x0004
        self.BONEFLAG_IS_VISIBLE = 0x0008
        self.BONEFLAG_CAN_MANIPULATE = 0x0010
        self.BONEFLAG_IS_IK = 0x0020
        self.BONEFLAG_IS_EXTERNAL_ROTATION = 0x0100
        self.BONEFLAG_IS_EXTERNAL_TRANSLATION = 0x0200
        self.BONEFLAG_HAS_FIXED_AXIS = 0x0400
        self.BONEFLAG_HAS_LOCAL_COORDINATE = 0x0800
        self.BONEFLAG_IS_AFTER_PHYSICS_DEFORM = 0x1000
        self.BONEFLAG_IS_EXTERNAL_PARENT_DEFORM = 0x2000

    def copy(self):
        """Deep-copy this bone by round-tripping through pickle."""
        return cPickle.loads(cPickle.dumps(self, -1))

    def hasFlag(self, flag):
        """True when any bit of *flag* is set on this bone."""
        return bool(self.flag & flag)

    def setFlag(self, flag, enable):
        """Set (enable=True) or clear (enable=False) the given flag bits."""
        self.flag = (self.flag | flag) if enable else (self.flag & ~flag)

    # Convenience accessors for the individual flag bits.
    def getConnectionFlag(self):
        return self.hasFlag(self.BONEFLAG_TAILPOS_IS_BONE)

    def getRotatable(self):
        return self.hasFlag(self.BONEFLAG_CAN_ROTATE)

    def getTranslatable(self):
        return self.hasFlag(self.BONEFLAG_CAN_TRANSLATE)

    def getVisibleFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_VISIBLE)

    def getManipulatable(self):
        return self.hasFlag(self.BONEFLAG_CAN_MANIPULATE)

    def getIkFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_IK)

    def getExternalRotationFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_EXTERNAL_ROTATION)

    def getExternalTranslationFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_EXTERNAL_TRANSLATION)

    def getFixedAxisFlag(self):
        return self.hasFlag(self.BONEFLAG_HAS_FIXED_AXIS)

    def getLocalCoordinateFlag(self):
        return self.hasFlag(self.BONEFLAG_HAS_LOCAL_COORDINATE)

    def getAfterPhysicsDeformFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_AFTER_PHYSICS_DEFORM)

    def getExternalParentDeformFlag(self):
        return self.hasFlag(self.BONEFLAG_IS_EXTERNAL_PARENT_DEFORM)

    def __str__(self):
        fields = (self.name, self.english_name, self.position, self.parent_index, self.layer,
                  self.flag, self.tail_position, self.tail_index, self.effect_index, self.effect_factor,
                  self.fixed_axis, self.local_x_vector, self.local_z_vector,
                  self.external_key, self.ik, self.index)
        return ("<Bone name:{0}, english_name:{1}, position:{2}, parent_index:{3}, layer:{4}, "
                "flag: {5}, tail_position: {6}, tail_index: {7}, effect_index: {8}, effect_factor: {9}, "
                "fixed_axis: {10}, local_x_vector: {11}, local_z_vector: {12}, "
                "external_key: {13}, ik: {14}, index: {15}").format(*fields)
# Morph structures -----------------------
class Morph:
    """One PMX morph: panel placement, morph type and its offset list."""

    def __init__(self, name, english_name, panel, morph_type, offsets=None):
        self.index = 0
        self.name, self.english_name = name, english_name
        self.panel = panel
        self.morph_type = morph_type
        # Avoid a shared mutable default: fall back to a fresh list.
        self.offsets = offsets if offsets else []
        # Turned on by the display-slot check; off by default.
        self.display = False
        self.related_names = []

    def __str__(self):
        return "<Morph name:{0}, english_name:{1}, panel:{2}, morph_type:{3}, offsets(len): {4}".format(
            self.name, self.english_name, self.panel, self.morph_type, len(self.offsets))

    def get_panel_name(self):
        """Return the display name of the panel this morph belongs to."""
        return {1: "眉", 2: "目", 3: "口", 4: "他"}.get(self.panel, "?")
class GroupMorphData:
    """Group-morph offset: another morph's index and its applied ratio."""

    def __init__(self, morph_index, value):
        self.morph_index, self.value = morph_index, value
class VertexMorphOffset:
    """Vertex-morph offset: target vertex index and its position delta."""

    def __init__(self, vertex_index, position_offset):
        self.vertex_index, self.position_offset = vertex_index, position_offset
class BoneMorphData:
    """Bone-morph offset: target bone index plus translation and rotation."""

    def __init__(self, bone_index, position, rotation):
        self.bone_index, self.position, self.rotation = bone_index, position, rotation
class UVMorphData:
    """UV-morph offset: target vertex index and its UV delta."""

    def __init__(self, vertex_index, uv):
        self.vertex_index, self.uv = vertex_index, uv
class MaterialMorphData:
    """Material-morph offset: per-material color/edge/texture factors."""

    def __init__(self, material_index, calc_mode, diffuse, specular, specular_factor, ambient, edge_color, edge_size, texture_factor, sphere_texture_factor, toon_texture_factor):
        self.material_index = material_index
        # Calculation mode flag — presumably multiply/add; confirm against the PMX reader.
        self.calc_mode = calc_mode
        self.diffuse, self.specular, self.specular_factor = diffuse, specular, specular_factor
        self.ambient = ambient
        self.edge_color, self.edge_size = edge_color, edge_size
        self.texture_factor = texture_factor
        self.sphere_texture_factor = sphere_texture_factor
        self.toon_texture_factor = toon_texture_factor
# Display slot structures -----------------------
class DisplaySlot:
    """One PMX display slot (UI grouping) and the elements it references."""

    def __init__(self, name, english_name, special_flag, references=None):
        self.name, self.english_name = name, english_name
        self.special_flag = special_flag
        # Avoid a shared mutable default: fall back to a fresh list.
        self.references = references if references else []

    def __str__(self):
        return "<DisplaySlots name:{0}, english_name:{1}, special_flag:{2}, references(len):{3}".format(self.name, self.english_name, self.special_flag, len(self.references))
# Rigid body structures -----------------------
class RigidBody:
    """One PMX rigid body: shape, physics parameters and simulation mode."""

    def __init__(self, name, english_name, bone_index, collision_group, no_collision_group, shape_type, shape_size, shape_position, shape_rotation, mass, linear_damping, \
                 angular_damping, restitution, friction, mode):
        self.name, self.english_name = name, english_name
        self.bone_index = bone_index
        self.collision_group, self.no_collision_group = collision_group, no_collision_group
        self.shape_type = shape_type
        self.shape_size = shape_size
        self.shape_position = shape_position
        self.shape_rotation = shape_rotation
        # Physics parameters bundled into one object.
        self.param = RigidBodyParam(mass, linear_damping, angular_damping, restitution, friction)
        self.mode = mode
        self.index = -1
        self.bone_name = ""
        self.is_arm_upper = False
        self.is_small = False
        # Shape type constants.
        self.SHAPE_SPHERE = 0
        self.SHAPE_BOX = 1
        self.SHAPE_CAPSULE = 2

    def __str__(self):
        return ("<RigidBody name:{0}, english_name:{1}, bone_index:{2}, collision_group:{3}, no_collision_group:{4}, "
                "shape_type: {5}, shape_size: {6}, shape_position: {7}, shape_rotation: {8}, param: {9}, "
                "mode: {10}").format(self.name, self.english_name, self.bone_index, self.collision_group, self.no_collision_group,
                                     self.shape_type, self.shape_size, self.shape_position.to_log(), self.shape_rotation.to_log(), self.param, self.mode)

    def isModeStatic(self):
        """Mode 0: body follows its bone."""
        return self.mode == 0

    def isModeDynamic(self):
        """Mode 1: body is physics-driven."""
        return self.mode == 1

    def isModeMix(self):
        """Mode 2: physics-driven with bone alignment."""
        return self.mode == 2

    def get_obb(self, fno, bone_pos, bone_matrix, is_aliginment, is_arm_left):
        """Build the collision volume (OBB subclass) matching this body's shape."""
        if self.shape_type == self.SHAPE_SPHERE:
            obb_class, is_init_rot = Sphere, False
        elif self.shape_type == self.SHAPE_BOX:
            obb_class, is_init_rot = Box, True
        else:
            obb_class, is_init_rot = Capsule, True
        return obb_class(fno, self.shape_size, self.shape_position, self.shape_rotation, self.bone_name, bone_pos,
                         bone_matrix, is_aliginment, is_arm_left, self.is_arm_upper, self.is_small, is_init_rot)
class RigidBodyParam:
    """Physics parameters of a rigid body."""

    def __init__(self, mass, linear_damping, angular_damping, restitution, friction):
        self.mass = mass
        self.linear_damping, self.angular_damping = linear_damping, angular_damping
        self.restitution, self.friction = restitution, friction

    def __str__(self):
        return "<RigidBodyParam mass:{0}, linear_damping:{1}, angular_damping:{2}, restitution:{3}, friction: {4}".format(
            self.mass, self.linear_damping, self.angular_damping, self.restitution, self.friction)
# OBB (Oriented Bounding Box) -----------------------
class OBB:
    """Base class for per-rigid-body collision volumes.

    Precomputes the body's world matrices (with and without the body's own
    rotation) from the owning bone's matrix, plus the body's world origin.
    """

    def __init__(self, fno, shape_size, shape_position, shape_rotation, bone_name, bone_pos, bone_matrix, is_aliginment, is_arm_left, is_arm_upper, is_small, is_init_rot):
        self.fno = fno
        self.shape_size = shape_size
        self.shape_position = shape_position
        self.shape_rotation = shape_rotation
        # Rotation given in radians; converted to degrees for the quaternion factory.
        self.shape_rotation_qq = MQuaternion.fromEulerAngles(math.degrees(shape_rotation.x()), math.degrees(shape_rotation.y()), math.degrees(shape_rotation.z()))
        self.bone_pos = bone_pos
        # Horizontal sign: +1 for the left arm, -1 for the right.
        self.h_sign = 1 if is_arm_left else -1
        # Vertical sign flips only for a small upper-arm body.
        self.v_sign = -1 if is_arm_upper and is_small else 1
        self.is_aliginment = is_aliginment
        self.is_arm_upper = is_arm_upper
        self.is_small = is_small
        self.is_arm_left = is_arm_left
        # Matrix without the body's own rotation.
        self.matrix = bone_matrix[bone_name].copy()
        # Matrix including the body's own rotation.
        self.rotated_matrix = bone_matrix[bone_name].copy()
        # Offset both matrices by the body's position relative to the bone.
        self.matrix.translate(self.shape_position - bone_pos)
        self.rotated_matrix.translate(self.shape_position - bone_pos)
        # Apply the body's own rotation (kept only on the rotated matrix).
        self.rotated_matrix.rotate(self.shape_rotation_qq)
        # World-space origin of the body.
        self.origin = self.matrix * MVector3D(0, 0, 0)
        self.origin_xyz = {"x": self.origin.x(), "y": self.origin.y(), "z": self.origin.z()}
        self.shape_size_xyz = {"x": self.shape_size.x(), "y": self.shape_size.y(), "z": self.shape_size.z()}

    # Collision test against this volume; implemented by the shape subclasses.
    def get_collistion(self, point, root_global_pos, max_length):
        pass
# Sphere rigid body
class Sphere(OBB):
    def __init__(self, *args):
        super().__init__(*args)

    # Collision check
    def get_collistion(self, point, root_global_pos, max_length):
        """Return (collision, near_collision, x_distance, z_distance,
        rep_x_collision_vec, rep_z_collision_vec) for *point* against this sphere."""
        # Colliding when the distance to the origin is below the radius.
        d = point.distanceToPoint(self.origin)
        collision = 0 < d < self.shape_size.x() * 0.98
        near_collision = 0 <= d <= self.shape_size.x() * 1.02
        x_distance = 0
        z_distance = 0
        rep_x_collision_vec = MVector3D()
        rep_z_collision_vec = MVector3D()
        if collision or near_collision:
            # Point position in the body's local coordinate system.
            local_point = self.matrix.inverted() * point
            x = self.shape_size.x() * 1.02 * self.h_sign
            y = self.shape_size.x() * 1.02 * self.v_sign
            z = self.shape_size.x() * 1.02 * -1  # (np.sign(local_point.z()) if self.is_arm_upper else -1)
            # Angular offset along each axis (inputs clamped into acos domain).
            x_theta = math.acos(max(-1, min(1, local_point.x() / x)))
            y_theta = math.acos(max(-1, min(1, local_point.y() / y)))
            z_theta = math.acos(max(-1, min(1, local_point.z() / z)))
            # Circumference positions derived from the offsets (used for logging below).
            sin_y_theta = math.sin(y_theta) * 1.02
            sin_x_theta = math.sin(x_theta) * 1.02
            sin_z_theta = math.sin(z_theta) * 1.02
            new_y = local_point.y()
            new_x_local = MVector3D(y_theta * x, new_y, local_point.z())
            new_z_local = MVector3D(local_point.x(), new_y, y_theta * z)
            x_distance = new_x_local.distanceToPoint(local_point)
            z_distance = new_z_local.distanceToPoint(local_point)
            rep_x_collision_vec = self.matrix * new_x_local
            rep_z_collision_vec = self.matrix * new_z_local
            # Matrix rooted at the arm position (translation only).
            arm_matrix = MMatrix4x4()
            arm_matrix.setToIdentity()
            arm_matrix.translate(root_global_pos)
            # Avoidance positions as seen from the arm.
            x_arm_local = arm_matrix.inverted() * rep_x_collision_vec
            z_arm_local = arm_matrix.inverted() * rep_z_collision_vec
            if x_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                x_arm_local *= (max_length / x_arm_local.length()) * 0.98
                rep_x_collision_vec = arm_matrix * x_arm_local
                new_x_local = self.matrix.inverted() * rep_x_collision_vec
                x_distance = new_x_local.distanceToPoint(local_point)
            if z_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                z_arm_local *= (max_length / z_arm_local.length()) * 0.98
                rep_z_collision_vec = arm_matrix * z_arm_local
                new_z_local = self.matrix.inverted() * rep_z_collision_vec
                z_distance = new_z_local.distanceToPoint(local_point)
            logger.debug("f: %s, y: %s, yt: %s, sy: %s, xt: %s, sx: %s, zt: %s, sz: %s, xd: %s, zd: %s, l: %s, d: %s, xl: %s, zl: %s, xr: %s, zr: %s", \
                         self.fno, local_point.y() / y, y_theta, sin_y_theta, x_theta, sin_x_theta, z_theta, sin_z_theta, x_distance, z_distance, local_point.to_log(), d, \
                         new_x_local.to_log(), new_z_local.to_log(), rep_x_collision_vec, rep_z_collision_vec)
        # Collision result plus push-out vectors.
        return (collision, near_collision, x_distance, z_distance, rep_x_collision_vec, rep_z_collision_vec)
# Box rigid body
class Box(OBB):
    def __init__(self, *args):
        super().__init__(*args)

    # Inside/outside collision check
    # https://stackoverflow.com/questions/21037241/how-to-determine-a-point-is-inside-or-outside-a-cube
    def get_collistion(self, point, root_global_pos, max_length):
        """Return (collision, near_collision, x_distance, z_distance,
        rep_x_collision_vec, rep_z_collision_vec) for *point* against this box."""
        # Colliding when the point lies inside the box.
        # ---------
        # Bottom-face corners.
        b1 = self.matrix * MVector3D(-self.shape_size.x(), -self.shape_size.y(), -self.shape_size.z())
        b2 = self.matrix * MVector3D(self.shape_size.x(), -self.shape_size.y(), -self.shape_size.z())
        b4 = self.matrix * MVector3D(-self.shape_size.x(), -self.shape_size.y(), self.shape_size.z())
        # Top-face corner above b1.
        t1 = self.matrix * MVector3D(-self.shape_size.x(), self.shape_size.y(), -self.shape_size.z())
        # Three edge directions and their lengths.
        d1 = (t1 - b1)
        size1 = d1.length()
        dir1 = d1 / size1
        dir1.effective()
        d2 = (b2 - b1)
        size2 = d2.length()
        dir2 = d2 / size2
        dir2.effective()
        d3 = (b4 - b1)
        size3 = d3.length()
        dir3 = d3 / size3
        dir3.effective()
        dir_vec = point - self.origin
        dir_vec.effective()
        # Projection of the point onto each edge direction must fit the half-extent.
        res1 = abs(MVector3D.dotProduct(dir_vec, dir1)) * 2 < size1
        res2 = abs(MVector3D.dotProduct(dir_vec, dir2)) * 2 < size2
        res3 = abs(MVector3D.dotProduct(dir_vec, dir3)) * 2 < size3
        # Collision when the point is within all three extents.
        collision = (res1 and res2 and res3 and True)
        # ---------
        # Same test with corners scaled by 2% for the near-miss band.
        b1 = self.matrix * MVector3D(-self.shape_size.x(), -self.shape_size.y(), -self.shape_size.z()) * 1.02
        b2 = self.matrix * MVector3D(self.shape_size.x(), -self.shape_size.y(), -self.shape_size.z()) * 1.02
        b4 = self.matrix * MVector3D(-self.shape_size.x(), -self.shape_size.y(), self.shape_size.z()) * 1.02
        t1 = self.matrix * MVector3D(-self.shape_size.x(), self.shape_size.y(), -self.shape_size.z()) * 1.02
        d1 = (t1 - b1)
        size1 = d1.length()
        dir1 = d1 / size1
        dir1.effective()
        d2 = (b2 - b1)
        size2 = d2.length()
        dir2 = d2 / size2
        dir2.effective()
        d3 = (b4 - b1)
        size3 = d3.length()
        dir3 = d3 / size3
        dir3.effective()
        dir_vec = point - self.origin
        dir_vec.effective()
        res1 = abs(MVector3D.dotProduct(dir_vec, dir1)) * 2 < size1
        res2 = abs(MVector3D.dotProduct(dir_vec, dir2)) * 2 < size2
        res3 = abs(MVector3D.dotProduct(dir_vec, dir3)) * 2 < size3
        # Near-collision when the point fits the slightly enlarged extents.
        near_collision = (res1 and res2 and res3 and True)
        x_distance = 0
        z_distance = 0
        rep_x_collision_vec = MVector3D()
        rep_z_collision_vec = MVector3D()
        if collision or near_collision:
            # The push-back direction flips depending on which arm (left/right) hit.
            x = self.shape_size.x() * 1.02 * self.h_sign
            z = -self.shape_size.z() * 1.02
            # Position when carried to the OBB boundary along X.
            x_base = self.rotated_matrix * MVector3D(x, 0, 0)
            # Same along Z.
            z_base = self.rotated_matrix * MVector3D(0, 0, z)
            logger.test("x_base: %s", x_base)
            logger.test("z_base: %s", z_base)
            x_diff = x_base.distanceToPoint(point)
            z_diff = z_base.distanceToPoint(point)
            logger.test("x_diff: %s", x_diff)
            logger.test("z_diff: %s", z_diff)
            # Point position in the body's local coordinate system.
            local_point = self.rotated_matrix.inverted() * point
            new_y = local_point.y()
            new_x_local = MVector3D(x, new_y, local_point.z())
            new_z_local = MVector3D(local_point.x(), new_y, z)
            x_distance = new_x_local.distanceToPoint(local_point)
            z_distance = new_z_local.distanceToPoint(local_point)
            rep_x_collision_vec = self.rotated_matrix * new_x_local
            rep_z_collision_vec = self.rotated_matrix * new_z_local
            # Matrix rooted at the arm position (translation only).
            arm_matrix = MMatrix4x4()
            arm_matrix.setToIdentity()
            arm_matrix.translate(root_global_pos)
            # Avoidance positions as seen from the arm.
            x_arm_local = arm_matrix.inverted() * rep_x_collision_vec
            z_arm_local = arm_matrix.inverted() * rep_z_collision_vec
            if x_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                x_arm_local *= (max_length / x_arm_local.length()) * 0.98
                rep_x_collision_vec = arm_matrix * x_arm_local
                new_x_local = self.matrix.inverted() * rep_x_collision_vec
                x_distance = new_x_local.distanceToPoint(local_point)
            if z_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                z_arm_local *= (max_length / z_arm_local.length()) * 0.98
                rep_z_collision_vec = arm_matrix * z_arm_local
                new_z_local = self.matrix.inverted() * rep_z_collision_vec
                z_distance = new_z_local.distanceToPoint(local_point)
            logger.debug("f: %s, xd: %s, zd: %s, l: %s, xl: %s, zl: %s, xr: %s, zr: %s", \
                         self.fno, x_distance, z_distance, local_point.to_log(), new_x_local.to_log(), new_z_local.to_log(), rep_x_collision_vec, rep_z_collision_vec)
        return (collision, near_collision, x_distance, z_distance, rep_x_collision_vec, rep_z_collision_vec)
# Capsule rigid body
class Capsule(OBB):
    def __init__(self, *args):
        super().__init__(*args)

    # Collision check
    # http://marupeke296.com/COL_3D_No27_CapsuleCapsule.html
    def get_collistion(self, point, root_global_pos, max_length):
        """Return (collision, near_collision, x_distance, z_distance,
        rep_x_collision_vec, rep_z_collision_vec) for *point* against this capsule."""
        # Bottom end of the capsule's axis segment.
        b1 = self.rotated_matrix * MVector3D(0, -self.shape_size.y(), 0)
        # Top end.
        t1 = self.rotated_matrix * MVector3D(0, self.shape_size.y(), 0)
        # Parameter of the perpendicular foot along the segment.
        v = (t1 - b1)
        lensq = v.lengthSquared()
        t = 0 if lensq == 0 else MVector3D.dotProduct(v, point - b1) / lensq
        # Foot of the perpendicular.
        h = b1 + (v * t)
        logger.test("v: %s", v)
        logger.test("lensq: %s", lensq)
        logger.test("t: %s", t)
        logger.test("h: %s", h)
        # Triangle point / segment start / foot.
        ba = (point - b1).lengthSquared()
        bb = (h - b1).lengthSquared()
        bc = (point - h).lengthSquared()
        # Triangle point / segment end / foot.
        ta = (point - t1).lengthSquared()
        tb = (h - t1).lengthSquared()
        tc = (point - h).lengthSquared()
        logger.test("ba: %s, bb: %s, bc: %s", ba, bb, bc)
        logger.test("ta: %s, tb: %s, tc: %s", ta, tb, tc)
        if t1.distanceToPoint(b1) < b1.distanceToPoint(h) < t1.distanceToPoint(h):
            # Foot lies beyond the b1 end: clamp to b1.
            h = b1
        elif t1.distanceToPoint(b1) < t1.distanceToPoint(h) < b1.distanceToPoint(h):
            # Foot lies beyond the t1 end: clamp to t1.
            h = t1
        logger.test("v: %s", v)
        logger.test("lensq: %s", lensq)
        logger.test("t: %s", t)
        logger.test("h: %s", h)
        logger.test("point: %s", point)
        logger.test("segl: %s", point.distanceToPoint(h))
        # Inside the capsule when within the radius of the (clamped) segment point.
        d = point.distanceToPoint(h)
        collision = 0 < d < self.shape_size.x() * 0.98
        near_collision = 0 <= d <= self.shape_size.x() * 1.02
        x_distance = 0
        z_distance = 0
        rep_x_collision_vec = MVector3D()
        rep_z_collision_vec = MVector3D()
        if collision or near_collision:
            # Point position in a local frame centred on h.
            h_matrix = self.matrix.copy()
            h_matrix.translate(self.matrix.inverted() * h)
            local_point = h_matrix.inverted() * point
            logger.debug("h: %s, localh: %s", h, h_matrix * MVector3D())
            # Sphere of radius d (plus margin) around h.
            x = d * 1.02 * self.h_sign
            y = d * 1.02 * self.v_sign
            z = d * 1.02 * -1  # (np.sign(local_point.z()) if self.is_arm_upper else -1)
            # Angular offset along each axis (inputs clamped into acos domain).
            x_theta = math.acos(max(-1, min(1, local_point.x() / x)))
            y_theta = math.acos(max(-1, min(1, abs(local_point.y()) / y)))
            z_theta = math.acos(max(-1, min(1, local_point.z() / z)))
            # Circumference positions derived from the offsets (used for logging below).
            sin_y_theta = math.sin(y_theta) * 1.02
            sin_x_theta = math.sin(x_theta) * 1.02
            sin_z_theta = math.sin(z_theta) * 1.02
            new_y = local_point.y()
            new_x_local = MVector3D(y_theta * x, new_y, local_point.z())
            new_z_local = MVector3D(local_point.x(), new_y, y_theta * z)
            x_distance = new_x_local.distanceToPoint(local_point)
            z_distance = new_z_local.distanceToPoint(local_point)
            rep_x_collision_vec = h_matrix * new_x_local
            rep_z_collision_vec = h_matrix * new_z_local
            # Matrix rooted at the arm position (translation only).
            arm_matrix = MMatrix4x4()
            arm_matrix.setToIdentity()
            arm_matrix.translate(root_global_pos)
            # Avoidance positions as seen from the arm.
            x_arm_local = arm_matrix.inverted() * rep_x_collision_vec
            z_arm_local = arm_matrix.inverted() * rep_z_collision_vec
            if x_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                x_arm_local *= (max_length / x_arm_local.length()) * 0.98
                rep_x_collision_vec = arm_matrix * x_arm_local
                new_x_local = h_matrix.inverted() * rep_x_collision_vec
                x_distance = new_x_local.distanceToPoint(local_point)
            if z_arm_local.length() >= max_length:
                # Longer than the maximum reachable distance: shrink it.
                z_arm_local *= (max_length / z_arm_local.length()) * 0.98
                rep_z_collision_vec = arm_matrix * z_arm_local
                new_z_local = h_matrix.inverted() * rep_z_collision_vec
                z_distance = new_z_local.distanceToPoint(local_point)
            logger.debug("f: %s, localy: %s, y_theta: %s, sin_y_theta: %s, x_theta: %s, sin_x_theta: %s, z_theta: %s, sin_z_theta: %s, x_distance: %s, z_distance: %s, "\
                         "local_point: [%s], d: %s, new_x_local: %s, new_z_local: %s, rep_x_collision_vec: %s, rep_z_collision_vec: %s", \
                         self.fno, local_point.y() / y, y_theta, sin_y_theta, x_theta, sin_x_theta, z_theta, sin_z_theta, x_distance, z_distance, local_point.to_log(), d, \
                         new_x_local.to_log(), new_z_local.to_log(), rep_x_collision_vec, rep_z_collision_vec)
        # Collision result plus push-out vectors.
        return (collision, near_collision, x_distance, z_distance, rep_x_collision_vec, rep_z_collision_vec)
# Joint structures -----------------------
class Joint:
    """One PMX joint connecting two rigid bodies, with limits and springs."""

    def __init__(self, name, english_name, joint_type, rigidbody_index_a, rigidbody_index_b, position, rotation, \
                 translation_limit_min, translation_limit_max, rotation_limit_min, rotation_limit_max, spring_constant_translation, spring_constant_rotation):
        self.name, self.english_name = name, english_name
        self.joint_type = joint_type
        # The two connected rigid bodies.
        self.rigidbody_index_a, self.rigidbody_index_b = rigidbody_index_a, rigidbody_index_b
        self.position, self.rotation = position, rotation
        # Movement / rotation limits.
        self.translation_limit_min, self.translation_limit_max = translation_limit_min, translation_limit_max
        self.rotation_limit_min, self.rotation_limit_max = rotation_limit_min, rotation_limit_max
        # Spring constants.
        self.spring_constant_translation = spring_constant_translation
        self.spring_constant_rotation = spring_constant_rotation

    def __str__(self):
        return ("<Joint name:{0}, english_name:{1}, joint_type:{2}, rigidbody_index_a:{3}, rigidbody_index_b:{4}, "
                "position: {5}, rotation: {6}, translation_limit_min: {7}, translation_limit_max: {8}, "
                "spring_constant_translation: {9}, spring_constant_rotation: {10}").format(
            self.name, self.english_name, self.joint_type, self.rigidbody_index_a, self.rigidbody_index_b,
            self.position, self.rotation, self.translation_limit_min, self.translation_limit_max,
            self.spring_constant_translation, self.spring_constant_rotation)
class PmxModel:
def __init__(self):
    """Initialize an empty PMX model container."""
    self.path = ''
    self.name = ''
    self.english_name = ''
    self.comment = ''
    self.english_comment = ''
    # Vertex data (key: bone INDEX, value: list of vertices).
    self.vertices = {}
    # Face (index) data.
    self.indices = []
    # Texture data.
    self.textures = []
    # Material data.
    self.materials = {}
    # Material lookup (key: material INDEX, value: material name).
    self.material_indexes = {}
    # Bone data.
    self.bones = {}
    # Bone lookup (key: bone INDEX, value: bone name).
    self.bone_indexes = {}
    # Morph data (insertion order preserved).
    self.morphs = {}
    # Display slot data.
    self.display_slots = {}
    # Rigid body data.
    self.rigidbodies = {}
    # Rigid body lookup by INDEX.
    self.rigidbody_indexes = {}
    # Joint data.
    self.joints = {}
    # Digest (hash) of the model file.
    self.digest = None
    # Whether the upper body can be sized (standard / semi-standard bone structure).
    self.can_upper_sizing = True
    # Whether the arms can be sized (standard / semi-standard bone structure).
    self.can_arm_sizing = True
    # Top-of-head vertex.
    self.head_top_vertex = None
    # Left sole vertex.
    self.left_sole_vertex = None
    # Right sole vertex.
    self.right_sole_vertex = None
    # Left toe vertex.
    self.left_toe_vertex = None
    # Right toe vertex.
    self.right_toe_vertex = None
    # Left/right palm vertices.
    self.wrist_entity_vertex = {}
    # Left/right elbow vertices.
    self.elbow_entity_vertex = {}
    # Left/right elbow-wrist midpoint vertices.
    self.elbow_middle_entity_vertex = {}
# Local X axis of a bone
def get_local_x_axis(self, bone_name: str):
    """Return the normalized local X axis of *bone_name* (zero vector when unknown)."""
    if bone_name not in self.bones:
        return MVector3D()
    bone = self.bones[bone_name]
    to_pos = MVector3D()
    if bone.fixed_axis != MVector3D():
        # Axis-limited bone: keep the fixed-axis direction (normalized).
        fixed_x_axis = bone.fixed_axis.normalized()
    else:
        fixed_x_axis = MVector3D()
    from_pos = self.bones[bone.name].position
    if bone.tail_position != MVector3D():
        # Tail given as a relative offset: use it.
        to_pos = from_pos + bone.tail_position
    elif bone.tail_index >= 0 and bone.tail_index in self.bone_indexes and self.bones[self.bone_indexes[bone.tail_index]].position != bone.position:
        # Tail given as another bone: use that bone's position.
        to_pos = self.bones[self.bone_indexes[bone.tail_index]].position
    else:
        # No tail: pick any child bone at a different position.
        for b in self.bones.values():
            if b.parent_index == bone.index and self.bones[self.bone_indexes[b.index]].position != bone.position:
                to_pos = self.bones[self.bone_indexes[b.index]].position
                break
    # Direction towards the child when no axis limit applies.
    x_axis = (to_pos - from_pos).normalized()
    if fixed_x_axis != MVector3D() and np.sign(fixed_x_axis.x()) != np.sign(x_axis.x()):
        # Fixed axis points the other way than the computed one: flip it.
        x_axis = -fixed_x_axis
    return x_axis
# Arm stance difference
def calc_arm_stance(self, from_bone_name: str, to_bone_name=None):
    """Return the stance (direction, rotation) of an arm bone pair.

    Left-side bones default towards +X, right-side ones towards -X.
    """
    sign = 1 if "左" in from_bone_name else -1
    return self.calc_stance(from_bone_name, to_bone_name, MVector3D(sign, 0, 0))
# Stance between two bones
def calc_stance(self, from_bone_name: str, to_bone_name: str, default_pos: MVector3D):
    """Return (diff_pos, from_qq): the normalized FROM->TO direction and the
    rotation taking *default_pos* onto it."""
    from_pos = MVector3D()
    to_pos = MVector3D()
    if from_bone_name in self.bones:
        fv = self.bones[from_bone_name]
        from_pos = fv.position
        if to_bone_name in self.bones:
            # TO bone given explicitly: use its position.
            to_pos = self.bones[to_bone_name].position
        else:
            # No explicit TO bone: derive the position from the tail.
            if fv.tail_position != MVector3D():
                # Tail given as a relative offset: use it.
                to_pos = from_pos + fv.tail_position
            elif fv.tail_index >= 0 and fv.tail_index in self.bone_indexes:
                # Tail given as another bone: use that bone's position.
                to_pos = self.bones[self.bone_indexes[fv.tail_index]].position
            else:
                # Last resort: add the default offset.
                to_pos = from_pos + default_pos
    from_qq = MQuaternion()
    diff_pos = MVector3D()
    if from_pos != MVector3D() and to_pos != MVector3D():
        logger.test("from_pos: %s", from_pos)
        logger.test("to_pos: %s", to_pos)
        diff_pos = to_pos - from_pos
        diff_pos.normalize()
        logger.test("diff_pos: %s", diff_pos)
        from_qq = MQuaternion.rotationTo(default_pos, diff_pos)
        logger.test("[z] from_bone_name: %s, from_qq: %s", from_bone_name, from_qq.toEulerAngles())
    return diff_pos, from_qq
# Check whether arm-related sizing is possible
def check_arm_bone_can_sizing(self):
    """Return True when all six arm/elbow/wrist bones exist and no arm-IK bone is present."""
    target_bones = ["左腕", "左ひじ", "左手首", "右腕", "右ひじ", "右手首"]
    cannot_sizing = "腕系処理をスキップします。\n腕系処理(腕スタンス補正・捩り分散・接触回避・位置合わせ)を実行したい場合、\n腕タブのチェックスキップFLGをONにして再実行してください。"
    if not set(target_bones).issubset(self.bones.keys()):
        logger.warning("腕・ひじ・手首の左右ボーンが揃ってないため、%s\nモデル: %s", cannot_sizing, self.name, decoration=MLogger.DECORATION_BOX)
        return False
    for bone_name in self.bones.keys():
        # NOTE(review): the duplicated-looking substring checks presumably
        # distinguished half-width vs full-width "IK" variants before an
        # encoding normalization — verify against the original source.
        if ("腕IK" in bone_name or "腕IK" in bone_name or "うでIK" in bone_name or "うでIK" in bone_name or "腕XIK" in bone_name):
            # An arm-IK style bone exists (and may be shown): arm processing is not possible.
            logger.warning("モデルに「腕IK」に類するボーンが含まれているため、%s\nモデル: %s", cannot_sizing, self.name, decoration=MLogger.DECORATION_BOX)
            return False
    return True
# Build left/right bone link chains
def create_link_2_top_lr(self, *target_bone_types, **kwargs):
    """Return {"左": links, "右": links} for the first bone type resolvable on both sides."""
    is_defined = kwargs.get("is_defined", True)
    for bone_type in target_bone_types:
        links_l = self.create_link_2_top_one("左{0}".format(bone_type), is_defined=is_defined)
        links_r = self.create_link_2_top_one("右{0}".format(bone_type), is_defined=is_defined)
        if links_l and links_r:
            # Both sides resolved: return them as-is.
            return {"左": links_l, "右": links_r}
# Build a single bone link chain
def create_link_2_top_one(self, *target_bone_names, **kwargs):
    """Return a BoneLinks chain (root first) for the first resolvable bone
    name; raise SizingException when none resolves."""
    is_defined = kwargs.get("is_defined", True)
    for bone_name in target_bone_names:
        links = self.create_link_2_top(bone_name, None, is_defined)
        if links and bone_name in links.all():
            # Chain found: return it reversed so the root comes first.
            flipped = BoneLinks()
            for link_name in reversed(list(links.all().keys())):
                flipped.append(links.get(link_name))
            return flipped
    # None of the candidate names produced a chain.
    raise SizingException("ボーンリンクの生成に失敗しました。モデル「%s」に「%s」のボーンがあるか確認してください。" % (self.name, ",".join(target_bone_names)))
# Build a link chain (recursive)
def create_link_2_top(self, target_bone_name: str, links: BoneLinks, is_defined: bool):
    """Recursively collect the bone chain from *target_bone_name* up to the root.

    Args:
        target_bone_name: bone to start (or continue) from.
        links: accumulator; pass None on the first call.
        is_defined: True to follow the PARENT_BORN_PAIR (semi-)standard table,
            False to follow each bone's own parent_index.
    Returns:
        BoneLinks ordered from the start bone towards the root.
    Raises:
        SizingException: when the defined parent table has no entry, or on
            infinite parent recursion.
    """
    if not links:
        # First call: create the order-preserving link container.
        links = BoneLinks()
    if target_bone_name not in self.bones and target_bone_name not in self.PARENT_BORN_PAIR:
        # Unknown start bone: stop here.
        return links
    start_type_bone = target_bone_name
    if target_bone_name.startswith("右") or target_bone_name.startswith("左"):
        # Strip a leading left/right prefix.
        start_type_bone = target_bone_name[1:]
    # Register this bone itself.
    links.append(self.bones[target_bone_name].copy())
    parent_name = None
    if is_defined:
        # Defined (semi-)standard structure: look the parent up in the pair table.
        if target_bone_name not in self.PARENT_BORN_PAIR:
            raise SizingException("ボーンリンクの生成に失敗しました。モデル「%s」の「%s」ボーンが準標準までの構造ではない可能性があります。" % (self.name, target_bone_name))
        for pname in self.PARENT_BORN_PAIR[target_bone_name]:
            # Use the first candidate parent that exists in this model.
            if pname in self.bones:
                parent_name = pname
                break
    else:
        # Free structure: follow the bone's own parent index.
        if self.bones[target_bone_name].parent_index >= 0:
            parent_name = self.bone_indexes[self.bones[target_bone_name].parent_index]
    if not parent_name:
        # No parent found: the chain ends here.
        return links
    logger.test("target_bone_name: %s. parent_name: %s, start_type_bone: %s", target_bone_name, parent_name, start_type_bone)
    # Recurse towards the root.
    try:
        return self.create_link_2_top(parent_name, links, is_defined)
    except RecursionError:
        # BUGFIX: the message parts were previously joined with "+" so that
        # .format() bound only to the LAST literal; the {0}/{1} placeholders in
        # the first literal were never substituted and the model/bone names
        # never appeared in the error. Concatenate first, then format.
        raise SizingException(("ボーンリンクの生成に失敗しました。\nモデル「{0}」の「{1}」ボーンで以下を確認してください。\n"
                               "・同じ名前のボーンが複数ないか(ボーンのINDEXがズレるため、サイジングに失敗します)\n"
                               "・親ボーンに自分の名前と同じ名前のボーンが指定されていないか\n※ PMXEditorの「PMXデータの状態検証」から確認できます。").format(self.name, target_bone_name))
# Collect all descendant bones
def get_child_bones(self, target_bone: Bone, bone_list=None):
    """Return a flat list of all descendants of *target_bone*, direct
    children first, then each child's subtree (depth-first)."""
    if not bone_list:
        bone_list = []
    # Direct children: bones whose parent index points at the target.
    children = [b for b in self.bones.values()
                if b.index != target_bone.index and b.parent_index == target_bone.index]
    for child in children:
        bone_list.append(child)
    for child in children:
        self.get_child_bones(child, bone_list)
    return bone_list
# Parent-candidate table for (semi-)standard bones: for each bone name, the
# list of acceptable parent bone names, ordered by priority (first existing
# candidate wins when walking a link chain up to the root).
PARENT_BORN_PAIR = {
    "SIZING_ROOT_BONE": [""],
    "全ての親": ["SIZING_ROOT_BONE"],
    "センター": ["全ての親", "SIZING_ROOT_BONE"],
    "グルーブ": ["センター"],
    "センター実体": ["グルーブ", "センター"],
    "腰": ["センター実体", "グルーブ", "センター"],
    "足中間": ["下半身"],
    "下半身": ["腰", "センター実体", "グルーブ", "センター"],
    "上半身": ["腰", "センター実体", "グルーブ", "センター"],
    "上半身2": ["上半身"],
    "首根元": ["上半身2", "上半身"],
    "首根元2": ["首根元", "上半身2", "上半身"],
    "首": ["首根元2", "首根元", "上半身2", "上半身"],
    "頭": ["首"],
    "頭頂実体": ["頭"],
    "左肩P": ["首根元2", "首根元", "上半身2", "上半身"],
    "左肩": ["左肩P", "首根元2", "首根元", "上半身2", "上半身"],
    "左肩下延長": ["左肩"],
    "左肩C": ["左肩"],
    "左腕": ["左肩C", "左肩"],
    "左腕捩": ["左腕"],
    "左腕ひじ中間": ["左腕捩", "左腕"],
    "左ひじ": ["左腕捩", "左腕"],
    "左ひじ実体": ["左ひじ"],
    "左手捩": ["左ひじ"],
    "左ひじ手首中間": ["左手捩", "左ひじ"],
    "左ひじ手首中間実体": ["左ひじ手首中間"],
    "左手首": ["左手捩", "左ひじ"],
    "左手首実体": ["左手首"],
    "左親指0": ["左手首"],
    "左親指1": ["左親指0", "左手首"],
    "左親指2": ["左親指1"],
    "左親指先実体": ["左親指2"],
    "左人指0": ["左手首"],
    "左人指1": ["左人指0", "左手首"],
    "左人指2": ["左人指1"],
    "左人指3": ["左人指2"],
    "左人指先実体": ["左人指3"],
    "左中指0": ["左手首"],
    "左中指1": ["左中指0", "左手首"],
    "左中指2": ["左中指1"],
    "左中指3": ["左中指2"],
    "左中指先実体": ["左中指3"],
    "左薬指0": ["左手首"],
    "左薬指1": ["左薬指0", "左手首"],
    "左薬指2": ["左薬指1"],
    "左薬指3": ["左薬指2"],
    "左薬指先実体": ["左薬指3"],
    "左小指0": ["左手首"],
    "左小指1": ["左小指0", "左手首"],
    "左小指2": ["左小指1"],
    "左小指3": ["左小指2"],
    "左小指先実体": ["左小指3"],
    "左足": ["足中間", "下半身"],
    "左ひざ": ["左足"],
    "左足首": ["左ひざ"],
    "左つま先": ["左足首"],
    "左足IK親": ["全ての親", "SIZING_ROOT_BONE"],
    "左足IK親底実体": ["左足IK親"],
    "左足IK": ["左足IK親", "全ての親", "SIZING_ROOT_BONE"],
    "左つま先IK": ["左足IK"],
    "左足IK底実体": ["左足IK"],
    "左足先EX": ["左つま先IK", "左足IK"],
    "左足底実体": ["左足先EX", "左つま先IK", "左足IK"],
    "左つま先実体": ["左足底実体", "左足先EX", "左つま先IK", "左足IK"],
    "右肩P": ["首根元2", "首根元", "上半身2", "上半身"],
    "右肩": ["右肩P", "首根元2", "首根元", "上半身2", "上半身"],
    "右肩下延長": ["右肩"],
    "右肩C": ["右肩"],
    "右腕": ["右肩C", "右肩"],
    "右腕捩": ["右腕"],
    "右腕ひじ中間": ["右腕捩", "右腕"],
    "右ひじ": ["右腕捩", "右腕"],
    "右ひじ実体": ["右ひじ"],
    "右手捩": ["右ひじ"],
    "右ひじ手首中間": ["右手捩", "右ひじ"],
    "右ひじ手首中間実体": ["右ひじ手首中間"],
    "右手首": ["右手捩", "右ひじ"],
    "右手首実体": ["右手首"],
    "右親指0": ["右手首"],
    "右親指1": ["右親指0", "右手首"],
    "右親指2": ["右親指1"],
    "右親指先実体": ["右親指2"],
    "右人指0": ["右手首"],
    "右人指1": ["右人指0", "右手首"],
    "右人指2": ["右人指1"],
    "右人指3": ["右人指2"],
    "右人指先実体": ["右人指3"],
    "右中指0": ["右手首"],
    "右中指1": ["右中指0", "右手首"],
    "右中指2": ["右中指1"],
    "右中指3": ["右中指2"],
    "右中指先実体": ["右中指3"],
    "右薬指0": ["右手首"],
    "右薬指1": ["右薬指0", "右手首"],
    "右薬指2": ["右薬指1"],
    "右薬指3": ["右薬指2"],
    "右薬指先実体": ["右薬指3"],
    "右小指0": ["右手首"],
    "右小指1": ["右小指0", "右手首"],
    "右小指2": ["右小指1"],
    "右小指3": ["右小指2"],
    "右小指先実体": ["右小指3"],
    "右足": ["足中間", "下半身"],
    "右ひざ": ["右足"],
    "右足首": ["右ひざ"],
    "右つま先": ["右足首"],
    "右足IK親": ["全ての親", "SIZING_ROOT_BONE"],
    "右足IK親底実体": ["右足IK親"],
    "右足IK": ["右足IK親", "全ての親", "SIZING_ROOT_BONE"],
    "右つま先IK": ["右足IK"],
    "右足IK底実体": ["右足IK"],
    "右足先EX": ["右つま先IK", "右足IK"],
    "右足底実体": ["右足先EX", "右つま先IK", "右足IK"],
    "右つま先実体": ["右足底実体", "右足先EX", "右つま先IK", "右足IK"],
    "左目": ["頭"],
    "右目": ["頭"]
}
# Find the top-of-head vertex
def get_head_top_vertex(self):
    """Return the highest head vertex; fall back to a synthetic vertex at the
    頭 (head) or 首 (neck) bone position, or the origin."""
    bone_name_list = ["頭"]
    # First try with the X-range restriction applied.
    up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
        back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
        = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_original, def_is_target=self.def_is_target_x_limit)
    if not up_max_vertex:
        # No top vertex found: retry without the X restriction.
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_original, def_is_target=None)
        if not up_max_vertex:
            # Still nothing: synthesize a vertex at the head / neck bone, or the origin.
            if "頭" in self.bones:
                return Vertex(-1, self.bones["頭"].position.copy(), MVector3D(), [], [], Bdef1(-1), -1)
            elif "首" in self.bones:
                return Vertex(-1, self.bones["首"].position.copy(), MVector3D(), [], [], Bdef1(-1), -1)
            else:
                return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
    return up_max_vertex
    # Generate the head-collision rigid body.
    def get_head_rigidbody(self):
        """Build a spherical RigidBody ("頭接触回避") approximating the head.

        Collects the extreme vertices weighted to the head bone in all six
        directions; when all six exist, derives the sphere radius from the
        head's X width and places the center slightly above the bottom and
        slightly forward.  Returns None when any extreme vertex is missing.
        """
        bone_name_list = ["頭"]
        # Collect extremes in every direction, with no target restriction.
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_original, def_is_target=None)
        if up_max_vertex and down_max_vertex and right_max_vertex and left_max_vertex and back_max_vertex and front_max_vertex:
            # Clamp the bottom to the neck bone height, just in case.
            y_bottom = max(down_max_vertex.position.y(), self.bones["首"].position.y())
            # Radius: half the X extent between leftmost and rightmost vertices.
            # y_len = abs(up_max_vertex.position.y() - y_bottom)
            x_len = abs(left_max_vertex.position.x() - right_max_vertex.position.x())
            # z_len = abs(back_max_vertex.position.z() - front_max_vertex.position.z())
            radius = x_len / 2
            # center = MVector3D()
            # # Y: one radius above the bottom edge
            # center.setY(down_max_vertex.position.y() + (radius * 0.95))
            # # Z: one radius behind the front face
            # center.setZ(front_max_vertex.position.z() + (radius * 0.95))
            # Center: mean of the six extreme vertex positions.
            center = MVector3D(np.mean([up_max_vertex.position.data(), down_max_vertex.position.data(), left_max_vertex.position.data(), \
                                        right_max_vertex.position.data(), back_max_vertex.position.data(), front_max_vertex.position.data()], axis=0))
            # X: dead center.
            center.setX(0)
            # Y: slightly less than one radius above the bottom.
            center.setY(y_bottom + (radius * 0.98))
            # Z: nudged slightly forward.
            center.setZ(center.z() - (radius * 0.05))
            head_rigidbody = RigidBody("頭接触回避", None, self.bones["頭"].index, 0, 0, 0, \
                                       MVector3D(radius, radius, radius), center, MVector3D(), 0, 0, 0, 0, 0, 0)
            head_rigidbody.bone_name = "頭"
            head_rigidbody.is_arm_upper = True
            return head_rigidbody
        return None
    # Get the toe-tip vertex.
    def get_toe_vertex(self, direction: str):
        """Return the frontmost low vertex of the foot on *direction* ("右"/"左").

        Candidate bones are same-side bones at or below the ankle (or leg-IK)
        bone height.  Falls back to the toe / toe-IK / ankle / leg-IK bone
        position wrapped in a dummy Vertex (index -1) when nothing matches.
        """
        # Bones below the ankle that are not finger bones.
        bone_name_list = []
        target_bone_name = None
        if "{0}足首".format(direction) in self.bones:
            target_bone_name = "{0}足首".format(direction)
        elif "{0}足IK".format(direction) in self.bones:
            target_bone_name = "{0}足IK".format(direction)
        else:
            # No foot-end bone at all: nothing to do.
            return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        # Collect same-side bones at or below the reference bone's height
        # (right side is X < 0, left side is X > 0 in this model space).
        for bk, bv in self.bones.items():
            if ((direction == "右" and bv.position.x() < 0) or (direction == "左" and bv.position.x() > 0)) \
               and bv.position.y() <= self.bones[target_bone_name].position.y() and direction in bk:
                bone_name_list.append(bk)
        if len(bone_name_list) == 0:
            # No weighted bones: fall back to toe-system bone positions.
            if "{0}つま先".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}つま先".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}つま先IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}つま先IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}足首".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足首".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}足IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            else:
                return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_original, def_is_target=None, \
                                       def_is_multi_target=self.def_is_multi_target_down_front, multi_target_default_val=MVector3D(0, 99999, 99999))
        if not front_max_vertex:
            # No toe vertex found: same bone-position fallbacks as above.
            if "{0}つま先".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}つま先".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}つま先IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}つま先IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}足首".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足首".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            elif "{0}足IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            else:
                return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        return front_max_vertex
    # Get the sole (bottom-of-foot) vertex.
    def get_sole_vertex(self, direction: str):
        """Return the lowest-and-frontmost vertex of the foot on *direction*
        ("右"/"左"), using the combined multi-target filter.

        Falls back to the leg-IK bone position wrapped in a dummy Vertex
        (index -1) when nothing matches.
        """
        # Bones below the ankle that are not finger bones.
        bone_name_list = []
        target_bone_name = None
        if "{0}足首".format(direction) in self.bones:
            target_bone_name = "{0}足首".format(direction)
        elif "{0}足IK".format(direction) in self.bones:
            target_bone_name = "{0}足IK".format(direction)
        else:
            # No foot-end bone at all: nothing to do.
            return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        # Collect same-side bones at or below the reference bone's height.
        for bk, bv in self.bones.items():
            if ((direction == "右" and bv.position.x() < 0) or (direction == "左" and bv.position.x() > 0)) \
               and bv.position.y() <= self.bones[target_bone_name].position.y() and direction in bk:
                bone_name_list.append(bk)
        if len(bone_name_list) == 0:
            # No weighted bones: fall back to the leg-IK bone position.
            if "{0}足IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            else:
                return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_original, def_is_target=None, \
                                       def_is_multi_target=self.def_is_multi_target_down_front_sole, multi_target_default_val=MVector3D(0, 99999, 99999))
        if not multi_max_vertex:
            # No sole vertex found: fall back to the leg-IK bone position.
            if "{0}足IK".format(direction) in self.bones:
                return Vertex(-1, self.bones["{0}足IK".format(direction)].position, MVector3D(), [], [], Bdef1(-1), -1)
            else:
                return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        return multi_max_vertex
    # Get the vertex used to measure palm thickness.
    def get_wrist_vertex(self, direction: str):
        """Return the lowest vertex (in arm-horizontal space) weighted to the
        wrist bone ("{direction}手首"); this approximates the palm thickness.

        Tries with the X-range restriction first, then without; finally falls
        back to the wrist bone position wrapped in a dummy Vertex (index -1).
        """
        bone_name_list = []
        if "{0}手首".format(direction) in self.bones:
            for bk, bv in self.bones.items():
                if "{0}手首".format(direction) == bk:
                    bone_name_list.append(bk)
        else:
            # No wrist bone: nothing to do.
            return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        # Arm slope (strictly, the slope from the elbow onward).
        _, arm_stance_qq = self.calc_arm_stance("{0}ひじ".format(direction), "{0}手首".format(direction))
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_horizonal, def_is_target=self.def_is_target_x_limit, \
                                       def_is_multi_target=self.def_is_multi_target_down_front, multi_target_default_val=MVector3D(0, 99999, 99999), qq4calc=arm_stance_qq)
        if not down_max_vertex:
            # Palm-bottom vertex not found: retry without the X restriction.
            up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
                back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
                = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_horizonal, def_is_target=None, \
                                           def_is_multi_target=None, multi_target_default_val=None, qq4calc=arm_stance_qq)
            if not down_max_vertex:
                # Still nothing: use the wrist bone position itself.
                return Vertex(-1, self.bones["{0}手首".format(direction)].position.copy(), MVector3D(), [], [], Bdef1(-1), -1)
        return down_max_vertex
    # Get the vertex used to locate the fingertip.
    def get_finger_tail_vertex(self, finger_name: str, finger_tail_name: str):
        """Return the sideways-extreme vertex (in arm-horizontal space)
        weighted to *finger_name*: the rightmost for a left-side finger, the
        leftmost for a right-side one.

        Falls back to the finger bone position wrapped in a dummy Vertex
        (index -1).  NOTE(review): *finger_tail_name* is unused in this body.
        """
        bone_name_list = []
        # First character of the bone name is the direction prefix ("右"/"左").
        direction = finger_name[0]
        if finger_name in self.bones:
            bone_name_list.append(finger_name)
        else:
            # No finger bone: nothing to do.
            return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        # Arm slope (strictly, the slope from the wrist onward).
        _, arm_stance_qq = self.calc_arm_stance("{0}手首".format(direction), finger_name)
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_horizonal, def_is_target=None, \
                                       def_is_multi_target=None, multi_target_default_val=None, qq4calc=arm_stance_qq)
        if direction == "左" and right_max_vertex:
            return right_max_vertex
        if direction == "右" and left_max_vertex:
            return left_max_vertex
        # Nothing found: fall back to the finger bone position.
        return Vertex(-1, self.bones[finger_name].position.copy(), MVector3D(), [], [], Bdef1(-1), -1)
    # Get the vertex used to measure elbow thickness.
    def get_elbow_vertex(self, direction: str):
        """Return the lowest vertex (in arm-horizontal space) weighted to the
        arm ("{direction}腕") or elbow ("{direction}ひじ") bone.

        Tries with the X-range restriction first, then without; finally falls
        back to the elbow bone position wrapped in a dummy Vertex (index -1).
        """
        bone_name_list = []
        if "{0}ひじ".format(direction) in self.bones or "{0}腕".format(direction) in self.bones:
            # Just in case, target both the arm bone and the elbow bone.
            for bk, bv in self.bones.items():
                if "{0}腕".format(direction) == bk:
                    bone_name_list.append(bk)
                if "{0}ひじ".format(direction) == bk:
                    bone_name_list.append(bk)
        else:
            # No elbow/arm bone: nothing to do.
            return Vertex(-1, MVector3D(), MVector3D(), [], [], Bdef1(-1), -1)
        # Arm slope (strictly, the slope from the elbow onward).
        _, arm_stance_qq = self.calc_arm_stance("{0}腕".format(direction), "{0}ひじ".format(direction))
        up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
            = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_horizonal, def_is_target=self.def_is_target_x_limit, \
                                       def_is_multi_target=self.def_is_multi_target_down_front, multi_target_default_val=MVector3D(0, 99999, 99999), qq4calc=arm_stance_qq)
        if not down_max_vertex:
            # Neither arm nor elbow vertex found: retry without the X restriction.
            up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
                back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex \
                = self.get_bone_end_vertex(bone_name_list, self.def_calc_vertex_pos_horizonal, def_is_target=None, \
                                           def_is_multi_target=None, multi_target_default_val=None, qq4calc=arm_stance_qq)
            if not down_max_vertex:
                # Still nothing: use the elbow bone position itself.
                return Vertex(-1, self.bones["{0}ひじ".format(direction)].position.copy(), MVector3D(), [], [], Bdef1(-1), -1)
        return down_max_vertex
# 頂点位置を返す(オリジナルそのまま)
def def_calc_vertex_pos_original(self, b: Bone, v: Vertex, qq4calc: MQuaternion):
return v.position
# 水平にした場合の頂点位置を返す
def def_calc_vertex_pos_horizonal(self, b: Bone, v: Vertex, qq4calc: MQuaternion):
horzinal_v_pos = qq4calc.inverted() * (v.position - self.bones["{0}ひじ".format(b.name[0])].position)
return horzinal_v_pos
# X軸方向の制限がかかった頂点のみを対象とする
def def_is_target_x_limit(self, b: Bone, v: Vertex, v_pos: MVector3D):
return v_pos.x() - 0.1 <= b.position.x() <= v_pos.x() + 0.1
# 最も底面でかつ前面にある頂点であるか
def def_is_multi_target_down_front(self, multi_max_pos: MVector3D, v_pos: MVector3D):
return v_pos.y() <= multi_max_pos.y() + 0.1 and v_pos.z() <= multi_max_pos.z()
# 最も底面でかつ前面にある頂点であるか
def def_is_multi_target_down_front_sole(self, multi_max_pos: MVector3D, v_pos: MVector3D):
return v_pos.y() <= multi_max_pos.y() + 0.1 and v_pos.z() <= multi_max_pos.z()
    # Scan the vertices weighted to the given bones for the extreme positions.
    def get_bone_end_vertex(self, bone_name_list, def_calc_vertex_pos, def_is_target=None, def_is_multi_target=None, multi_target_default_val=None, qq4calc=None):
        """Find the extreme vertices weighted to the bones in *bone_name_list*.

        def_calc_vertex_pos(bone, vertex, qq4calc) maps each vertex to the
        position used for comparison; def_is_target (optional) filters
        candidate vertices; def_is_multi_target (optional) tracks one extra,
        caller-defined extreme seeded with *multi_target_default_val*.

        Returns a 14-tuple (up_pos, up_vertex, down_pos, down_vertex,
        right_pos, right_vertex, left_pos, left_vertex, back_pos, back_vertex,
        front_pos, front_vertex, multi_pos, multi_vertex); each vertex slot is
        None when no vertex matched in that direction.
        """
        # Bone indexes (from the requested names) that actually have weighted vertices.
        bone_idx_list = []
        for bk, bv in self.bones.items():
            if bk in bone_name_list and bv.index in self.vertices:
                bone_idx_list.append(bv.index)
        if len(bone_idx_list) == 0:
            logger.test("bone_name: %s, ウェイト頂点がない", bone_name_list)
            # No weighted bones at all: return the neutral defaults.
            return MVector3D(), None, MVector3D(), None, MVector3D(), None, MVector3D(), None, MVector3D(), None, MVector3D(), None, MVector3D(), None
        logger.test("model: %s, bone_name: %s, bone_idx_list:%s", self.name, bone_name_list, bone_idx_list)
        # Sentinel extremes chosen so that any real position beats them.
        up_max_pos = MVector3D(0, -99999, 0)
        up_max_vertex = None
        down_max_pos = MVector3D(0, 99999, 0)
        down_max_vertex = None
        right_max_pos = MVector3D(99999, 0, 0)
        right_max_vertex = None
        left_max_pos = MVector3D(-99999, 0, 0)
        left_max_vertex = None
        back_max_pos = MVector3D(0, 0, -99999)
        back_max_vertex = None
        front_max_pos = MVector3D(0, 0, 99999)
        front_max_vertex = None
        multi_max_pos = multi_target_default_val
        multi_max_vertex = None
        for bone_idx in bone_idx_list:
            if bone_idx not in self.bone_indexes:
                continue
            # Bone corresponding to this index.
            bone = self.bones[self.bone_indexes[bone_idx]]
            for v in self.vertices[bone_idx]:
                v_pos = def_calc_vertex_pos(bone, v, qq4calc)
                if def_is_target and def_is_target(bone, v, v_pos) or not def_is_target:
                    # Only vertices passing the filter (or all of them when no filter is given).
                    if v_pos.y() < down_max_pos.y():
                        # New lowest vertex.
                        down_max_pos = v_pos
                        down_max_vertex = v
                    if v_pos.y() > up_max_pos.y():
                        # New highest vertex.
                        up_max_pos = v_pos
                        up_max_vertex = v
                    if v_pos.x() < right_max_pos.x():
                        # New minimum-X vertex ("right" side: negative X, as used by the foot scans).
                        right_max_pos = v_pos
                        right_max_vertex = v
                    if v_pos.x() > left_max_pos.x():
                        # New maximum-X vertex ("left" side: positive X).
                        left_max_pos = v_pos
                        left_max_vertex = v
                    if v_pos.z() < front_max_pos.z():
                        # New minimum-Z vertex, treated as frontmost.
                        front_max_pos = v_pos
                        front_max_vertex = v
                    if v_pos.z() > back_max_pos.z():
                        # New maximum-Z vertex, treated as backmost.
                        back_max_pos = v_pos
                        back_max_vertex = v
                    if def_is_multi_target and def_is_multi_target(multi_max_pos, v_pos):
                        # Caller-defined combined extreme (e.g. lowest-and-frontmost).
                        multi_max_pos = v_pos
                        multi_max_vertex = v
        return up_max_pos, up_max_vertex, down_max_pos, down_max_vertex, right_max_pos, right_max_vertex, left_max_pos, left_max_vertex, \
            back_max_pos, back_max_vertex, front_max_pos, front_max_vertex, multi_max_pos, multi_max_vertex
@classmethod
def get_effective_value(cls, v):
if math.isnan(v):
return 0
if math.isinf(v):
return 0
return v
@classmethod
def set_effective_value_vec3(cls, vec3):
vec3.setX(cls.get_effective_value(vec3.x()))
vec3.setY(cls.get_effective_value(vec3.y()))
vec3.setZ(cls.get_effective_value(vec3.z()))
|
<gh_stars>1-10
import datetime
import json
from django.db.models import Count
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from ..models import Task
class TaskTestCase(TestCase):
    """View-level tests for task listing, detail, creation, status changes,
    and the report/JSON endpoints.

    setUp logs in as a regular user and creates one task in each status:
    incomplete (the default status=1), complete, and ready_for_review.
    """

    def setUp(self):
        self.user = self.create_user()
        self.client.login(username='ragsagar', password='password')
        self.task = self.create_task()
        self.create_task(title="Completed Task",
                         status=Task.STATUS_CHOICES.complete)
        self.create_task(title="Task Ready for Review",
                         status=Task.STATUS_CHOICES.ready_for_review)

    def create_task(self, title="Test task", status=1, priority=1):
        """Create and return a Task created by and assigned to self.user."""
        data = {
            'created_by': self.user,
            'title': title,
            'priority': priority,
            'module': 'CRM',
            'due_date': datetime.date(2014, 4, 2),
            'type': 3,
            'description': 'testing task',
            'assigned_user': self.user,
            'status': status,
        }
        return Task.objects.create(**data)

    def create_user(self, **kwargs):
        """Create a User; keyword args override the default credentials."""
        user_data = {}
        user_data['username'] = 'ragsagar'
        user_data['password'] = 'password'
        user_data.update(kwargs)
        user = User.objects.create_user(**user_data)
        return user

    def test_list_tasks_view(self):
        """
        The task list view shows every non-complete task.
        """
        list_tasks_url = reverse('list_tasks')
        response = self.client.get(list_tasks_url)
        self.assertEqual(response.status_code, 200)
        tasks = Task.objects.all().exclude(status=Task.STATUS_CHOICES.complete)
        self.assertEqual(len(response.context_data['task_list']), tasks.count())
        self.assertNotIn(
            Task.STATUS_CHOICES.complete,
            response.context_data['task_list'].values_list('status', flat=True))
        self.assertTemplateUsed(response, 'tasks/task_list.html')
        self.assertIn(str(self.task.get_absolute_url()),
                      response.rendered_content)

    def test_list_incomplete_tasks_view(self):
        """
        The incomplete-task list contains only incomplete tasks.
        """
        list_tasks_url = reverse('list_incomplete_tasks')
        response = self.client.get(list_tasks_url)
        self.assertEqual(response.status_code, 200)
        tasks = Task.objects.filter(status=Task.STATUS_CHOICES.incomplete)
        self.assertEqual(len(response.context_data['task_list']), tasks.count())
        status_of_all_tasks = response.context_data['task_list'].values_list(
            'status',
            flat=True)
        self.assertNotIn(Task.STATUS_CHOICES.complete, status_of_all_tasks)
        self.assertNotIn(Task.STATUS_CHOICES.ready_for_review,
                         status_of_all_tasks)
        self.assertTemplateUsed(response, 'tasks/task_list.html')
        self.assertIn(str(self.task.get_absolute_url()),
                      response.rendered_content)

    def test_list_unreviewed_tasks_view(self):
        """
        The unreviewed-task list contains only ready_for_review tasks.
        """
        list_tasks_url = reverse('list_unreviewed_tasks')
        response = self.client.get(list_tasks_url)
        self.assertEqual(response.status_code, 200)
        tasks = Task.objects.filter(status=Task.STATUS_CHOICES.ready_for_review)
        self.assertEqual(len(response.context_data['task_list']), tasks.count())
        status_of_all_tasks = response.context_data['task_list'].values_list(
            'status',
            flat=True)
        self.assertNotIn(Task.STATUS_CHOICES.complete, status_of_all_tasks)
        self.assertNotIn(Task.STATUS_CHOICES.incomplete, status_of_all_tasks)
        self.assertTemplateUsed(response, 'tasks/task_list.html')
        self.assertNotIn(str(self.task.get_absolute_url()),
                         response.rendered_content)

    def test_list_completed_tasks_view(self):
        """
        The completed-task list contains only complete tasks.
        """
        list_tasks_url = reverse('list_completed_tasks')
        response = self.client.get(list_tasks_url)
        self.assertEqual(response.status_code, 200)
        tasks = Task.objects.filter(status=Task.STATUS_CHOICES.complete)
        self.assertEqual(len(response.context_data['task_list']), tasks.count())
        status_of_all_tasks = response.context_data['task_list'].values_list(
            'status',
            flat=True)
        self.assertNotIn(Task.STATUS_CHOICES.ready_for_review, status_of_all_tasks)
        self.assertNotIn(Task.STATUS_CHOICES.incomplete, status_of_all_tasks)
        self.assertTemplateUsed(response, 'tasks/task_list.html')
        self.assertNotIn(str(self.task.get_absolute_url()),
                         response.rendered_content)

    def test_detail_task_view(self):
        """
        Test detail task view page.
        """
        detail_url = reverse('task_detail', kwargs={'pk': self.task.pk})
        response = self.client.get(detail_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context_data['task'], self.task)
        self.assertTemplateUsed(response, 'tasks/task_detail.html')

    def test_create_task_view(self):
        """
        Test view to create new task: GET renders the form, POST creates
        a task and redirects.
        """
        create_task_url = reverse('create_task')
        response = self.client.get(create_task_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tasks/task_form.html')
        user = User.objects.get(username='ragsagar')
        data = {
            'title': 'Test task 2',
            'priority': 1,
            'module': 'HRMS',
            'due_date': datetime.date(2014, 4, 5),
            'type': 1,
            'description': 'This is a description',
            'assigned_user_id': user.pk,
        }
        old_count = Task.objects.all().count()
        response = self.client.post(create_task_url, data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Task.objects.all().count(), old_count+1)

    def test_set_task_ready_view(self):
        """
        Test the view to set task status as ready to be reviewed.
        """
        self.task.status = Task.STATUS_CHOICES.incomplete
        self.task.save()
        pk = self.task.pk
        url = reverse('set_task_ready', kwargs={'pk': pk})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
        task = Task.objects.get(pk=pk)
        self.assertEqual(task.status, Task.STATUS_CHOICES.ready_for_review)
        self.assertIsNotNone(task.completed_at)

    def test_set_task_incomplete_view(self):
        """
        Test the view to set task status as incomplete.
        """
        self.task.status = Task.STATUS_CHOICES.ready_for_review
        self.task.save()
        pk = self.task.pk
        url = reverse('set_task_incomplete', kwargs={'pk': pk})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
        task = Task.objects.get(pk=pk)
        self.assertEqual(task.status, Task.STATUS_CHOICES.incomplete)

    def test_set_task_complete_view(self):
        """
        Only staff users may mark a task complete: a regular user gets 403
        and the task is untouched; a staff user succeeds and is recorded
        as the reviewer.
        """
        self.task.status = Task.STATUS_CHOICES.ready_for_review
        self.task.save()
        pk = self.task.pk
        url = reverse('set_task_complete', kwargs={'pk': pk})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 403)
        task = Task.objects.get(pk=pk)
        self.assertIsNone(task.reviewed_by)
        self.assertEqual(task.status, Task.STATUS_CHOICES.ready_for_review)
        # Create a staff user and login as staff user
        staff_user = self.create_user(username='staff_user',
                                      password='password',)
        staff_user.is_staff = True
        staff_user.save()
        self.client.login(username='staff_user', password='password')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
        task = Task.objects.get(pk=pk)
        self.assertEqual(task.reviewed_by, staff_user)
        self.assertEqual(task.status, Task.STATUS_CHOICES.complete)

    def test_report_home_view(self):
        """
        The report home view exposes per-status task counts in its context.
        """
        url = reverse('report_home')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        tasks = Task.objects.all()
        incomplete_tasks_count = tasks.filter(
            status=Task.STATUS_CHOICES.incomplete).count()
        unreviewed_tasks_count = tasks.filter(
            status=Task.STATUS_CHOICES.ready_for_review).count()
        completed_tasks_count = tasks.filter(
            status=Task.STATUS_CHOICES.complete).count()
        self.assertEqual(response.context_data['incomplete_task_count'],
                         incomplete_tasks_count)
        self.assertEqual(response.context_data['unreviewed_tasks_count'],
                         unreviewed_tasks_count)
        # FIX: this previously re-checked 'unreviewed_tasks_count' against
        # completed_tasks_count (copy-paste bug that only passed because both
        # counts are 1 in this fixture).  NOTE(review): confirm the view
        # exposes the completed count under the 'completed_tasks_count' key.
        self.assertEqual(response.context_data['completed_tasks_count'],
                         completed_tasks_count)

    def test_tasks_json_view(self):
        """
        Test the json view of tasks by status and module.
        """
        url = reverse('task_by_status_json')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # setUp created exactly one task per status, all in module 'CRM'.
        tasks_by_status = [
            {'data': 1, 'label': 'Incomplete'},
            {'data': 1, 'label': 'Ready for Review'},
            {'data': 1, 'label': 'Complete'}
        ]
        tasks_by_module = [{'data': 3, 'label': u'CRM'}]
        json_string = response.content.decode('utf-8')
        data = json.loads(json_string)
        self.assertEqual(data.get('task_by_status'), tasks_by_status)
        self.assertEqual(data.get('task_by_module'), tasks_by_module)
|
<gh_stars>1-10
from PyMIPS.Datastructure.instruction_types import IType, RType, JType
from PyMIPS.AST.validator import validate
import unittest
class TestRType(unittest.TestCase):
    """Validation tests for R-type (register-format) instruction construction."""

    def test_all(self):
        """Every well-formed R-type instruction must construct cleanly."""
        # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt
        # and `self.fail()` discarded the actual error; catch Exception and
        # report it in the failure message instead.
        try:
            RType("add", "$t3", "$t2", "$t1")
            RType("addu", "$t3", "$t2", "$t1")
            RType("and", "$t3", "$t2", "$t1")
            RType("nor", "$t3", "$t2", "$t1")
            RType("or", "$t3", "$t2", "$t1")
            RType("slt", "$t3", "$t2", "$t1")
            RType("sltu", "$t3", "$t2", "$t1")
            RType("sub", "$t3", "$t2", "$t1")
            RType("subu", "$t3", "$t2", "$t1")
            RType("xor", "$t3", "$t2", "$t1")
            RType("or", "$s0", "$t0", "$t3")
            RType("div", "$t3", "$t2")
            RType("divu", "$t3", "$t2")
            RType("jalr", "$t3", "$t2")
            RType("mult", "$t3", "$t2")
            RType("mul", "$t3", "$t2")
            RType("move", "$t3", "$t2")
            RType("jr", "$ra")
            RType("mfhi", "$t3")
            RType("mflo", "$t3")
            RType("mthi", "$t3")
            RType("mtlo", "$t3")
            RType("syscall", destination=None)
        except Exception as e:
            self.fail("valid R-type instruction raised: {}".format(e))

    def test_bad_3(self):
        """Malformed three-operand instructions must raise."""
        with self.assertRaises(Exception):
            RType("add", "$t3")
        with self.assertRaises(Exception):
            RType("add", "$t3df", "$tdfs", "45")
        with self.assertRaises(Exception):
            RType("sub", "$t3", "4")
        with self.assertRaises(Exception):
            RType("move", "$3", "t435", "$vew")
        with self.assertRaises(Exception):
            # lw is not an R-type mnemonic.
            RType("lw", "$t3", "$s4", "gfd4")

    def test_bad_2(self):
        """Malformed two-operand instructions must raise."""
        with self.assertRaises(Exception):
            RType("add", "$t3", "$t4")
        with self.assertRaises(Exception):
            RType("div", "$t3")
        with self.assertRaises(Exception):
            RType("mul", "$", "3")

    def test_bad_1(self):
        """Malformed one-operand instructions must raise."""
        with self.assertRaises(Exception):
            RType("mfhi", "$t3", "$t4")
        with self.assertRaises(Exception):
            RType("mflo", None)
        with self.assertRaises(Exception):
            RType("mflo", "tr")

    def test_bad_0(self):
        """syscall takes no operands."""
        with self.assertRaises(Exception):
            RType("syscall", "$t3")
class TestIType(unittest.TestCase):
    """Validation tests for I-type (immediate-format) instruction construction."""

    def test_all(self):
        """Every well-formed I-type instruction must construct cleanly."""
        # FIX: bare `except:` replaced with `except Exception as e` so
        # SystemExit/KeyboardInterrupt pass through and the real error is
        # included in the failure message.
        try:
            IType("addi", "$t3", 6, "$t4")
            IType("addiu", "$t3", 6, "$t4")
            IType("andi", "$t3", 0000, "$t4")
            IType("beq", "$t3", "label", "$t4")
            IType("ori", "$t3", 6, "$t1")
            IType("xori", "$t3", 6, "$t4")
            IType("bgez", "$t3", "label")
            IType("sw", "$t0", -4, "$sp")
            IType("lw", "$t3", 5)
            IType("la", "$t3", 543)
            IType("la", "$t3", 5453, "$t2")
            IType("li", "$t0", 4)
            IType("bne", "$t0", "label", "$t8")
            IType("bltz", "$t3", "label")
            IType("lui", "$t3", 100)
            IType("tgei", "$t3", 100)
        except Exception as e:
            self.fail("valid I-type instruction raised: {}".format(e))

    def test_bad_2(self):
        """Malformed instructions with a source register must raise."""
        with self.assertRaises(Exception):
            IType("lw", "$t0", immediate=None)
        with self.assertRaises(Exception):
            # $t32 is not a valid register.
            IType("add", "$t32", 10)
        with self.assertRaises(Exception):
            IType("lw", "t32", immediate=None)
        with self.assertRaises(Exception):
            IType("addi", "$t2", "5", "$t44")
        with self.assertRaises(Exception):
            IType("la", "$t2", 5, "$t44")

    def test_bad_1(self):
        """Malformed register/immediate-only instructions must raise."""
        with self.assertRaises(Exception):
            IType("li", "$t3", 10, source="$t2")
        with self.assertRaises(Exception):
            IType("lui", "100", 100)
        with self.assertRaises(Exception):
            IType("la", "$t3f", 34)
class TestJType(unittest.TestCase):
    """Validation tests for J-type (jump-format) instruction construction."""

    def test_all(self):
        """j/jal accept both label and numeric targets."""
        # FIX: bare `except:` replaced with `except Exception as e` so the
        # real error is reported and SystemExit/KeyboardInterrupt pass through.
        try:
            JType("j", "func1")
            JType("jal", "func2")
            JType("jal", 1)
        except Exception as e:
            self.fail("valid J-type instruction raised: {}".format(e))

    def test_bad(self):
        """A non-jump mnemonic must be rejected."""
        with self.assertRaises(Exception):
            JType("add", "funct1")
|
#!/usr/bin/env python
# Colored Logger from https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
import os
import logging
import time
import random
# ANSI color indices; the foreground escape code is 30 + index, the
# background code is 40 + index.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# These are the escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color=True):
    """Expand the $RESET/$BOLD placeholders in *message*.

    When *use_color* is false the placeholders are stripped instead, so the
    same format template works for both colored and plain handlers.
    """
    reset, bold = (RESET_SEQ, BOLD_SEQ) if use_color else ("", "")
    return message.replace("$RESET", reset).replace("$BOLD", bold)
# Log-level name -> ANSI color index used by ColoredFormatter.
COLORS = {
    'WARNING': CYAN,
    'INFO': MAGENTA,
    'DEBUG': BLUE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps the record's levelname in ANSI color codes."""

    def __init__(self, msg, use_color=True):
        logging.Formatter.__init__(self, msg)
        self.use_color = use_color

    def format(self, record):
        """Colorize record.levelname, then delegate to the base formatter.

        NOTE(review): the record is mutated in place, so any later handler
        sees the colored levelname too — this mirrors the original behavior.
        """
        name = record.levelname
        if self.use_color and name in COLORS:
            record.levelname = "{0}{1}{2}".format(
                COLOR_SEQ % (30 + COLORS[name]), name, RESET_SEQ)
        return logging.Formatter.format(self, record)
# Custom logger class with multiple destinations.
class ColoredLogger(logging.Logger):
    """Logger subclass that installs a colored console handler on creation."""

    FORMAT = "[%(asctime)s] [%(levelname)s] %(message)s"
    COLOR_FORMAT = formatter_message(FORMAT, True)

    def __init__(self, name):
        logging.Logger.__init__(self, name, logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(ColoredFormatter(self.COLOR_FORMAT))
        self.addHandler(console_handler)
# Install ColoredLogger as the class used by logging.getLogger() below.
logging.setLoggerClass(ColoredLogger)
logger = logging.getLogger(__name__)
# logging.basicConfig(format='[%(asctime)s] %(filename)s:%(lineno)d %(levelname)s %(message)s',
#                     level=logging.DEBUG)
def get_random_system_info():
    """Return one environment-derived path string, chosen at random.

    FIX: the original used os.environ['PWD'] and ['HOME'] (and ['PATH'])
    directly, which raises KeyError when those variables are unset (e.g. when
    run outside an interactive shell or under cron); use .get() with sensible
    fallbacks instead.
    """
    path = os.environ.get('PATH', '')
    pwd = os.environ.get('PWD', os.getcwd())
    tmpdir = os.environ.get('TMPDIR', '/tmp/')
    home = os.environ.get('HOME', os.path.expanduser('~'))
    return random.choice([path, pwd, tmpdir, home])
def get_random_nodejs_package():
    """Return the name of a popular npm package, chosen at random."""
    popular_npm_packages = [
        "underscore", "async", "request", "lodash", "commander", "express",
        "optimist", "colors", "coffee-script", "mkdirp", "debug", "q",
        "chalk", "yeoman-generator", "moment", "glob", "through2", "jade",
        "uglify-js", "socket.io", "gulp-util", "redis", "cheerio", "through",
        "node-uuid", "connect", "winston", "mime", "minimist", "bluebird",
        "grunt", "handlebars", "mongodb", "rimraf", "semver", "ejs",
        "mongoose", "marked", "xml2js", "underscore.string", "fs-extra",
        "mocha", "js-yaml", "superagent", "less", "extend", "esprima",
        "jquery", "stylus", "body-parser", "xtend", "jsdom", "event-stream",
        "shelljs", "minimatch", "prompt", "browserify", "wrench", "ws",
        "mysql", "readable-stream", "yosay", "inherits", "when", "pkginfo",
        "backbone", "nopt", "cli-color", "concat-stream", "passport",
        "nodemailer", "gulp", "chai", "inquirer", "nconf", "validator",
        "yargs", "mustache", "qs", "clean-css", "npm", "ncp", "should",
        "open", "aws-sdk", "graceful-fs", "temp", "http-proxy", "iconv-lite",
        "requirejs", "socket.io-client", "hiredis", "uuid", "promise",
        "escodegen", "bower", "oauth", "log4js", "cli-table"
    ]
    return random.choice(popular_npm_packages)
def get_random_status():
    """Return a random HTTP-status-like or npm-lifecycle-like status string."""
    status_strings = [
        "100 Continue", "101 Switching Protocols", "102 Processing", "200 OK",
        "201 Created", "202 Accepted", "203 Non-Authoritative Information",
        "204 No Content", "205 Reset Content", "206 Partial Content",
        "207 Multi-Status", "208 Already Reported", "226 IM Used (RFC 3229)",
        "300 Multiple Choices", "301 Moved Permanently", "302 Found",
        "303 See Other", "304 Not Modified", "305 Use Proxy",
        "306 Switch Proxy", "307 Temporary Redirect", "308 Permanent Redirect",
        "prepublish", "postinstall", "install", "rebuildBundles", "linkMans",
        "linkBins", "linkStuff", "install", "about to build", "addNamed",
        "lock", "etag", "parsed url", "search", "query", "host", "auth",
        "slashes", "cache add", "GET", "POST", "trying", "installOne",
        "tar unpack"
    ]
    return random.choice(status_strings)
def get_random_version():
    """Return a fake semver string 'vX.Y.Z' with each part drawn from 1..9."""
    # Draw the three parts in major, minor, patch order so the random
    # stream is consumed exactly as before.
    parts = [random.randint(1, 9) for _ in range(3)]
    return "v" + ".".join(str(p) for p in parts)
def get_random_message():
    """Return a random fake npm-install log line.

    The candidate list is rebuilt on every call, so each helper call below
    consumes the random stream before the final choice is made.
    """
    candidates = [
        "it worked if it ends with ok",
        "cli [ 'node', '" + get_random_system_info() + "','install','--verbose' ]",
        "using npm@1.4.28 " + get_random_system_info(),
        "using node@v0.10.32",
        "readDependencies using package.json deps",
        "install where, deps " + get_random_system_info() + ", [ '" + get_random_nodejs_package() + "' ] ]",
        "readDependencies using package.json deps",
        "already installed skipping " + get_random_nodejs_package() + "@" + get_random_version(),
        "already installed skipping boganipsum@0.1.0 " + get_random_system_info(),
        "build /Users/samuel/Documents/bebusy",
        "linkStuff [false, false, false, '/Users/samuel/Documents']",
        "rebuildBundles " + get_random_nodejs_package() + "@" + get_random_version(),
        "rebuildBundles ['.bin', 'boganipsum', 'colors']",
        "install " + get_random_nodejs_package() + "@" + get_random_version(),
        "postinstall " + get_random_nodejs_package() + "@" + get_random_version(),
        "prepublish " + get_random_nodejs_package() + "@" + get_random_version(),
        "preinstall " + get_random_nodejs_package() + "@" + get_random_version(),
        "linkStuff " + get_random_nodejs_package() + "@" + get_random_version(),
        "linkBins " + get_random_nodejs_package() + "@" + get_random_version(),
        "linkMans " + get_random_nodejs_package() + "@" + get_random_version(),
        "exit [0, true]",
        "ok"
    ]
    return random.choice(candidates)
if __name__ == "__main__":
    # Emit an endless stream of fake npm-style log lines at random levels.
    # FIX: logger.warn is a deprecated alias of logger.warning.
    func_list = [logger.info, logger.debug, logger.warning, logger.error, logger.critical]
    while True:
        f = random.choice(func_list)
        f(get_random_message())
        # Short random pause so the output scrolls at a believable rate.
        time.sleep(random.random() / 5)
|
<reponame>ntkleynhans/stp-app
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function
import requests
import sys
import json
import os
import codecs
import logging
import logging.handlers
# Some constants
# NOTE(review): BASEURL is left as None here and is used as a URL prefix by
# the Downloader methods below — presumably assigned elsewhere before they
# run; verify against the caller.
BASEURL = None
# Format the logger output
class CustomFormatter(logging.Formatter):
    """Formatter that lets callers override ``funcName``.

    If a record carries a ``funcname`` attribute (passed through logging's
    ``extra=`` mechanism), it replaces the automatically captured
    ``funcName`` before formatting.
    """
    def format(self, record):
        try:
            record.funcName = record.funcname
        except AttributeError:
            # No override supplied; keep the captured function name.
            pass
        return super(CustomFormatter, self).format(record)
# Editor testing logging
# Log file is rotated daily via TimedRotatingFileHandler.
LOGNAME = "DELETEPROJECT"
LOGFNAME = "deleteproject.log"
LOGLEVEL = logging.DEBUG
try:
    fmt = "%(asctime)s [%(levelname)s] %(name)s in %(funcName)s(): %(message)s"
    LOG = logging.getLogger(LOGNAME)
    formatter = CustomFormatter(fmt)
    # Rotate once per day ("D", interval=1); UTF-8 so non-ASCII project
    # names log cleanly.
    ofstream = logging.handlers.TimedRotatingFileHandler(LOGFNAME, when="D", interval=1, encoding="utf-8")
    ofstream.setFormatter(formatter)
    LOG.addHandler(ofstream)
    LOG.setLevel(LOGLEVEL)
except Exception as e:
    # Logging is mandatory for this tool: bail out if it cannot be set up
    # (e.g. the log file is not writable).
    print("FATAL ERROR: Could not create logging instance: {}".format(e), file=sys.stderr)
    sys.exit(1)
class Downloader:
    """Thin client for the project REST API: logs in as the project user,
    lists all projects and deletes them one by one."""

    def __init__(self, config):
        # NOTE(review): the config key below looks redacted upstream;
        # confirm the real key name used in config.json.
        self.user_password = config['<PASSWORD>']
        self.user_token = None
        self.project_id = None
        self.project_user = config['project_user']
        self.project_token = None

    def login_project(self):
        """
        Login as user.
        Places the session 'token' in self.project_token.
        """
        if self.project_token is None:
            LOG.info("{} logging in".format(self.project_user))
            headers = {"Content-Type" : "application/json"}
            # Fix: send the stored password; the original referenced an
            # undefined bare name here (NameError at runtime).
            data = {"username": self.project_user, "password": self.user_password, "role" : 'project'}
            res = requests.post(BASEURL + "projects/login", headers=headers, data=json.dumps(data))
            LOG.info('login(): SERVER SAYS:{}'.format(res.text))
            pkg = res.json()
            self.project_token = pkg['token']
        else:
            LOG.info("User logged in already!")

    def logout_project(self):
        """
        Logout as user: invalidates and clears self.project_token.
        """
        if self.project_token is not None:
            headers = {"Content-Type" : "application/json"}
            data = {"token": self.project_token}
            res = requests.post(BASEURL + "projects/logout", headers=headers, data=json.dumps(data))
            LOG.info('logout(): SERVER SAYS:{}'.format(res.text))
            self.project_token = None
        else:
            LOG.info("User not logged in!")

    def listprojects(self):
        """
        Fetch all projects for the logged-in user into self.project_info.
        """
        if self.project_token is not None:
            LOG.info("Creating project")
            headers = {"Content-Type" : "application/json"}
            data = {"token": self.project_token }
            res = requests.post(BASEURL + "projects/listprojects", headers=headers, data=json.dumps(data))
            LOG.info('loadproject(): SERVER SAYS:{}'.format(res.text))
            LOG.info(res.status_code)
            pkg = res.json()
            self.project_info = pkg['projects']
        else:
            LOG.info("User not logged in!")

    def deleteproject(self, projectid):
        """
        Delete the project identified by *projectid*.
        """
        if self.project_token is not None:
            LOG.info("Deleting Project -- {}".format(projectid))
            headers = {"Content-Type" : "application/json"}
            data = {"token": self.project_token, "projectid" : projectid}
            res = requests.post(BASEURL + "projects/deleteproject", headers=headers, data=json.dumps(data))
            LOG.info('deleteproject(): SERVER SAYS:{}'.format(res.text))
            LOG.info(res.status_code)
            # Fix: reset the attribute declared in __init__ (was the
            # never-read 'self.projectid').
            self.project_id = None
        else:
            LOG.info("User not logged in!")
if __name__ == "__main__":
if not os.path.exists('config.json'):
print('ERROR: cannot load config.json file in current folder')
sys.exit(1)
config = json.load(open('config.json'))
BASEURL = config['baseurl']
downloader = Downloader(config)
downloader.login_project()
downloader.listprojects()
for project in downloader.project_info:
print('Deleting project -- {}'.format(project['projectid']))
downloader.deleteproject(project['projectid'])
downloader.logout_project()
|
import math
import torch
import torch.nn as nn
from sparsemax import Sparsemax
class TabNetModel(nn.Module):
    """TabNet-style network for tabular data.

    Runs `n_steps` sequential decision steps; each step selects features
    with a sparse attentive mask, transforms them, and contributes both a
    decision output and a feature reconstruction.
    """

    # Class-level defaults; copied per instance in __init__ (see below).
    params = {}

    def __init__(self, **kwargs):
        super(TabNetModel, self).__init__()
        # Fix: copy before updating -- mutating the shared class-level dict
        # would leak configuration between model instances.
        self.params = dict(self.params)
        self.params.update(kwargs)
        self.__batch_norm = nn.BatchNorm1d(self.params["n_input_dims"], momentum=self.params["batch_norm_momentum"])
        # One embedding per categorical column, keyed by column index.
        self.__embedding_layers = nn.ModuleDict()
        for key, val in sorted(self.params["categorical_config"].items(), key=lambda k: k[1]["idx"]):
            self.__embedding_layers[str(val["idx"])] = nn.Embedding(val["n_dims"], self.params["embedding_dim"])
        self.__feature_transformer_shared = SharedFeatureTransformer(**self.params)
        # step_id -1 marks the base transformer used before the first step.
        self.__feature_transformer_individual_base = IndividualFeatureTransformer(-1, **self.params)
        self.__feature_transformer_individual = nn.ModuleList([IndividualFeatureTransformer(i, **self.params) for i in range(self.params["n_steps"])])
        self.__attentive_transformer = nn.ModuleList(
            [AttentiveTransformer(i, **self.params) for i in range(self.params["n_steps"])])
        self.__reconstruction_fc = nn.ModuleList([nn.Linear(int(self.params["n_dims_d"] + self.params["n_dims_a"]), self.params["n_input_dims"]) for i in range(self.params["n_steps"])])
        self.__output_fc = nn.Linear(self.params["n_dims_d"], self.params["n_output_dims"])

    def forward(self, X_continuous, X_embedding, init_mask, mask_input=False):
        """Run all decision steps.

        Returns (assembled input X, logits, summed feature reconstructions,
        per-step masks).
        """
        if len(list(X_continuous.size())) != 2:
            raise ValueError("Shape mismatch: expected order 2 tensor, got order {} tensor".format(len(list(X_continuous.size()))))
        # Concatenate continuous features with embedded categorical columns.
        X = torch.cat([X_continuous] + [self.__embedding_layers[str(key)](val) for key, val in X_embedding.items() if key != -1], dim=-1)
        if mask_input:
            X = init_mask * X
        X_bn = self.__batch_norm(X)
        # Initial attention activations from the base transformer.
        a_i_minus_1 = self.__feature_transformer_individual_base(self.__feature_transformer_shared(X_bn))[..., self.params["n_dims_d"] :]
        p_i_minus_1 = (init_mask)
        gamma = self.params["gamma"] * torch.ones_like(X)
        step_wise_outputs = []
        step_wise_feature_reconstruction = []
        step_wise_masks = []
        for i in range(self.params["n_steps"]):
            mask_i = self.__attentive_transformer[i](p_i_minus_1, a_i_minus_1)
            feat_transform_i = self.__feature_transformer_individual[i](self.__feature_transformer_shared(mask_i * X_bn))
            # Split into decision (d) and attention (a) halves.
            d_i, a_i = (feat_transform_i[..., : self.params["n_dims_d"]], feat_transform_i[..., self.params["n_dims_d"] :])
            # Update prior scales so heavily used features are discouraged.
            p_i_minus_1 = p_i_minus_1 * (gamma - mask_i)
            step_wise_masks.append(mask_i)
            step_wise_outputs.append(nn.functional.relu(d_i))
            step_wise_feature_reconstruction.append(self.__reconstruction_fc[i](feat_transform_i))
            a_i_minus_1 = a_i
        reconstructions = torch.stack(step_wise_feature_reconstruction, dim=0).sum(dim=0, keepdim=False)
        logits = self.__output_fc(torch.stack(step_wise_outputs, dim=0).sum(dim=0, keepdim=False))
        return X, logits, reconstructions, tuple(step_wise_masks)
class SharedFeatureTransformer(nn.Module):
params = {}
def __init__(self, **kwargs):
super(SharedFeatureTransformer, self).__init__()
self.params.update(kwargs)
self.fc_one = nn.Linear(self.params["n_input_dims"], (self.params["n_dims_a"] + self.params["n_dims_d"]) * 2)
self.bn_one = nn.BatchNorm1d((self.params["n_dims_a"] + self.params["n_dims_d"]) * 2, momentum=self.params["batch_norm_momentum"])
self.fc_two = nn.Linear((self.params["n_dims_a"] + self.params["n_dims_d"]), (self.params["n_dims_a"] + self.params["n_dims_d"]) * 2)
self.bn_two = nn.BatchNorm1d((self.params["n_dims_a"] + self.params["n_dims_d"]) * 2, momentum=self.params["batch_norm_momentum"])
self.dropout = nn.Dropout(p=self.params["dropout_p"])
def forward(self, X):
X_slice_one = nn.functional.glu(self.bn_one(self.fc_one(X)))
X_slice_two = nn.functional.glu(self.bn_two(self.fc_two(X_slice_one)))
return self.dropout((X_slice_two + X_slice_one) * math.sqrt(0.5))
class IndividualFeatureTransformer(nn.Module):
params = {}
step_id = 0
def __init__(self, step_id, **kwargs):
super(IndividualFeatureTransformer, self).__init__()
self.step_id = step_id
self.params.update(kwargs)
self.fc_one = nn.Linear((self.params["n_dims_a"] + self.params["n_dims_d"]), (self.params["n_dims_a"] + self.params["n_dims_d"]) * 2)
self.bn_one = nn.BatchNorm1d((self.params["n_dims_a"] + self.params["n_dims_d"]) * 2, momentum=self.params["batch_norm_momentum"])
self.fc_two = nn.Linear((self.params["n_dims_a"] + self.params["n_dims_d"]), (self.params["n_dims_a"] + self.params["n_dims_d"]) * 2)
self.bn_two = nn.BatchNorm1d((self.params["n_dims_a"] + self.params["n_dims_d"]) * 2, momentum=self.params["batch_norm_momentum"])
self.dropout = nn.Dropout(p=self.params["dropout_p"])
def forward(self, X):
X_slice_one = nn.functional.glu(self.bn_one(self.fc_one(X)))
X_slice_one = self.dropout((X_slice_one + X) * math.sqrt(0.5))
X_slice_two = nn.functional.glu(self.bn_two(self.fc_two(X_slice_one)))
return (X_slice_one + X_slice_two) * math.sqrt(0.5)
class AttentiveTransformer(nn.Module):
    """Produces the sparse feature-selection mask for one decision step."""

    # Class-level defaults; copied per instance in __init__.
    params = {}
    step_id = 0

    def __init__(self, step_id, **kwargs):
        super(AttentiveTransformer, self).__init__()
        self.step_id = step_id
        # Fix: copy before updating -- mutating the shared class-level dict
        # would leak configuration between instances.
        self.params = dict(self.params)
        self.params.update(kwargs)
        self.fc = nn.Linear(self.params["n_dims_a"], self.params["n_input_dims"])
        self.bn = nn.BatchNorm1d(num_features=self.params["n_input_dims"], momentum=self.params["batch_norm_momentum"])
        self.sparsemax = Sparsemax(dim=-1)

    def forward(self, p_i_prev, a_i_prev):
        # Prior scales p weight down features already used in earlier steps;
        # sparsemax yields an exactly-sparse mask over input features.
        return self.sparsemax(p_i_prev * self.bn(self.fc(a_i_prev)))
from project import app
from flask import render_template, redirect, request, url_for, session
from project.com.dao.LoginDAO import LoginDAO
from project.com.vo.LoginVO import LoginVO
from project.com.dao.BankDAO import BankDAO
from project.com.vo.BankVO import BankVO
from project.com.dao.BranchDAO import BranchDAO
from project.com.vo.BranchVO import BranchVO
from project.com.vo.StaffVO import StaffVO
from project.com.dao.StaffDAO import StaffDAO
from project.com.vo.ComplaintVO import ComplaintVO
from project.com.dao.ComplaintDAO import ComplaintDAO
from project.com.vo.ChequeVO import ChequeVO
from project.com.dao.ChequeDAO import ChequeDAO
@app.route('/checkLogin', methods=['post'])
def checkLogin():
    """Authenticate the posted credentials and render the dashboard that
    matches the user's role (admin / bank / cashier).

    NOTE(review): the password is compared in plain text against the stored
    value -- confirm whether hashing was intended.
    NOTE(review): there is no final else -- an unknown loginRole makes this
    view return None (HTTP 500); confirm roles are constrained upstream.
    """
    # Creating objects of LoginVO and LoginDAO
    loginDAO = LoginDAO()
    loginVO = LoginVO()
    # Getting infromation from HTML form and storing it in VO objects
    loginVO.loginEmail = request.form['loginEmail']
    loginVO.loginPassword = request.form['loginPassword']
    # print(loginVO.loginEmail)
    # print(loginVO.loginPassword)
    # Empty result means the email is unknown.
    loginDict = loginDAO.searchLogin(loginVO)
    print(loginDict)
    # print(loginDict['loginPassword'])
    # print(loginVO.loginPassword)
    if(len(loginDict) == 0):
        return render_template('admin/login.html', msg = 'Please enter valid Email Address')
    elif(loginVO.loginPassword != loginDict[0]['loginPassword']):
        return render_template('admin/login.html', msg='Please enter correct password...!!')
    elif(loginDict[0]['loginRole'] == 'admin'):
        # Storing loginEmail & Password in Session
        session['loginId'] = loginDict[0]['loginId']
        session['loginRole'] = loginDict[0]['loginRole']
        # Admin dashboard: complaints addressed to the admin plus all cheques.
        complaintDAO = ComplaintDAO()
        complaintVO = ComplaintVO()
        complaintVO.complaintTo_LoginId = str(session['loginId'])
        complaintDict = complaintDAO.getComplaintData(complaintVO)
        print('complaintDict: {}'.format(complaintDict))
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeDict = chequeDAO.getChequeData()
        print('chequeDict: {}'.format(chequeDict))
        return render_template('admin/index.html', complaintDict=complaintDict, chequeDict=chequeDict)
    elif (loginDict[0]['loginRole'] == 'bank'):
        # Storing loginEmail & Password in Session
        session['loginId'] = loginDict[0]['loginId']
        session['loginRole'] = loginDict[0]['loginRole']
        bankVO = BankVO()
        # bankVO.bankId = str(session['loginId'])
        bankDAO = BankDAO()
        # bankdata = bankDAO.getBankId(bankVO)
        # Resolve the bank record tied to this login and cache its id.
        loginVO.loginId = str(session['loginId'])
        bankId = bankDAO.getBankId(loginVO)
        print("+++++++++++++++++++++++++BANKDATA+++++++++++++++++++++++++++++++++++")
        print(bankId)
        bankId = bankId[0]['bankId']
        print(bankId)
        session['bankId'] = bankId
        complaintDAO = ComplaintDAO()
        complaintVO = ComplaintVO()
        complaintVO.complaintTo_LoginId = str(session['loginId'])
        complaintDict = complaintDAO.getComplaintData(complaintVO)
        print('complaintDict: {}'.format(complaintDict))
        # Total Branches, Employees , Issued Cheques
        branchVO = BranchVO()
        branchDAO = BranchDAO()
        branchVO.branch_BankId = str(bankId)
        totalBranchesDict = branchDAO.getBankBranches(branchVO)
        staffVO = StaffVO()
        staffDAO = StaffDAO()
        staffVO.staff_BankId = str(bankId)
        totalEmployeesDict = staffDAO.getBankEmployees(staffVO)
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeVO.cheque_FromBankId = str(bankId)
        totalChequesDict = chequeDAO.getIssuedCheques(chequeVO)
        # Merge the three summaries into one dict for the template.
        mainDisplayDict = {}
        mainDisplayDict.update(totalBranchesDict)
        mainDisplayDict.update(totalEmployeesDict)
        mainDisplayDict.update(totalChequesDict)
        print(mainDisplayDict)
        return render_template('bank/index.html',complaintDict=complaintDict, mainDisplayDict=mainDisplayDict)
    elif (loginDict[0]['loginRole'] == 'cashier'):
        # Storing loginEmail & Password in Session
        session['loginId'] = loginDict[0]['loginId']
        session['loginRole'] = loginDict[0]['loginRole']
        # Resolve the staff record tied to this login.
        staffVO = StaffVO()
        staffDAO = StaffDAO()
        staffVO.staff_LoginId = str(session['loginId'])
        staffIdDict = staffDAO.getStaffIds(staffVO)
        print(staffIdDict)
        staffId = staffIdDict[0]['staffId']
        staff_BankId = staffIdDict[0]['staff_BankId']
        staff_BranchId = staffIdDict[0]['staff_BranchId']
        bank_LoginId = staffIdDict[0]['bank_LoginId']
        print(staffId)
        # Cache ids the cheque pages need later in the session.
        session['staffId'] = staffId
        session['staff_BankId'] = staff_BankId  # For cheque
        session['staff_BranchId'] = staff_BranchId
        session['bank_LoginId'] = bank_LoginId  # Not needed
        # For display on index page
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeVO.cheque_StaffId = str(staffId)
        chequeDict = chequeDAO.StaffGetIssuedCheques(chequeVO)
        # branchVO = BranchVO()
        # branchDAO = BranchDAO()
        # branchVO.branchId = str(session['loginId'])
        # branchdata = branchDAO.getBranchId(branchVO)
        # branchId = branchdata[0]
        # return redirect(url_for(checkLogin), msg = 'You are not admin...!!!')
        return render_template('staff/index.html', chequeDict=chequeDict)
@app.route('/indexpage', methods=['get'])
def indexpage():
    """Re-render the role-specific dashboard for an already logged-in user.

    NOTE(review): this duplicates most of checkLogin's role branches and,
    like it, returns None for an unknown loginRole; it also raises KeyError
    if 'loginRole' is missing from the session (not logged in).
    """
    loginDAO = LoginDAO()
    loginVO = LoginVO()
    if session['loginRole'] == 'admin':
        # Admin dashboard: complaints addressed to the admin plus all cheques.
        complaintDAO = ComplaintDAO()
        complaintVO = ComplaintVO()
        complaintVO.complaintTo_LoginId = str(session['loginId'])
        complaintDict = complaintDAO.getComplaintData(complaintVO)
        print('complaintDict: {}'.format(complaintDict))
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeDict = chequeDAO.getChequeData()
        print('chequeDict: {}'.format(chequeDict))
        return render_template('admin/index.html', complaintDict=complaintDict, chequeDict=chequeDict)
    elif session['loginRole'] == 'bank':
        bankVO = BankVO()
        # bankVO.bankId = str(session['loginId'])
        bankDAO = BankDAO()
        # bankdata = bankDAO.getBankId(bankVO)
        # Resolve the bank record tied to this login and cache its id.
        loginVO.loginId = str(session['loginId'])
        bankId = bankDAO.getBankId(loginVO)
        print("+++++++++++++++++++++++++BANKDATA+++++++++++++++++++++++++++++++++++")
        print(bankId)
        bankId = bankId[0]['bankId']
        print(bankId)
        session['bankId'] = bankId
        complaintDAO = ComplaintDAO()
        complaintVO = ComplaintVO()
        complaintVO.complaintTo_LoginId = str(session['loginId'])
        complaintDict = complaintDAO.getComplaintData(complaintVO)
        print('complaintDict: {}'.format(complaintDict))
        # Total Branches, Employees , Issued Cheques
        branchVO = BranchVO()
        branchDAO = BranchDAO()
        branchVO.branch_BankId = str(bankId)
        totalBranchesDict = branchDAO.getBankBranches(branchVO)
        staffVO = StaffVO()
        staffDAO = StaffDAO()
        staffVO.staff_BankId = str(bankId)
        totalEmployeesDict = staffDAO.getBankEmployees(staffVO)
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeVO.cheque_FromBankId = str(bankId)
        totalChequesDict = chequeDAO.getIssuedCheques(chequeVO)
        # Merge the three summaries into one dict for the template.
        mainDisplayDict = {}
        mainDisplayDict.update(totalBranchesDict)
        mainDisplayDict.update(totalEmployeesDict)
        mainDisplayDict.update(totalChequesDict)
        print(mainDisplayDict)
        return render_template('bank/index.html', complaintDict=complaintDict, mainDisplayDict=mainDisplayDict)
    elif session['loginRole'] == 'cashier':
        # Resolve the staff record tied to this login.
        staffVO = StaffVO()
        staffDAO = StaffDAO()
        staffVO.staff_LoginId = str(session['loginId'])
        staffIdDict = staffDAO.getStaffIds(staffVO)
        print(staffIdDict)
        staffId = staffIdDict[0]['staffId']
        staff_BankId = staffIdDict[0]['staff_BankId']
        staff_BranchId = staffIdDict[0]['staff_BranchId']
        bank_LoginId = staffIdDict[0]['bank_LoginId']
        print(staffId)
        # Cache ids the cheque pages need later in the session.
        session['staffId'] = staffId
        session['staff_BankId'] = staff_BankId  # For cheque
        session['staff_BranchId'] = staff_BranchId
        session['bank_LoginId'] = bank_LoginId  # Not needed
        # For display on index page
        chequeDAO = ChequeDAO()
        chequeVO = ChequeVO()
        chequeVO.cheque_StaffId = str(staffId)
        chequeDict = chequeDAO.StaffGetIssuedCheques(chequeVO)
        # branchVO = BranchVO()
        # branchDAO = BranchDAO()
        # branchVO.branchId = str(session['loginId'])
        # branchdata = branchDAO.getBranchId(branchVO)
        # branchId = branchdata[0]
        # return redirect(url_for(checkLogin), msg = 'You are not admin...!!!')
        return render_template('staff/index.html', chequeDict=chequeDict)
@app.route('/logout', methods=['get'])
def logout():
    """End the current session and show the login page again."""
    # Drops loginId/loginRole and any cached bank/staff ids in one go.
    session.clear()
    return render_template('admin/login.html')
|
from __future__ import annotations
from typing import TextIO, Union
import abc
import pathlib
import re
# Type alias for the evaluation stack; kept as a string because Value and
# Operator are defined further down in the file.
Stack = "list[Union[Value, Operator]]"
def process_input(file: TextIO) -> list[Parser]:
    """Build a parsed Parser for every non-blank line of *file*."""
    parsers = []
    for line in file:
        if line.isspace():
            continue  # skip blank separator lines
        parsers.append(Parser(line).parse())
    return parsers
class Parser:
    """Tokenizes one expression line and builds its Parenthetical tree."""

    token_pattern = re.compile(r"\d+|[()+*]")

    def __init__(self, line: str):
        self.tokens = self.tokenize(line)
        self.root = Parenthetical()

    def evaluate(self) -> int:
        """Evaluate the parsed expression tree."""
        return self.root.value

    def parse(self) -> Parser:
        """Consume self.tokens, filling the tree rooted at self.root.

        A stack of open Parentheticals tracks nesting: '(' opens a child,
        ')' closes the innermost one, everything else is pushed into it.
        """
        open_groups = [self.root]
        for token in self.tokens:
            if token == "(":
                child = Parenthetical()
                open_groups[-1].push(child)
                open_groups.append(child)
            elif token == ")":
                open_groups.pop()
            elif token.isdigit():
                open_groups[-1].push(Number(token))
            else:
                open_groups[-1].push(Operator.create(token))
        return self

    def tokenize(self, line: str) -> list[str]:
        """Split *line* into number and symbol tokens (whitespace ignored)."""
        return self.token_pattern.findall(line)
class Value(abc.ABC):
    """Abstract expression node that evaluates to an integer."""

    @property
    @abc.abstractmethod
    def value(self) -> int:
        """The integer this node evaluates to."""
        ...
class Number(Value):
    """Leaf node wrapping a single integer literal."""

    def __init__(self, token: Union[str, int]):
        # int() accepts both the raw token string and pre-computed ints
        # (Parenthetical.do_addition re-wraps partial sums as Numbers).
        self._value = int(token)

    @property
    def value(self) -> int:
        return self._value

    def __repr__(self):
        return f"<{type(self).__name__}: {self.value}>"
class Parenthetical(Value):
    """A parenthesized (or top-level) expression: a flat token list where
    '+' binds tighter than '*' -- additions are collapsed first, then the
    remaining values are multiplied left to right."""

    def __init__(self):
        # Alternating Value / Operator tokens, in source order.
        self.tokens = []

    @property
    def value(self) -> int:
        if not self.tokens:
            return 0  # empty group evaluates to zero
        # Collapse all additions first, then multiply what remains.
        stack = self.do_addition(self.tokens[::-1]) # Reverse it so that we don't need to do pop(0)
        return self.do_multiplication(stack)

    def do_multiplication(self, stack: Stack) -> int:
        """Fold the remaining (value, operator, value, ...) stack left to
        right; after do_addition only multiplications remain."""
        value = stack.pop().value
        while stack:
            operator = stack.pop()
            next_value = stack.pop().value
            value = operator.evaluate(value, next_value)
        return value

    def do_addition(self, stack: Stack) -> Stack:
        """Collapse every addition in *stack* (which is reversed, so pop()
        yields tokens in source order) and return a reversed stack that
        contains only values and multiplication operators."""
        new_stack = [stack.pop()]
        while stack:
            operator = stack.pop()
            next_value = stack.pop()
            if isinstance(operator, AdditionOperator):
                # Replace "<top> + <next>" with a single Number.
                value = new_stack.pop().value
                new_stack.append(
                    Number(operator.evaluate(value, next_value.value))
                )
            else:
                new_stack.append(operator)
                new_stack.append(next_value)
        return new_stack[::-1]

    def push(self, token: Union[Value, Operator]):
        """Append a parsed token to this group."""
        self.tokens.append(token)

    def __repr__(self):
        return f"<{type(self).__name__}: {self.tokens}>"
class Operator(abc.ABC):
    """Binary operator node."""

    @abc.abstractmethod
    def evaluate(self, left: int, right: int) -> int:
        """Apply the operator to two operand values."""
        ...

    @classmethod
    def create(cls, token: str) -> Operator:
        """Factory mapping an operator token to its instance.

        Raises:
            ValueError: if *token* is not '+' or '*'.
        """
        if token == "+":
            return AdditionOperator()
        elif token == "*":
            return MultiplicationOperator()
        # Fix: fail loudly instead of silently returning None, which only
        # surfaced later as a confusing AttributeError during evaluation.
        raise ValueError(f"unknown operator token: {token!r}")

    def __repr__(self):
        return f"<{type(self).__name__}>"
class AdditionOperator(Operator):
    """Binary '+' operator."""

    def evaluate(self, augend: int, addend: int) -> int:
        return augend + addend
class MultiplicationOperator(Operator):
    """Binary '*' operator."""

    def evaluate(self, multiplicand: int, multiplier: int) -> int:
        return multiplicand * multiplier
if __name__ == "__main__":
input_file_path = pathlib.Path(__file__).parent / "test_input_b.txt"
with input_file_path.open() as input_file:
parsers = process_input(input_file)
print("Test Data")
print(
"\n".join(
f"{parser.evaluate()} should be {value}"
for parser, value in zip(parsers, (51, 46, 1445, 669060, 23340))
)
)
print("Final")
input_file_path = pathlib.Path(__file__).parent / "input.txt"
with input_file_path.open() as input_file:
parsers = process_input(input_file)
print(sum(parser.evaluate() for parser in parsers))
|
#!usr/bin/env python
import rospy
import numpy as np
import math
import cvxpy
import tf
from geometry_msgs.msg import Twist
from nav_msgs.msg import Path
from nav_msgs.msg import Odometry
from prius_msgs.msg import Control
from geometry_msgs.msg import PoseStamped
# NOTE(review): `global` statements at module level are no-ops; these four
# lines only document intent -- the names are created by plain assignment
# in the callbacks below.
global current_state
global path_message
global state_ref
global d_ref
# Index of the last usable pose in the received path (set in callback_path).
max_index = 0
# Desired cruise speed [m/s]; overwritten by the cmd_vel subscriber.
target_vel = 10
NX = 4  # state vector size: x, y, v, yaw
NU = 2  # input vector size: acceleration, steer
horizon_length = 20  # MPC prediction horizon (steps)
R = np.diag([0.01, 0.01])  # input cost matrix
Rd = np.diag([0.01, 0.01])  # input difference cost matrix
Q = np.diag([1.0, 1.0, 1.0, 1.0])  # state cost matrix
Qf = Q  # state final matrix
WB = 1.983  # wheelbase [m]
MAX_ITER = 3  # max linearization iterations per control cycle
DU_TH = 0.1  # convergence threshold on the change of the input sequence
DT = 0.2  # discretization step [s]
MAX_STEER = math.radians(30.0) # maximum steering angle [rad]
MAX_DSTEER = math.radians(30.0) # maximum steering speed [rad/s]
MAX_SPEED = 15.0 # maximum speed [m/s]
MIN_SPEED = -10.0 # minimum speed [m/s]
MAX_ACCEL = 1.0 # maximum accel [m/ss]
# Last optimal acceleration / steering sequences (warm start for the MPC).
oa, odelta = None, None
class State:
    """Vehicle state (position, speed, heading) mirrored from odometry."""

    def __init__(self, x = 0, y = 0, v = 0, yaw = 0):
        self.x = x
        self.y = y
        self.v = v
        self.yaw = yaw
        # NOTE(review): presumably the previous steering command; it is
        # never read in this file -- confirm before removing.
        self.predelta = None

    def update(self, data):
        # Refresh the state from a nav_msgs/Odometry message.
        print "updating state \n"
        self.x = data.pose.pose.position.x
        self.y = data.pose.pose.position.y
        # quaternion to euler (yaw-only) conversion
        siny = +2.0 * (data.pose.pose.orientation.w *
                       data.pose.pose.orientation.z +
                       data.pose.pose.orientation.x *
                       data.pose.pose.orientation.y)
        cosy = +1.0 - 2.0 * (data.pose.pose.orientation.y *
                             data.pose.pose.orientation.y +
                             data.pose.pose.orientation.z *
                             data.pose.pose.orientation.z)
        self.yaw = math.atan2(siny, cosy)  # yaw in radians
        # Project the world-frame twist onto the vehicle's heading to get
        # the forward (body-frame) speed.
        self.v = (data.twist.twist.linear.x * math.cos(self.yaw) +
                  data.twist.twist.linear.y * math.sin(self.yaw))
        print self.x, self.y, self.v, self.yaw, "\n"
# Shared vehicle state, updated in place by the odometry callback.
current_state = State(x=-0.0, y=-0.0, yaw=0.0, v=0.0)
def dist(a, x, y):
    """Euclidean distance from pose-stamped *a* to the point (x, y)."""
    dx = a.pose.position.x - x
    dy = a.pose.position.y - y
    return (dx * dx + dy * dy) ** 0.5
def prius_pub(data):
    '''
    Translate a Twist command into a prius Control message and publish it.
    published on topic : ackermann_cmd_topic
    '''
    global prius_vel
    prius_vel = Control()
    if data.linear.x > 0:
        # Positive linear velocity -> throttle, no brake.
        prius_vel.throttle = data.linear.x
        prius_vel.brake = 0
    if data.linear.x < 0:
        # Negative linear velocity -> brake, no throttle.
        prius_vel.brake = -data.linear.x
        prius_vel.throttle = 0
    # Normalize the steering command (degrees) into the [-1, 1] range the
    # Control message expects (30 degrees maximum).
    prius_vel.steer = data.angular.z / 30
    pub_vel.publish(prius_vel)
def calc_nearest_index(state, path_msg):
    '''
    Return the index of the path pose closest to the current state.
    '''
    distances = [dist(pose, state.x, state.y) for pose in path_msg.poses]
    nearest = min(distances)
    return distances.index(nearest)
def calc_ref_trajectory(path_msg):
    # Build the reference state (x, y, v, yaw) and reference steer for each
    # step of the horizon, starting from the path pose nearest the vehicle.
    # Once the horizon runs past the path end, the last pose is repeated.
    ref_state = np.zeros((NX, horizon_length + 1))
    ref_d = np.zeros((1, horizon_length + 1))
    global current_state
    nearest_index = calc_nearest_index(current_state, path_msg)
    print "nearest index is: ", nearest_index, "\n"
    for i in range(horizon_length + 1):
        if nearest_index + i < max_index:
            # quaternion -> yaw for the pose at this horizon step
            siny_path = +2.0 * (path_msg.poses[nearest_index + i].pose.orientation.w *
                                path_msg.poses[nearest_index + i].pose.orientation.z +
                                path_msg.poses[nearest_index + i].pose.orientation.x *
                                path_msg.poses[nearest_index + i].pose.orientation.y)
            cosy_path = +1.0 - 2.0 * (path_msg.poses[nearest_index + i].pose.orientation.y *
                                      path_msg.poses[nearest_index + i].pose.orientation.y +
                                      path_msg.poses[nearest_index + i].pose.orientation.z *
                                      path_msg.poses[nearest_index + i].pose.orientation.z)
            path_yaw = math.atan2(siny_path, cosy_path)  # yaw in radians
            ref_state[0,i] = path_msg.poses[nearest_index + i].pose.position.x
            ref_state[1,i] = path_msg.poses[nearest_index + i].pose.position.y
            ref_state[2,i] = target_vel
            ref_state[3,i] = path_yaw
            ref_d[0,i] = 0.0  # reference steer is always zero
        else:
            # Horizon extends past the path: clamp to the last pose.
            # NOTE(review): this indexes poses[max_index] directly, so
            # max_index must be a valid index (see callback_path).
            siny_path = +2.0 * (path_msg.poses[max_index].pose.orientation.w *
                                path_msg.poses[max_index].pose.orientation.z +
                                path_msg.poses[max_index].pose.orientation.x *
                                path_msg.poses[max_index].pose.orientation.y)
            cosy_path = +1.0 - 2.0 * (path_msg.poses[max_index].pose.orientation.y *
                                      path_msg.poses[max_index].pose.orientation.y +
                                      path_msg.poses[max_index].pose.orientation.z *
                                      path_msg.poses[max_index].pose.orientation.z)
            path_yaw = math.atan2(siny_path, cosy_path)  # yaw in radians
            ref_state[0,i] = path_msg.poses[max_index].pose.position.x
            ref_state[1,i] = path_msg.poses[max_index].pose.position.y
            ref_state[2,i] = target_vel
            ref_state[3,i] = path_yaw
            ref_d[0,i] = 0.0
    return ref_state, ref_d
def get_nparray_from_matrix(x):
    """Flatten any array-like (including np.matrix) into a fresh 1-D array."""
    return np.array(x).reshape(-1)
def predict_motion(x0, oa, od, xref):
    """Roll the motion model forward over the horizon.

    Returns an array shaped like *xref* whose column i holds the state
    reached after applying the first i accelerations/steers.
    """
    xbar = xref * 0.0
    for row, value in enumerate(x0):
        xbar[row, 0] = value
    state = State(x=x0[0], y=x0[1], yaw=x0[3], v=x0[2])
    # zip with the range caps the rollout at horizon_length steps.
    for acc, steer, col in zip(oa, od, range(1, horizon_length + 1)):
        state = update_state(state, acc, steer)
        xbar[0, col] = state.x
        xbar[1, col] = state.y
        xbar[2, col] = state.v
        xbar[3, col] = state.yaw
    return xbar
def update_state(state, a, delta):
    """Advance *state* one DT step (kinematic bicycle model), clamping the
    steering command and the resulting speed to their limits."""
    delta = max(-MAX_STEER, min(MAX_STEER, delta))
    state.x = state.x + state.v * math.cos(state.yaw) * DT
    state.y = state.y + state.v * math.sin(state.yaw) * DT
    state.yaw = state.yaw + state.v / WB * math.tan(delta) * DT
    state.v = state.v + a * DT
    state.v = max(MIN_SPEED, min(MAX_SPEED, state.v))
    return state
def get_linear_model_matrix(v, phi, delta):
    """Return (A, B, C) of the kinematic bicycle model linearized around
    speed *v*, heading *phi* and steer *delta* (discrete step DT).

    np.matrix is kept deliberately: the solver builds constraints with
    `A * x`, which must be a matrix product.
    """
    A = np.matrix(np.eye(NX))
    A[0, 2] = DT * math.cos(phi)
    A[0, 3] = -DT * v * math.sin(phi)
    A[1, 2] = DT * math.sin(phi)
    A[1, 3] = DT * v * math.cos(phi)
    A[3, 2] = DT * math.tan(delta) / WB
    B = np.matrix(np.zeros((NX, NU)))
    B[2, 0] = DT
    B[3, 1] = DT * v / (WB * math.cos(delta) ** 2)
    # Affine offset compensating for the linearization point.
    C = np.zeros(NX)
    C[0] = DT * v * math.sin(phi) * phi
    C[1] = -DT * v * math.cos(phi) * phi
    C[3] = v * delta / (WB * math.cos(delta) ** 2)
    return A, B, C
def iterative_linear_mpc_control(xref, x0, dref, oa, od):
    """
    MPC contorl with updating operational point iteraitvely

    Re-linearizes around the predicted trajectory until the input sequence
    stops changing (du <= DU_TH) or MAX_ITER iterations are reached.
    """
    if oa is None or od is None:
        # No warm start yet: begin from all-zero inputs.
        oa = [0.0] * horizon_length
        od = [0.0] * horizon_length
    for i in range(MAX_ITER):
        print "optimal acceleration is: ", oa, "\n"
        xbar = predict_motion(x0, oa, od, xref)
        poa, pod = oa[:], od[:]
        oa, od, ox, oy, oyaw, ov = linear_mpc_control(xref, xbar, x0, dref)
        du = sum(abs(oa - poa)) + sum(abs(od - pod))  # calc u change value
        if du <= DU_TH:
            break
    else:
        # for/else: only runs when the loop exhausted MAX_ITER without
        # converging (no break).
        print "Iterative is max iter"
    return oa, od, ox, oy, oyaw, ov
def linear_mpc_control(xref, xbar, x0, dref):
    """
    linear mpc control

    xref: reference point
    xbar: operational point
    x0: initial state
    dref: reference steer angle

    Solves one convex QP over the horizon; returns the optimal input
    sequences (oa, odelta) and the predicted trajectory (ox, oy, oyaw, ov),
    or all Nones when the solver fails.
    """
    x = cvxpy.Variable((NX, horizon_length + 1))
    u = cvxpy.Variable((NU, horizon_length))
    cost = 0.0
    constraints = []
    for t in range(horizon_length):
        # Penalize input effort at every step.
        cost += cvxpy.quad_form(u[:, t], R)
        if t != 0:
            # Penalize deviation from the reference (skip the fixed x0).
            cost += cvxpy.quad_form(xref[:, t] - x[:, t], Q)
        # Dynamics linearized around the operating trajectory xbar.
        A, B, C = get_linear_model_matrix(
            xbar[2, t], xbar[3, t], dref[0, t])
        constraints += [x[:, t + 1] == A * x[:, t] + B * u[:, t] + C]
        if t < (horizon_length - 1):
            # Smoothness cost and steering-rate limit between steps.
            cost += cvxpy.quad_form(u[:, t + 1] - u[:, t], Rd)
            constraints += [cvxpy.abs(u[1, t + 1] - u[1, t])
                            <= MAX_DSTEER * DT]
    # Terminal cost on the final state.
    cost += cvxpy.quad_form(xref[:, horizon_length] - x[:, horizon_length], Qf)
    constraints += [x[:, 0] == x0]
    constraints += [x[2, :] <= MAX_SPEED]
    constraints += [x[2, :] >= MIN_SPEED]
    constraints += [cvxpy.abs(u[0, :]) <= MAX_ACCEL]
    constraints += [cvxpy.abs(u[1, :]) <= MAX_STEER]
    prob = cvxpy.Problem(cvxpy.Minimize(cost), constraints)
    prob.solve(solver=cvxpy.ECOS, verbose=False)
    if prob.status == cvxpy.OPTIMAL or prob.status == cvxpy.OPTIMAL_INACCURATE:
        ox = get_nparray_from_matrix(x.value[0, :])
        oy = get_nparray_from_matrix(x.value[1, :])
        ov = get_nparray_from_matrix(x.value[2, :])
        oyaw = get_nparray_from_matrix(x.value[3, :])
        oa = get_nparray_from_matrix(u.value[0, :])
        odelta = get_nparray_from_matrix(u.value[1, :])
    else:
        print "Error: Cannot solve mpc.."
        oa, odelta, ox, oy, oyaw, ov = None, None, None, None, None, None
    return oa, odelta, ox, oy, oyaw, ov
def callback_path(data):
    """Cache the latest reference path and its last valid pose index."""
    global path_message
    global max_index
    path_message = data
    # Fix: store the largest valid index, not the length. The reference
    # trajectory builder indexes poses[max_index] directly, so len(poses)
    # caused an IndexError once the horizon ran past the path end.
    max_index = len(data.poses) - 1
def callback_vel(data):
    # Update the target cruise speed [m/s] from an incoming Twist command.
    global target_vel
    target_vel = data.linear.x
def callback_feedback(data):
    # Main control loop, driven by odometry: update the vehicle state,
    # build the reference trajectory, solve the MPC, publish the command
    # and the predicted path for visualization.
    global state_ref
    global target_vel
    global current_state
    global d_ref
    global path_message
    global oa
    global odelta
    global r
    output = Twist()
    optimal_path = Path()
    optimal_path.header.frame_id = rospy.get_param('~output_frame', 'map')
    print "entered odom_callback \n"
    print "current_state is ", data.pose.pose.position.x, data.pose.pose.position.y, data.twist.twist.linear.x
    current_state.update(data)
    # print current_state.x, current_state.y, current_state.v, current_state.yaw, "\n"
    # Reference over the horizon, anchored at the nearest path pose.
    state_ref, d_ref = calc_ref_trajectory(path_message)
    # print "reference state is: ", state_ref, "\n"
    x0 = [current_state.x, current_state.y, current_state.v, current_state.yaw]
    # oa/odelta from the previous cycle warm-start the solver.
    oa, odelta, ox, oy, oyaw, ov = iterative_linear_mpc_control(state_ref, x0, d_ref, oa, odelta)
    print "optimal x is: ", ox, "\n"
    print "optimal y is: ", oy, "\n"
    print "optimal yaw is: ", oyaw, "\n"
    print "optimal v is: ", ov, "\n"
    # Publish the predicted trajectory as a Path for RViz.
    for i in range(len(ox)):
        pose = PoseStamped()
        pose.pose.position.x = ox[i]
        pose.pose.position.y = oy[i]
        q = tf.transformations.quaternion_from_euler(0, 0, oyaw[i])
        pose.pose.orientation.x = q[0]
        pose.pose.orientation.y = q[1]
        pose.pose.orientation.z = q[2]
        pose.pose.orientation.w = q[3]
        optimal_path.poses.append(pose)
    if odelta is not None:
        # Apply only the first input of the optimal sequence.
        di, ai = odelta[0], oa[0]
        print "acceleration: ", oa, "\n"
        print "steering: ", odelta, "\n"
        print "\n"
        output.linear.x = ai
        # Convert the steer command from radians to degrees; prius_pub
        # then normalizes it by the 30-degree limit.
        output.angular.z = di * 180 / math.pi
        prius_pub(output)
    pub_ref.publish(optimal_path)
def start_mpc():
    # Set up the ROS node, publishers and subscribers, then spin forever.
    global pub_vel
    global pub_ref
    # NOTE(review): rospy.get_param with a private ('~') name is called
    # before init_node -- confirm this resolves correctly on this ROS
    # version.
    ackermann_cmd_topic = rospy.get_param('~ackermann_cmd_topic', '/prius')
    rospy.init_node("model_predictive_control", anonymous=True)
    pub_vel = rospy.Publisher(ackermann_cmd_topic, Control, queue_size=10)
    pub_ref = rospy.Publisher("reference_path", Path, queue_size=10)
    rospy.Subscriber("cmd_vel", Twist, callback_vel, queue_size=1)
    rospy.Subscriber("astroid_path", Path, callback_path, queue_size=1)
    # Odometry drives the whole control loop (callback_feedback).
    rospy.Subscriber("base_pose_ground_truth", Odometry, callback_feedback, queue_size=1)
    rospy.spin()
if __name__ == '__main__':
    start_mpc()
<reponame>aidden-laoch/sabre
"""
This module provides functionality for steganography.
It uses a configuration string with custom syntax to describe *where* and *how* data will be injected into a template.
**Stego Configuration Syntax Description**
* Tags
Tags are used to specify the functions that will be applied on each byte at injection and extraction.
* Templates
Templates are hex strings containing the Tag Letters wherever arbitrary data can be injected.
Example Syntax:
.. code:: python
# Comments symbol is traditionally the '#'
# -- Tags --
# Those are the tags. Declared as:
# Letter:<InjectionFunction>:<ExtractionFunction>
# Functions get evaluated with python 'eval' under the following context:
# _data_: byte to be injected, extracted
# _len_: packet length
# _index_: index of the byte injected/extracted
# _capacity_: Byte capacity of the packet as declared below
# _sxor_: Function that gets 2 char bytes and returns their XOR'd value
#
# Data functions that are reflective [applied twice to an input returns the input (e.g XOR operation)], do not need the <ExtractionFunction> part.
# Do need the last colon (:) though.
#
# Examples:
X:_data_: # inject the data as provided
K:_sxor_(_data_, '\\xaa'): # inject the data xor'd with '\\xaa' byte. Use the same function for extraction
L:chr(ord(_data_) + 1):chr(ord(_data_) - 1) # inject each byte incremented by 1. Decrement each byte before extraction.
# -- Packet Templates --
# Packet Templates, declared as:
# packet_template_name = '''Hex of the template packet with Tag Letters among the valid bytes''' []<groups>
# Groups are declared as:
# TagLetter[start:end]
# and will automatically replace all bytes between 'start' and 'end' with the given Tag Letter
#
# Those two templates are identical (Notice the Tag Letters between the Hex Values in `ip_tcp_syn2`)
ip_tcp_syn1 = '''450000280001000040067ccd7f0000017f00000100140050000000000000000050022000917c0000'''L[4:6],K[24:28],X[20:22]
ip_tcp_syn2 = '''45000028LLLL000040067ccd7f0000017f000001XXXX0050KKKKKKKK0000000050022000917c0000'''
# Whitespace and comments won't break the Strings
mac_ip_tcp_syn = '''ffffffffffff0000000000000800 # MAC header
450000280001000040067ccd7f0000017f000001 # IP header
00140050000000000000000050022000917c0000'''K[18:20],K[38:42],K[34:36]
"""
from covertutils.exceptions import *
from os import urandom
import logging
LOG = logging.getLogger( __name__ )
import re
from covertutils.helpers import sxor as _sxor_
from covertutils.helpers import str_similar
from copy import deepcopy
import codecs
# Compatibility shim: make sure the name `bytes` exists. Python 3 (and
# Python 2.6+) provide it natively; very old Python 2 falls back to `str`.
try:
    bytes # Python 3
except NameError:
    bytes = str # Python 2
class StegoInjector :
    """Parses a stego-template configuration and injects/extracts covert data
    into/out of packet templates made of hex digits and Tag Letters."""

    __comment_sign = '#'
    # Matches: name = '''hex template''' <optional group list>
    __pkt_regex = """(\w+)\s*=\s*['"]{1,3}([\w*\s]*)['"]{1,3}\s*([\[\d+:\d+\]\,[A-Z]*)"""
    # Matches a tag declaration: Letter:<InjectionFunction>:<ExtractionFunction>
    __tag_regex = '([A-Za-z]+):(.*?):(.*)]?'
    # Ensures a data function actually references the '_data_' placeholder.
    __data_regex = '[\s(]+_data_[,\s\.)]+'
    # Matches a group declaration like X[4:6].
    # NOTE(review): the class '[A-Za-b]' covers A-Z plus only 'a' and 'b' —
    # presumably a typo for '[A-Za-z]'; confirm before relying on lowercase tags.
    __group_regex = '([A-Za-b])\[(\d+)+:(\d+)\]'
    __comment_regex = '%s.*' % __comment_sign
    # Characters that cannot serve as Tag Letters (they are valid hex digits).
    __not_permitted_chars = '1234567890ABCDEFabcdef'
    def __init__( self, stego_template, hex_inject = False ) :
        # hex_inject: when True, data is injected as hex text (halving capacity).
        self.hex_inject = hex_inject
        self.__tags, self.__packets = self.__parseStegoScheme( stego_template )
        # print self.__packets
    def __parseStegoScheme( self, stego_template ) :
        # Parse the raw template configuration into (tag_dict, pkt_dict).
        # Remove comments
        stego_template = re.sub( self.__comment_regex,'',stego_template )
        tags = re.findall( self.__tag_regex, stego_template )
        tag_dict = {}
        for tag, inj_function, extr_function in tags :
            tag = tag.upper()
            # print tag, function
            if len(tag) != 1 :
                raise StegoSchemeParseException( "MultiCharacter Tags are not allowed. Redefine '%s'" % tag )
            if tag in self.__not_permitted_chars :
                raise StegoSchemeParseException( "Tag '%s' is a Letter used in Hex. Tags must be different from 'ABCDEF'" % tag )
            if tag in list(tag_dict.keys()) :
                raise StegoSchemeParseException( "Tag '%s' is already defined." % tag )
            inj_function = '(%s)' % inj_function
            extr_function = '(%s)' % extr_function
            if extr_function == '()' :
                # No extraction half given: the function is reflective, so the
                # injection function doubles as the extraction function.
                extr_function = inj_function
            f_match = re.findall( self.__data_regex, inj_function )
            if not f_match :
                raise StegoSchemeParseException( "Injection function for Tag: '%s' does not contain '_data_' keyword" % tag )
            f_match = re.findall( self.__data_regex, extr_function )
            if not f_match :
                raise StegoSchemeParseException( "Extraction function for Tag: '%s' does not contain '_data_' keyword" % tag )
            # print tag, function
            LOG.debug("Adding tag '%s' with Data Functions: < %s > : < %s >" % (tag, inj_function, extr_function))
            tag_dict[tag] = {
                "inj_function": inj_function,
                "extr_function": extr_function
            }
        pkt_dict = {}
        pkt_list = re.findall( self.__pkt_regex, stego_template)
        for pkt_name, hex_pkt, groups in pkt_list :
            # print pkt_name, hex_pkt, groups
            hex_pkt = re.sub("\s*",'', hex_pkt)    # strip all whitespace from the hex body
            # print "%s" % groups
            if groups :
                # print "Groups Found!"
                group_str_list = groups.split(',')
                group_list = []
                for group_str in group_str_list :
                    if not group_str : continue
                    # print "+"+group_str
                    formatted_group = re.findall( self.__group_regex, group_str)[0]
                    # print formatted_group
                    group_list.append( formatted_group )
                hex_pkt = self.__applyGroups( hex_pkt, group_list, list(tag_dict.keys()))
            if self.__checkPermittedChars( hex_pkt, list(tag_dict.keys()) ) :
                cap = self.__getCapacityDict( hex_pkt , list(tag_dict.keys()) )
                pkt_dict[ pkt_name ] = ( hex_pkt, cap )
        return tag_dict, pkt_dict
    def getTemplates( self ) :
        """Return the names of all parsed packet templates."""
        return list(self.__packets.keys())
    def getCapacityDict( self, template ) :
        """
        :param str template: The name of the template whose capacity dict is desired.
        :rtype: dict
        :return: The template's capacity dict containing Tag Letters as keys and capacity of each Tag in bytes as values.

        A sample *configuration* :

        .. code:: python

            X:_data_:
            Y:_data_:
            sample='''4141XX4242YYYY'''

        Example ::

            psi = StegoInjector( configuration )
            psi.getCapacityDict( 'sample1' )
            { 'X' : 1, 'Y' : 2 }
        """
        return self.__packets[template][1]
    def getCapacity( self, template, tag = None ) :
        """
        :param str template: The name of the template whose capacity is desired.
        :rtype: int
        :return: The template's capacity in bytes
        """
        # NOTE(review): the 'tag' parameter is accepted but never used — the
        # total across all tags is always returned.
        return sum(self.__packets[template][1].values())
    def __getCapacityDict( self, pkt, tag_chars ) :
        # Count how many data bytes each Tag Letter can carry in 'pkt'.
        caps = {}
        for tag in tag_chars :
            caps[tag] = pkt.count(tag) // 2 # in bytes
            if self.hex_inject : # if bytes injected in hex
                caps[tag] = caps[tag] // 2 # divide by 2 once more
        return caps
    def __checkPermittedChars( self, pkt, tag_chars ) :
        # Validate that every character of the template is either a hex digit
        # or a declared Tag Letter; raises on the first offender.
        for c in pkt :
            c = c.upper()
            if c not in self.__not_permitted_chars and c not in tag_chars :
                raise StegoSchemeParseException( "Char '%s' in Packet '%s' is not Hex Digit nor Tag" % (c, pkt) )
                return False    # unreachable (after raise); kept as in the original
        return True
    def __applyGroups( self, pkt, groups, tag_chars ) :
        # Replace the hex digits of every byte in [start, end) with the
        # group's Tag Letter.
        group_str = '%s[%d, %d]'    # used only for error messages
        # NOTE(review): bytearray(str) here and str(bytearray) on return are
        # Python 2 semantics; under Python 3 these behave differently —
        # confirm the intended interpreter.
        pkt = bytearray(pkt)
        for tag, start, end in groups :
            start = int(start)
            end = int(end)
            group_repr = group_str % (tag, start, end)
            if tag not in tag_chars :
                raise StegoSchemeParseException( "Group Tag '%s' in Group: '%s' is not defined." % (tag, group_repr) )
            if start > end :
                raise StegoSchemeParseException( "Starting byte is greater than Ending Byte in Group %s" % group_repr)
            for hex_index in range(0, len( pkt ), 2) :
                byte_index = hex_index // 2
                # print hex_index, byte_index
                if byte_index >= start and byte_index < end :
                    pkt[ hex_index ] = tag
                    pkt[ hex_index + 1] = tag
        # print pkt
        return str(pkt)
    # def blankPacket( self, packet, template ) :
    # 	# cap = self.getCapacity( template )
    # 	ret = ''
    # 	templ_pkt = self.getTemplate( template )
    # 	for c in templ_pkt :
    #
    # 	pkt =
    # 	pkt = self.inject( '\x00'*cap, template, packet )
    # 	# print pkt.encode('hex')
    # 	return pkt
    def blankifyPacketFields( self, pkt, template, zero = False ) :
        # Overwrite every tagged position of 'pkt' with the Tag Letter itself,
        # or with NUL bytes when zero=True.
        # print pkt
        sample = self.getTemplate( template )
        pkt = bytearray(pkt)
        for i in range( len(sample) ) :
            char = sample[i]
            # print sample[i]
            if char in list(self.__tags.keys()) :
                if zero :
                    char = '\x00'
                pkt[i] = ord(char)
            # print i, char, pkt, "<<"
        return str(pkt)
    def injectByTag( self, data_dict, template, pkt = None ) :
        """
        :param dict data_dict: The data to be injected in a dict format, with *Tag Letters* as keys and Data to be injected where the specific Tag Letters are placed, as values.
        :param str template: The template that will be used to inject the data into.
        :param str pkt: A packet that matches the template in size, to inject the data instead of the template. A copy of the template will be used if this argument is not provided.
        :rtype: str
        :return: Template or packet with the given data injected.

        A sample *configuration* :

        .. code:: python

            X:_data_:
            Y:_data_:
            sample='''4141XX4242YY'''

        Example ::

            data_dict = { 'X' : '0', 'Y' : '1' }
            psi = StegoInjector( configuration )
            psi.injectByTag( data_dict, 'sample1' )
            'AA0BB1'
        """
        data_len = len( ''.join(list(data_dict.values())) )
        hex_pkt = self.__initializeInjection( data_len, template, pkt )
        sample_capacity = self.getCapacity( template )
        # print hex_pkt
        injection_dict = data_dict
        # print injection_dict
        # print self.getTemplate( template )
        # print hex_pkt
        # print pkt.encode('hex')
        pkt = self.__injectFromDict( hex_pkt, injection_dict, sample_capacity, template = template )
        # print (pkt)
        pkt = codecs.decode(pkt, 'hex')
        # print pkt
        # print injection_dict
        return pkt
    def inject( self, data, template, pkt = None ) :
        """
        :param str data: The data to be injected in raw bytes
        :param str template: The template that will be used to inject the data into.
        :param str pkt: A packet that matches the template in size, to inject the data instead of the template. A copy of the template will be used if this argument is not provided.
        :rtype: str
        :return: Template or packet with the given data injected.
        """
        data_len = len( data )
        hex_pkt = self.__initializeInjection( data_len, template, pkt )
        sample_capacity = self.getCapacity( template )
        hex_pkt = str(hex_pkt)
        # hex_pkt = bytearray(hex_pkt)
        if self.hex_inject :
            # NOTE(review): str.encode('hex') is Python 2-only; Python 3 needs
            # codecs.encode(...) as used elsewhere in this class — confirm.
            data = data.encode('hex')
        injection_dict = self.__createInjectionDict( data, template )
        inj_hex_pkt = self.__injectFromDict( hex_pkt, injection_dict, sample_capacity, template = template )
        # print injection_dict
        # print inj_hex_pkt
        pkt = codecs.decode(inj_hex_pkt, 'hex')
        return pkt
    def getTemplate( self, template ) :
        # Return the raw hex/tag string of a parsed template; raise if unknown.
        if template not in list(self.__packets.keys()) :
            raise TemplateNotFoundException( "Template '%s' is not available" % template)
        return self.__packets[ template ][0]
    def __initializeInjection( self, data_len, template, pkt = None ) :
        # Common validation for inject()/injectByTag(): returns the working
        # hex packet as a bytearray, sized and checked against the template.
        sample_packet = self.getTemplate( template )
        # print sample_packet
        sample_capacity = self.getCapacity( template )
        if pkt == None :
            pkt = deepcopy( sample_packet ) # COPY DEEPLY
            if data_len != sample_capacity :
                raise StegoDataInjectionException(
            "Trying to inject %d bytes in template '%s' with capacity '%d' bytes" % (data_len, template, sample_capacity)
            )
            # pkt = self.__blankifyPacketFields( pkt, template, )
            # pkt = self.injector.blankPacket( pkt, template )
        else :
            # A real packet was supplied: operate on its hex representation.
            pkt = codecs.encode(pkt, 'hex')
            # pkt = self.blankifyPacketFields(pkt, template)
        sample = bytearray( sample_packet )
        pkt = bytearray( pkt )
        # print(sample)
        # print(pkt)
        # print(len(sample), len(pkt))
        if len(sample) != len(pkt) :
            raise StegoDataInjectionException( "Given packet has not the same length with the Sample." )
        # if pkt != sample :
        # print (pkt)
        return pkt
    def __createInjectionDict( self, data, template ) :
        # Distribute the raw data over the Tag Letters in template order: each
        # tagged hex position in the template consumes one hex half-byte of data.
        # data = bytearray(data)
        # print hex_pkt
        hex_pkt = self.getTemplate(template)
        sample_capacity = self.getCapacity(template)
        data_hex = codecs.encode(data, 'hex')
        injection_dict = {}
        for tag in self.__tags :
            injection_dict[tag] = ''
        hex_pkt = str(hex_pkt)
        for hex_index, hex_char in enumerate( hex_pkt ) :
            # print hex_char, type(hex_char)
            # print hex_char, self.__tags.keys()
            if hex_char in list(self.__tags.keys()) :
                tag = hex_char
                # print tag
                # print "++++++++++++++++++++++++="
                half_byte_hex = data_hex[0] # pop(0) for strings
                data_hex = data_hex [1:]
                injection_dict[ tag ] += half_byte_hex
        for tag in list(injection_dict.keys()) :
            value = codecs.decode(injection_dict[tag], 'hex')
            injection_dict[tag] = bytearray( value )
        # print len( data_hex ), data_hex
        # print injection_dict
        assert len( data_hex ) == 0    # all data must have been consumed
        # print "success!"
        return injection_dict
    def __injectFromDict( self, pkt_initial, injection_dict, sample_cap, template = None ) :
        # Walk the template, replacing each Tag occurrence (2 hex digits = 1
        # byte) with the hex of the byte produced by the tag's injection function.
        # print injection_dict
        pkt_initial = bytearray(pkt_initial)
        template_packet = self.getTemplate( template )
        pkt_hex = bytearray(template_packet)
        for tag, data in list(injection_dict.items()) :
            data = bytearray(data)
            inj_function = self.__tags[ tag ]['inj_function']
            while data :
                data_byte = chr(data.pop(0))
                # NOTE(review): bytearray.index() with a str tag and
                # str.encode('hex') below are Python 2 semantics — confirm the
                # intended interpreter.
                hex1_index = pkt_hex.index( tag )
                byte_index = hex1_index // 2
                evaled_byte = self.__eval_environ( data_byte, inj_function, len(pkt_hex), byte_index, sample_cap )
                hex_byte = evaled_byte.encode('hex')
                # print tag, evaled_byte.encode('hex')
                pkt_hex[hex1_index] = hex_byte[0]
                # print type(hex_byte[0]), type(pkt_initial)
                pkt_initial[hex1_index] = hex_byte[0]
                # print pkt_hex
                hex2_index = pkt_hex.index( tag )
                pkt_hex[hex2_index] = hex_byte[1]
                pkt_initial[hex2_index] = hex_byte[1]
                # print pkt_hex
                # print pkt_initial
                # print hex1_index, hex2_index
        # print pkt_initial
        # pkt_initial = str(pkt_initial, 'utf8')
        return pkt_initial
    def extract( self, pkt, template ) :
        """
        :param str pkt: A packet that matches the template in size, that contains covert data the way the `template` provides.
        :param str template: The template that will be used to extract the data from. It must be the same with the one used to inject the data in the `pkt`.
        :rtype: str
        :return: The data extracted from the `pkt`
        """
        extract_dict = self.__initializeDataExtraction( pkt, template )
        data = bytearray()
        # print extract_dict
        for tag, value in sorted( extract_dict.items() ) :    # tags in alphabetical order
            data.extend( value )
        return str(data)
    def extractByTag( self, pkt, template ) :
        """Like extract(), but returns a dict keyed by Tag Letter instead of
        concatenating all tags' data."""
        return self.__initializeDataExtraction( pkt, template )
    def __initializeDataExtraction( self, pkt, template ) :
        # Reverse of __injectFromDict: walk the template, pull each tagged
        # hex pair out of the packet and run the tag's extraction function.
        extract_dict = {}
        pkt_hex = codecs.encode(bytes( pkt ), 'hex')
        if template not in list(self.__packets.keys()) :
            raise TemplateNotFoundException( "Template '%s' is not available" % template)
        sample_hex, sample_cap = self.__packets[ template ]
        data = ''
        sample_hex = bytearray(sample_hex)
        # print len(sample_hex), len(pkt_hex)
        if len(sample_hex) != len(pkt_hex) :
            raise StegoDataExtractionException("Given packet and Sample packet have not the same length")
        for tag, functions in sorted( self.__tags.items() ) :
            extr_function = functions['extr_function']
            extract_data_ = ''
            # NOTE(review): 'tag in sample_hex' / .index(tag) with a str tag
            # rely on Python 2 bytearray semantics — confirm.
            while tag in sample_hex :
                tag_index = sample_hex.index( tag )
                byte_index = tag_index // 2
                hex1 = pkt_hex[ tag_index ]
                sample_hex[ tag_index ] = '~' # Remove the Tag
                tag_index = sample_hex.index( tag )
                hex2 = pkt_hex[ tag_index ]
                sample_hex[ tag_index ] = '~' # Remove the Tag
                hex_str = hex1 + hex2
                raw_byte_ = codecs.decode(hex_str, 'hex')
                data_byte_ = self.__eval_environ\
                    ( raw_byte_, extr_function, len(pkt), byte_index, sample_cap )
                extract_data_ += data_byte_
                # print hex_str+"->"+data_byte_.encode('hex')
            if self.hex_inject :
                extract_data_ = codecs.decode(extract_data_, 'hex')
            extract_dict[tag] = bytearray( extract_data_ )
        # print sample_hex
        # print extract_dict.keys()
        return extract_dict
    def __eval_environ( self, _data_, function, _len_, _index_ ,_capacity_ ) :
        # SECURITY NOTE: 'function' comes straight from the stego-template
        # configuration and is executed with eval(); never feed untrusted
        # template strings to this class.
        # ============== Eval Environment ======
        return eval( function )
        # ======================================
    def guessTemplate( self, pkt ) :
        """
        This method tries to guess the used template of a data packet by computing similarity of all templates against it.

        :param str pkt: The data packet whose template is guessed.
        :rtype: str
        :return: A tuple containing the template name that matches best with the given packets and the similarity ratio.
        """
        ret = []
        for template in list(self.__packets.keys()) :
            cap = self.getCapacity( template )
            payload = "\x00" * cap
            pkt_test = self.inject( payload, template )
            templ_pkt = self.__packets[ template ][0]
            if len( pkt_test ) != len( pkt ) :
                continue    # sizes differ: this template cannot match
            # NOTE(review): str.encode('hex') is Python 2-only — confirm.
            pkt_hex = pkt.encode('hex')
            # pkt_test2 = self.__blankifyPacketFields(pkt_hex, template)
            pkt_test2 = pkt_hex
            # print pkt_hex
            pkt_test2 = self.inject( payload, template, pkt )
            sim_ratio = str_similar( pkt_test2, pkt_test )
            ret.append( ( template, sim_ratio ) )
        winner = sorted( ret, key = lambda tup:tup[1] )
        if not winner : return None # The template couldn't be guessed
        return winner[-1]
def asciiToHexTemplate( pkt, marker = '~', substitute = 'X' ) :
    """
    This module function converts an ASCII chunk with single-byte `markers` and returns a `template`.

    :param str pkt: The data packet in ASCII with `marker` byte where arbitrary bytes can be injected.
    :param str marker: The byte that will be interpreted as `marker`
    :param str substitute: The byte that will replace the marker bytes in the hex-`template` representation.
    :rtype: str
    :return: The template representation populated with the `substitute` wherever the `marker` byte was placed.

    Example:

    .. code:: python

        req = 'GET /search.php?q=~~~~~~~~\\n\\n'
        template = asciiToHexTemplate( req )
        print template
        474554202f7365617263682e7068703f713dXXXXXXXXXXXXXXXX0a0a
    """
    # BUGFIX: str.encode('hex') is Python 2-only (LookupError on Python 3).
    # '%02x' % ord(c) produces identical lowercase hex on both interpreters.
    marker_hex = '%02x' % ord( marker )
    pkt_hex = ''.join( '%02x' % ord( char ) for char in pkt )
    # Space the hex string into byte pairs so the marker's hex can only match
    # at byte boundaries (never straddling two adjacent bytes).
    pkt_hex_spaced = ' '.join( pkt_hex[i : i + 2]
                    for i in range( 0, len(pkt_hex) - 1, 2) )
    pkt_hex_spaced = pkt_hex_spaced.replace( marker_hex, substitute * 2 )
    return pkt_hex_spaced.replace(' ', '')
|
<reponame>gitter-badger/Py.Html
from io import StringIO
#Create, edit and read Html files
class HtmlPy():
    """Create and edit HTML files by rewriting their lines on disk.

    All editing methods read the whole target file into memory, splice new
    markup into the requested (1-based) line, and write the file back.
    """
    # Reusable markup snippets, completed via str.format().
    _menuHtmlConst="<nav id=""{Menuname}"">\n <ul>\n <li>Item</li>\n <li>Item</li>\n <li>Item</li>\n </ul>\n</nav>\n"
    _formHtmlConst="<form action='{actForm1}'>\n <label>Label</label>\n <input id='{idName}' type='text' name='name'>\n <input type='submit' value='save'>\n</form>"
    _geneticElement="<{element} id={idElement}>{value}</{element}>"
    _LinkElement="<link href={locaLink} rel={typeLink} type={type} />"

    def _read_lines(self, path):
        """Return the content of *path* as a list of lines."""
        with open(path, 'r') as f:
            return f.readlines()

    def _write_lines(self, path, lines):
        """Overwrite *path* with *lines*."""
        with open(path, 'w') as f:
            f.writelines(lines)

    def _append_to_line(self, path, line_index, content):
        """Append *content* to 1-based line *line_index* of *path* and save.

        Raises IndexError when the file has fewer than *line_index* lines.
        """
        lines = self._read_lines(path)
        lines[line_index - 1] += content
        self._write_lines(path, lines)

    def CreateNewHtml(self, NameHtml, LocalFile, PreCode):
        """Create a new HTML file at *LocalFile*.

        :param NameHtml: page title (used only when PreCode == 1).
        :param LocalFile: path of the file to create.
        :param PreCode: 1 writes a full HTML5 skeleton; any other value
            writes a bare ``<html></html>`` stub.
        """
        if PreCode == 1:
            with open(LocalFile, "w") as f:
                f.write("<!DOCTYPE html>")
                f.write("\n<html>")
                f.write("\n<head> <title>{Html}</title>".format(Html=NameHtml))
                f.write("\n<meta charset='UTF-8'>")
                # BUGFIX: a space was missing between the attributes, which
                # produced invalid markup (name='viewport'content='...').
                f.write("\n<meta name='viewport' content='width=device-width, initial-scale=1.0'>")
                f.write("\n<link> ")
                f.write("\n </head>")
                f.write("\n <body>")
                f.write("\n </body>")
                f.write("\n </html>")
        else:
            with open(LocalFile, "w") as f:
                f.write("<html></html>")

    def LinkCssStarter(self, CssName, HtmlPath):
        """Replace the first placeholder ``<link>`` line with a stylesheet link.

        Leaves the file untouched when no ``<link>`` placeholder exists
        (the previous version crashed with an IndexError in that case).
        """
        lines = self._read_lines(HtmlPath)
        for index, line in enumerate(lines):
            if "<link>" in line:
                lines[index] = "<link href='{Cssname}' rel='stylesheet' type='text/css' />\n".format(Cssname=CssName)
                break
        self._write_lines(HtmlPath, lines)

    def CreateMenuHtml(self, local, idMenu, LineIndex):
        """Insert a <nav> menu skeleton with id *idMenu* at line *LineIndex*."""
        self._append_to_line(local, LineIndex,
                             "\n" + self._menuHtmlConst.format(Menuname="'" + idMenu + "'"))

    def CreateFormHtml(self, local, idform, LineIndex, ActForm, IdnameInput):
        """Insert a form skeleton at line *LineIndex*.

        NOTE(review): *idform* is accepted but unused — kept for interface
        compatibility.
        """
        self._append_to_line(local, LineIndex,
                             "\n" + self._formHtmlConst.format(actForm1=ActForm, idName=IdnameInput) + "\n")

    def linkCssFile(self, local, CssPath, LineIndex):
        """Insert a <link> element referencing *CssPath* at line *LineIndex*.

        BUGFIX: the href used to be filled with *local* (the HTML file path)
        instead of the stylesheet path *CssPath*.
        """
        self._append_to_line(local, LineIndex,
                             self._LinkElement.format(locaLink=CssPath, typeLink="stylesheet", type="text/css") + "\n")

    def createNewElement(self, local, id, Element, Lineindex, Value):
        """Insert a generic ``<Element id=...>Value</Element>`` at line *Lineindex*."""
        self._append_to_line(local, Lineindex,
                             "\n" + self._geneticElement.format(element=Element, idElement=id, value=Value) + "\n")

    def responsiveWindowMeta(self, local, escale, LineIndex):
        """Insert a responsive viewport <meta> tag with initial-scale *escale*."""
        self._append_to_line(local, LineIndex,
                             "\n" + "<meta name='Viewport' content='width=device-width, initial-scale={scale}'>\n".format(scale=escale))

    def createAppSpaDefault(self, local, AppName, Content, LineIndex):
        """Insert the default SPA app container markup at line *LineIndex*."""
        element = "<div id='content'> \n<main> \n<div id='{name}' class='app default'>\n <div>{content}</div>\n </div> \n </main>\n</div>\n".format(name=AppName, content=Content)
        self._append_to_line(local, LineIndex, element)

    def createAppSpa(self, local, AppName, Content, LineIndex):
        """Insert a SPA app container markup at line *LineIndex*."""
        element = "<div id='{name}' class='app'>\n <div>{content}</div>\n </div>\n".format(name=AppName, content=Content)
        self._append_to_line(local, LineIndex, element)
#Create, edit and read CSS files
class CssPy():
    """Create and edit CSS files on disk."""

    def CreateCss(self, pathCss, CssName):
        """Create a new CSS file at *pathCss* with a header comment.

        NOTE(review): *CssName* is accepted but unused — kept for interface
        compatibility.
        """
        with open(pathCss, "w") as f:
            f.write("/*Css Code write with: [Py.Html]*/")

    def CreateRuleId(self, NameCss, LocalCss, Id, Rule):
        """Append a single-declaration ``#Id`` rule to *LocalCss*."""
        try:
            with open(LocalCss, "a") as f:
                f.write("\n")
                f.write("#"+Id+"{\n"+Rule+"\n" +"} \n")
        except OSError:
            print("Error of local CSS file.")

    def CreateRuleClass(self, NameCss, LocalCss, Class, Rule):
        """Append a single-declaration ``.Class`` rule to *LocalCss*.

        BUGFIX: the selector used the undefined name ``Id`` instead of the
        ``Class`` parameter, so every call died with a NameError that the
        old bare ``except`` silently swallowed.
        """
        try:
            with open(LocalCss, "a") as f:
                f.write("\n")
                f.write("."+Class+"{\n"+Rule+"\n" +"} \n")
        except OSError:
            print("Error of local CSS file.")

    def CreateRuleClassRules(self, NameCss, LocalCss, classCss, RuleA, Rule0, Rule1, Rule2):
        """Append a ``.classCss`` rule with four declarations to *LocalCss*."""
        try:
            with open(LocalCss, "a") as f:
                f.write("\n")
                f.write("."+classCss+"{\n"+RuleA+"\n")
                f.write(Rule0+"\n"+Rule1+"\n"+Rule2+"\n")
                f.write("}\n")
        except OSError:
            print("Error of local CSS file.")

    def CreateRuleIdRules(self, NameCss, LocalCss, Id, RuleA, Rule0, Rule1, Rule2):
        """Append a ``#Id`` rule with four declarations to *LocalCss*."""
        try:
            with open(LocalCss, "a") as f:
                f.write("\n")
                f.write("#"+Id+"{\n"+RuleA+"\n")
                f.write(Rule0+"\n"+Rule1+"\n"+Rule2+"\n")
                f.write("}\n")
        except OSError:
            print("Error of local CSS file.")

    def appendNewRule(self, LocalCss, Id, RuleX1, RuleX2, RuleX3):
        """Append up to three extra declarations inside the ``#Id`` rule.

        NOTE(review): ``endline`` is counted from the start of the file, so
        this only behaves as intended when the ``#Id`` rule is the first one
        containing ``}`` — logic preserved as-is; confirm before relying on it.
        """
        line = 0
        endline = 0
        text = ""
        with open(LocalCss, "r") as f:
            text = f.readlines()
            for linha in text:
                print(linha)
                if "#"+Id+"{" == linha:
                    print(line)
                    for enline in text:
                        if "}" in enline:
                            print(enline)
                            break
                        endline += 1
                    break
                line += 1
        with open(LocalCss, "w") as f:
            if (RuleX1 != ""):
                text[endline-2] += RuleX1+"\n"
            if (RuleX2 != ""):
                text[endline-2] += RuleX2+"\n"
            if (RuleX3 != ""):
                text[endline-2] += RuleX3+"\n"
            for linha in text:
                f.write(linha)

    def appendLinkSpaRule(self, HtmlPath, LineIndex):
        """Overwrite 1-based line *LineIndex* of *HtmlPath* with the Spa.css <link>."""
        rule = "\n<link href='\Framework\cssComponents\Spa.css' rel='stylesheet' type='text/css' />\n"
        with open(HtmlPath, "r") as f:
            text = f.readlines()
        LineIndex -= 1
        with open(HtmlPath, "w") as f:
            text[LineIndex] = rule
            for linha in text:
                f.write(linha)
#Class for searching data in HTML files and creating analytic data.
class SearchData():
    """Collect paths of project HTML/CSS/JS files for later analysis."""

    def __init__(self):
        # BUGFIX: these used to be class-level lists, silently shared by every
        # instance of SearchData; they are now per-instance state.
        self._localHtmlFiles = []
        self._localCssFiles = []
        self._localJsFiles = []

    def createNewFileLocal(self, html, css, js):
        """Register each non-empty path argument in its matching list."""
        if html != "":
            self._localHtmlFiles.append(html)
        if css != "":
            self._localCssFiles.append(css)
        if js != "":
            self._localJsFiles.append(js)
import csv
import pprint
from datetime import datetime
count_by_state = {}
count_by_category = {}
count_by_area_of_significance = {}
count_by_year_listed = {}
pp = pprint.PrettyPrinter(indent=4)

def _count(counter, key):
    """Increment counter[key], creating the entry on first sight."""
    counter[key] = counter.get(key, 0) + 1

#create counters for properties by state, category, area of significance, and year listed
# newline='' is the form the csv module documents for reading/writing.
with open("/Users/juliecarlson/Desktop/628 Final Project/results/2021-11-01_National-Register-Processed-Dataset.csv", "r", newline='') as originalcsvfile:
    reader = csv.DictReader(originalcsvfile)
    for row in reader:
        #count number of properties by state
        _count(count_by_state, row['State'])
        #count number of properties by category, filtering out properties without a category
        if row['Category of Property'] != '':
            _count(count_by_category, row['Category of Property'])
        #count number of properties by area of significance, filtering out properties
        #without one; a row may carry several areas separated by "; ".
        # BUGFIX: the old code used row['Area of Significance'] itself as the
        # loop variable, mutating the row dict while counting.
        if row['Area of Significance'] != '':
            for area in row['Area of Significance'].split("; "):
                _count(count_by_area_of_significance, area)
        #count the number of properties by the year they were added to the National Register
        if row['Listed Year'] != '':
            _count(count_by_year_listed, row['Listed Year'])

#print dictionaries to check that the script has worked
pp.pprint(count_by_state)
pp.pprint(count_by_category)
pp.pprint(count_by_area_of_significance)
pp.pprint(count_by_year_listed)

#create csv file names that will include the date in ISO 8601 format
datestr = datetime.now().strftime("%Y-%m-%d")
_results_dir = "/Users/juliecarlson/Desktop/628 Final Project/results/"
count_by_state_file = _results_dir + datestr + "_Count_by_State.csv"
count_by_category_file = _results_dir + datestr + "_Count_by_Category.csv"
count_by_area_of_significance_file = _results_dir + datestr + "_Count_by_Area_of_Significance.csv"
count_by_year_listed_file = _results_dir + datestr + "_Count_by_Year_Listed.csv"

def _write_counts(path, label, counter):
    """Write *counter* to *path* as a two-column CSV headed (label, Count)."""
    with open(path, "w", newline='') as newcsvfile:
        writer = csv.writer(newcsvfile)
        writer.writerow([label, 'Count'])
        for key, value in counter.items():
            writer.writerow([key, value])

#write the dictionaries to csv files
_write_counts(count_by_state_file, 'State', count_by_state)
_write_counts(count_by_category_file, 'Category of Property', count_by_category)
_write_counts(count_by_area_of_significance_file, 'Area of Significance', count_by_area_of_significance)
_write_counts(count_by_year_listed_file, 'Year Listed', count_by_year_listed)
|
import settings
import manager
import utils
import pexpect
from pexpect.replwrap import REPLWrapper
import os
import time
def logpath(tty):
    """Return the path of the log file associated with TTY *tty*."""
    log_name = '{}.log'.format(tty)
    return os.path.join(settings.log_dir, log_name)
# Compiled once at import: regex that finds mspdebug error codes in a log tail.
log_error_re = settings.make_log_error_re()
class NoTTYError(Exception):
    """Raised when the TTY manager has no free device to hand out."""
    pass
class Mspdebug(object):
    """Context-manager wrapper around an interactive ``mspdebug`` session.

    Acquires a TTY from the manager, spawns mspdebug under pexpect, and
    exposes both a raw-text API (run_command/run_continue/interrupt) and a
    parsed python-level API (reset/prog/mw/fill/setreg/md/regs/step/run).
    """
    def __init__(self):
        self.tty = None      # device name handed out by the manager
        self.log_f = None    # open per-TTY log file handle
        self.spawn = None    # underlying pexpect.spawn object
        self.repl = None     # REPLWrapper driving the mspdebug prompt
    def open_log(self):
        # Open (append) the per-TTY logfile, creating the directory on demand.
        if not os.path.isdir(settings.log_dir):
            os.makedirs(settings.log_dir, exist_ok=True)
        self.log_f = open(logpath(self.tty), 'at')
        self.log_f.write(settings.make_log_spacer())
        self.log_f.flush()
    # I don't know if pexpect does this for us, but it probably doesn't hurt.
    def close_log(self):
        self.log_f.close()
    # This is kind of a stupid hack, but it looks like REPLWrapper blows away any
    # info that would let us pull this out of the pexpect.spawn object directly.
    # Or can we just use self.spawn.before ??
    def get_error_from_log(self):
        # Scan the tail of the logfile for the most recent mspdebug error code;
        # returns the code as int, or None when none is found/parsable.
        self.log_f.flush()
        with open(logpath(self.tty), 'rb') as f:
            try:
                f.seek(-settings.log_error_window, 2)
                # NOTE(review): reads a fixed 1024 bytes regardless of
                # settings.log_error_window — confirm the two are meant to agree.
                logtail = f.read(1024).decode()
            except OSError:
                # Seek can fail if the file is to short, so just read the whole thing.
                f.seek(0)
                logtail = f.read().decode()
        codes = log_error_re.findall(logtail)
        try:
            error_code = int(codes[-1])
        except Exception:
            error_code = None  # no error code present in the tail
        return error_code
    def start_repl(self):
        # Keep trying TTYs until a REPL comes up; TTYs whose failure produces a
        # known error code are marked as bad along the way.
        repl = None
        while repl is None:
            tty = manager.get_tty()
            if tty is None:
                raise NoTTYError
            else:
                self.tty = tty
            mspargs = [settings.mspdebug_driver, '-d', self.tty]
            self.open_log()
            try:
                P = pexpect.spawn('mspdebug', mspargs, encoding='ascii', logfile=self.log_f)
                R = REPLWrapper(P, settings.mspdebug_prompt, None)
                manager.claim_tty(self.tty, P.pid)
                spawn = P
                repl = R
            except pexpect.EOF:
                # mspdebug died before reaching the prompt: inspect the log.
                error_code = self.get_error_from_log()
                if error_code in settings.errors_to_mark:
                    manager.mark_tty(self.tty)
                self.close_log()
        self.spawn = spawn
        self.repl = repl
    def exit_repl(self):
        try:
            self.repl.run_command('exit')
        except pexpect.EOF:
            # 'exit' makes mspdebug close the session, so EOF is the success path.
            manager.release_tty(self.tty)
        else:
            print('failed to release tty {}'.format(repr(self.tty)))
    def __enter__(self):
        self.start_repl()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exit_repl()
        self.close_log()
    # raw text api
    def run_command(self, cmd):
        # Send a raw command line to mspdebug, honoring the command blacklist;
        # returns the command's textual output (or an explanatory message).
        cleaned = cmd.strip()
        if cleaned:
            cmd_args = cleaned.split()
            if cmd_args[0] in settings.mspdebug_cmd_blacklist:
                return '{:s}: blacklisted!'.format(cmd_args[0])
            else:
                return self.repl.run_command(cleaned)
        else:
            return '{:s}: no command'.format(repr(cmd))
    def run_continue(self):
        # Resume the target without waiting for the prompt to return.
        self.spawn.sendline('run')
        self.spawn.expect_exact('Running. Press Ctrl+C to interrupt...')
        return self.spawn.before
    def interrupt(self):
        # Ctrl+C the running target and wait for the prompt to come back.
        self.spawn.sendintr()
        self.spawn.expect_exact(settings.mspdebug_prompt)
        return self.spawn.before
    # standard python-level api
    def reset(self):
        # Reset the target; returns register 0 (the PC) afterwards.
        self.run_command('reset')
        reg_output = self.run_command('regs')
        regs = utils.parse_regs(reg_output)
        return regs[0]
    def prog(self, fname):
        # Program *fname* onto the target. Returns the new PC on success, or
        # the stripped raw mspdebug output when programming failed.
        raw_output = self.run_command('prog {:s}'.format(fname))
        imgsize = utils.parse_prog(raw_output)
        if imgsize is None:
            return raw_output.strip()
        else:
            reg_output = self.run_command('regs')
            regs = utils.parse_regs(reg_output)
            return regs[0]
    def mw(self, addr, pattern):
        # Memory write: 'mw <addr> <byte> <byte> ...'
        self.run_command(('mw {:#x}' + (' {:#x}' * len(pattern))).format(addr, *pattern))
    def fill(self, addr, size, pattern):
        # Fill a memory range with a repeating byte pattern.
        self.run_command(('fill {:#x} {:d}' + (' {:#x}' * len(pattern))).format(addr, size, *pattern))
    def setreg(self, register, value):
        # Set a single register to *value*.
        self.run_command('set {:d} {:#x}'.format(register, value))
    def md(self, addr, size):
        # Memory dump: returns the parsed bytes read at *addr*.
        raw_output = self.run_command('md {:#x} {:d}'.format(addr, size))
        base_addr, data = utils.parse_mem(raw_output)
        assert base_addr == addr
        return data
    def regs(self):
        # Return all registers as parsed by utils.parse_regs.
        raw_output = self.run_command('regs')
        return utils.parse_regs(raw_output)
    def step(self):
        # Single-step the target; returns the new PC.
        raw_output = self.run_command('step')
        regs = utils.parse_regs(raw_output)
        return regs[0]
    def run(self, interval = 0.5):
        # Run the target for *interval* seconds, then interrupt; returns the PC.
        self.run_continue()
        time.sleep(interval)
        raw_output = self.interrupt()
        regs = utils.parse_regs(raw_output)
        return regs[0]
|
<reponame>t-mcneal/miurl
from flask import Flask, render_template, request, redirect, url_for, flash, abort, session, jsonify
import json
import os.path
from werkzeug.utils import secure_filename
app = Flask(__name__)
# NOTE: Change the secret key below to a new random string for security purposes since
# the "app.py" file is publicly available on GitHub
app.secret_key = b'\<KEY>'  # redacted placeholder — replace with a real random byte string
# NOTE: Insert your computer's absolute file path to the "user_files" directory located
# in the "static" directory
app.config['USER_FILES'] = 'path/miurl/static/user_files/'
# File extensions accepted for upload-backed short links.
app.config['ALLOWED_EXTENSIONS'] = ['pdf', 'png', 'jpg', 'jpeg', 'gif']
@app.route('/')
def home():
    """Render the landing page with the short codes stored in this session."""
    known_codes = session.keys()
    return render_template('home.html', codes=known_codes)
@app.route('/your-url', methods=['GET', 'POST'])
def your_url():
    """Create a new short code mapping to either a URL or an uploaded file.

    POST: validates the requested code, persists the mapping in urls.json,
    and renders the confirmation page. GET: redirects back to the home page.
    """
    if request.method == 'POST':
        urls = {}
        # Load any previously saved mappings.
        if os.path.exists('urls.json'):
            with open('urls.json') as urls_file:
                urls = json.load(urls_file)
        # Reject codes that are already taken.
        if request.form['code'] in urls.keys():
            flash('That short name has already been taken. Please select another name.')
            return redirect(url_for('home'))
        if 'url' in request.form.keys():
            # URL shortening: store the target URL directly.
            urls[request.form['code']] = {'url': request.form['url']}
        else:
            # File upload: save under a code-prefixed, sanitized name.
            f = request.files['file']
            full_name = request.form['code'] + secure_filename(f.filename)
            if allowed_file(full_name):
                f.save(os.path.join(app.config['USER_FILES'], full_name))
                urls[request.form['code']] = {'file': full_name}
            else:
                flash('File type must be a PDF, PNG, JPEG, JPG, or GIF.')
                return redirect(url_for('home'))
        # Persist the updated mapping and remember the code in the session.
        with open('urls.json', 'w') as url_file:
            json.dump(urls, url_file)
        session[request.form['code']] = True
        return render_template('your_url.html', code=request.form['code'])
    else:
        return redirect(url_for('home'))
@app.route('/<string:code>')
def redirect_to_url(code):
    """Resolve a short *code* to its stored target.

    Redirects to the saved URL or the uploaded file; 404s when the code
    (or the urls.json store itself) does not exist.
    """
    if not os.path.exists('urls.json'):
        return abort(404)
    with open('urls.json') as urls_file:
        mapping = json.load(urls_file)
    entry = mapping.get(code)
    if entry is None:
        return abort(404)
    if 'url' in entry:
        return redirect(entry['url'])
    return redirect(url_for('static', filename='user_files/' + entry['file']))
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page together with the matching status code."""
    body = render_template('page_not_found.html')
    return body, 404
@app.context_processor
def override_url_for():
    """Expose the cache-busting ``dated_url_for`` as ``url_for`` in templates."""
    return {'url_for': dated_url_for}
def dated_url_for(endpoint, **values):
    """Adds last modified timestamp to static (CSS and JavaScript) links.

    Keyword arguments:
    endpoint -- directory of files to add time stamps
    **values -- keyworded, variable-length argument list of files in the endpoint directory
    """
    if endpoint != 'static':
        return url_for(endpoint, **values)
    target = values.get('filename', None)
    if target:
        full_path = os.path.join(app.root_path, endpoint, target)
        values['q'] = int(os.stat(full_path).st_mtime)
    return url_for(endpoint, **values)
def allowed_file(filename):
    """Returns True if a file type is a PDF, PNG, JPG, JPEG, or GIF.
    Returns False otherwise.

    Keyword arguments:
    filename -- name of file
    """
    # BUG FIX: the original indexed filename[-4]/filename[-5], which raised
    # IndexError for names shorter than five characters. Splitting on the
    # last dot handles extensions of any length safely and accepts exactly
    # the same extensions as before.
    if '.' not in filename:
        return False
    ext = filename.rsplit('.', 1)[1].lower()
    return ext in app.config['ALLOWED_EXTENSIONS']
|
<gh_stars>100-1000
#!/usr/bin/env python
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright 2013-2021 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import sys
import time
import numpy as np
import OpenGL
from typer import Argument, Option, run
from pymor.core.config import is_windows_platform
from pymor.discretizers.builtin.gui.matplotlib import MatplotlibPatchWidget
OpenGL.ERROR_ON_COPY = True
from pymor.core.exceptions import QtMissing
try:
from qtpy import QtWidgets
from qtpy import QtCore
except ImportError:
raise QtMissing()
from pymor.algorithms.greedy import rb_greedy
from pymor.analyticalproblems.thermalblock import thermal_block_problem
from pymor.discretizers.builtin import discretize_stationary_cg
from pymor.discretizers.builtin.gui.gl import ColorBarWidget, GLPatchWidget
from pymor.reductors.coercive import CoerciveRBReductor
from pymor.tools.typer import Choices
# Range and resolution of the diffusion-parameter spin boxes in the GUI.
PARAM_STEPS = 10
PARAM_MIN = 0.1
PARAM_MAX = 1
def main(
    xblocks: int = Argument(..., help='Number of blocks in x direction.'),
    yblocks: int = Argument(..., help='Number of blocks in y direction.'),
    snapshots: int = Argument(
        ...,
        help='Number of snapshots for basis generation per component. In total SNAPSHOTS^(XBLOCKS * YBLOCKS).'
    ),
    rbsize: int = Argument(..., help='Size of the reduced basis.'),
    grid: int = Option(60, help='Use grid with 2*NI*NI elements.'),
    product: Choices('euclidean h1') = Option(
        'h1',
        help='Product w.r.t. which to orthonormalize and calculate Riesz representatives.'
    ),
    testing: bool = Option(False, help='Load the gui and exit right away (for functional testing).'),
):
    """Thermalblock demo with GUI."""
    # Reuse an existing QApplication if one is already running (e.g. in tests).
    application = QtWidgets.QApplication.instance()
    if application is None:
        application = QtWidgets.QApplication(sys.argv)
    window = RBGui(xblocks, yblocks, snapshots, rbsize, grid, product)
    window.show()
    if testing:
        # Close automatically after one second for functional testing.
        QtCore.QTimer.singleShot(1000, application.quit)
    application.exec_()
class ParamRuler(QtWidgets.QWidget):
    """Grid of spin boxes, one per thermal block, for choosing parameter values."""

    def __init__(self, parent, sim):
        super().__init__(parent)
        self.sim = sim
        self.setMinimumSize(200, 100)
        box = QtWidgets.QGridLayout()
        self.spins = []
        for j in range(sim.xblocks):
            for i in range(sim.yblocks):
                spin = QtWidgets.QDoubleSpinBox()
                spin.setRange(PARAM_MIN, PARAM_MAX)
                spin.setSingleStep((PARAM_MAX - PARAM_MIN) / PARAM_STEPS)
                spin.setValue(PARAM_MIN)
                self.spins.append(spin)
                box.addWidget(spin, j, i)
                # Every change re-solves and redraws the parent panel.
                spin.valueChanged.connect(parent.solve_update)
        self.setLayout(box)

    def enable(self, enable=True):
        """Enable or disable all spin boxes (disabled while a solve runs)."""
        # BUG FIX: the original assigned ``spin.isEnabled = enable``, which
        # merely shadowed the QWidget.isEnabled *method* with a bool and never
        # changed the widget state; setEnabled() is the actual Qt setter.
        for spin in self.spins:
            spin.setEnabled(enable)
# noinspection PyShadowingNames
class SimPanel(QtWidgets.QWidget):
    # Panel showing one simulator's solution plot next to its parameter grid.
    def __init__(self, parent, sim):
        super().__init__(parent)
        self.sim = sim
        box = QtWidgets.QHBoxLayout()
        if is_windows_platform():
            # Matplotlib fallback on Windows, where the GL widget is not used.
            self.solution = MatplotlibPatchWidget(self, self.sim.grid, vmin=0., vmax=0.8)
            box.addWidget(self.solution, 2)
        else:
            self.solution = GLPatchWidget(self, self.sim.grid, vmin=0., vmax=0.8)
            self.bar = ColorBarWidget(self, vmin=0., vmax=0.8)
            box.addWidget(self.solution, 2)
            box.addWidget(self.bar, 2)
        self.param_panel = ParamRuler(self, sim)
        box.addWidget(self.param_panel)
        self.setLayout(box)
    def solve_update(self):
        # Re-solve with the parameter values currently set in the spin boxes.
        tic = time.perf_counter()
        self.param_panel.enable(False)
        # NOTE(review): the spins are appended j-major over xblocks in
        # ParamRuler while the reshape target is (yblocks, xblocks) --
        # confirm the ordering matches for non-square block layouts.
        shape = (self.sim.yblocks, self.sim.xblocks)
        mu = {'diffusion': np.array([s.value() for s in self.param_panel.spins]).reshape(shape)}
        U = self.sim.solve(mu)
        print(f'Simtime {time.perf_counter()-tic}')
        tic = time.perf_counter()
        self.solution.set(U.to_numpy().ravel())
        self.param_panel.enable(True)
        print(f'Drawtime {time.perf_counter()-tic}')
class AllPanel(QtWidgets.QWidget):
    """Stacks the reduced and the detailed simulation panels vertically."""

    def __init__(self, parent, reduced_sim, detailed_sim):
        super().__init__(parent)
        layout = QtWidgets.QVBoxLayout()
        self.reduced_panel = SimPanel(self, reduced_sim)
        self.detailed_panel = SimPanel(self, detailed_sim)
        for panel in (self.reduced_panel, self.detailed_panel):
            layout.addWidget(panel)
        self.setLayout(layout)
# noinspection PyShadowingNames
class RBGui(QtWidgets.QMainWindow):
    """Main window holding the reduced and detailed thermal-block simulators."""

    def __init__(self, *args):
        super().__init__()
        panel = AllPanel(self, ReducedSim(*args), DetailedSim(*args))
        self.panel = panel
        self.setCentralWidget(panel)
# noinspection PyShadowingNames
class SimBase:
    """Shared setup for the reduced and detailed thermal-block simulators."""

    def __init__(self, xblocks, yblocks, snapshots, rbsize, grid, product):
        self.xblocks, self.yblocks = xblocks, yblocks
        self.snapshots, self.rbsize, self.product = snapshots, rbsize, product
        # Lazily-built state flag used by ReducedSim.
        self.first = True
        self.problem = thermal_block_problem(num_blocks=(xblocks, yblocks),
                                             parameter_range=(PARAM_MIN, PARAM_MAX))
        # Discretize once; keep the model and the grid for plotting.
        self.m, data = discretize_stationary_cg(self.problem, diameter=1. / grid)
        self.grid = data['grid']
# noinspection PyShadowingNames,PyShadowingNames
class ReducedSim(SimBase):
    """Simulator backed by a greedy reduced basis, built lazily on first solve."""

    def __init__(self, *args):
        super().__init__(*args)

    def _first(self):
        # Orthonormalize w.r.t. the H1 semi product unless 'euclidean' was chosen.
        product = self.m.h1_0_semi_product if self.product == 'h1' else None
        reductor = CoerciveRBReductor(self.m, product=product)
        training_set = self.problem.parameter_space.sample_uniformly(self.snapshots)
        greedy_data = rb_greedy(self.m, reductor, training_set,
                                use_error_estimator=True, error_norm=self.m.h1_0_semi_norm,
                                max_extensions=self.rbsize)
        self.rom = greedy_data['rom']
        self.reductor = reductor
        self.first = False

    def solve(self, mu):
        """Solve the ROM for ``mu`` and lift the solution to full order."""
        if self.first:
            self._first()
        return self.reductor.reconstruct(self.rom.solve(mu))
# noinspection PyShadowingNames
class DetailedSim(SimBase):
    """Full-order simulator; caching is disabled so every solve is recomputed."""

    def __init__(self, *args):
        super().__init__(*args)
        self.m.disable_caching()

    def solve(self, mu):
        """Solve the full-order model for ``mu``."""
        return self.m.solve(mu)
# typer parses the command-line arguments and dispatches them to main().
if __name__ == '__main__':
    run(main)
|
<filename>test/table_request.py
#
# Copyright (c) 2018, 2021 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl/
#
import unittest
from time import sleep
from borneo import (
GetTableRequest, IllegalArgumentException, OperationNotSupportedException,
State, TableLimits, TableNotFoundException, TableRequest)
from parameters import (
index_name, is_onprem, is_pod, table_name, table_request_timeout, tenant_id,
wait_timeout)
from test_base import TestBase
from testutils import get_handle_config
class TestTableRequest(unittest.TestCase, TestBase):
    """Exercises borneo TableRequest DDL paths: create/alter/drop tables and
    indexes, table limits, and argument validation.

    On-premise deployments (is_onprem) skip limit-related assertions, since
    TableLimits are a cloud-service concept.
    """
    @classmethod
    def setUpClass(cls):
        cls.set_up_class()
    @classmethod
    def tearDownClass(cls):
        cls.tear_down_class()
    def setUp(self):
        """Build the DDL statements and a fresh TableRequest for each test."""
        self.set_up()
        self.handle_config = get_handle_config(tenant_id)
        self.create_tb_statement = (
            'CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
            fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
            fld_bin BINARY, fld_time TIMESTAMP(4), fld_num NUMBER, fld_json JSON, \
            fld_arr ARRAY(STRING), fld_map MAP(STRING), \
            fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
            PRIMARY KEY(fld_id)) USING TTL 30 DAYS')
        self.create_idx_statement = (
            'CREATE INDEX ' + index_name + ' ON ' + table_name +
            '(fld_str, fld_double)')
        self.alter_fld_statement = (
            'ALTER TABLE ' + table_name + '(DROP fld_num)')
        self.alter_ttl_statement = (
            'ALTER TABLE ' + table_name + ' USING TTL 16 HOURS')
        self.drop_idx_statement = (
            'DROP INDEX ' + index_name + ' ON ' + table_name)
        self.drop_tb_statement = ('DROP TABLE IF EXISTS ' + table_name)
        self.drop_tb_statement1 = ('DROP TABLE ' + table_name)
        self.table_request = TableRequest()
        self.table_limits = TableLimits(100, 100, 1)
    def tearDown(self):
        """Best-effort cleanup: drop the table if a test left it behind."""
        try:
            get_table = GetTableRequest().set_table_name(table_name)
            result = self.handle.get_table(get_table)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            drop_request = TableRequest().set_statement(self.drop_tb_statement)
            self._do_table_request(drop_request)
        except TableNotFoundException:
            pass
        finally:
            self.tear_down()
    def testTableRequestSetIllegalStatement(self):
        """Invalid statements are rejected client-side or by the service."""
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_statement, {})
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_statement, '')
        self.table_request.set_statement('IllegalStatement')
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          self.table_request)
        self.table_request.set_statement(
            'CREATE INDEX ' + index_name + ' ON IllegalTable(fld_num)')
        self.assertRaises(TableNotFoundException, self.handle.table_request,
                          self.table_request)
    def testTableRequestSetIllegalCompartment(self):
        """Compartment must be a non-empty string."""
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_compartment, {})
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_compartment, '')
    def testTableRequestSetIllegalTableLimits(self):
        """Bad TableLimits objects or zero limit values are rejected."""
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_table_limits,
                          'IllegalTableLimits')
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_table_limits, None)
        self.table_request.set_statement(
            self.create_tb_statement).set_table_limits(TableLimits(100, 0, 1))
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          self.table_request)
    def testTableRequestSetIllegalTableName(self):
        """Table name must be a string naming an existing table."""
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_table_name,
                          {'name': table_name})
        if not is_onprem():
            self.table_request.set_table_name('IllegalTable').set_table_limits(
                self.table_limits)
            self.assertRaises(TableNotFoundException,
                              self.handle.table_request, self.table_request)
    def testTableRequestSetIllegalTimeout(self):
        """Timeout must be a positive integer."""
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_timeout, 'IllegalTimeout')
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_timeout, 0)
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_timeout, -1)
    def testTableRequestSetIllegalDefaults(self):
        self.assertRaises(IllegalArgumentException,
                          self.table_request.set_defaults, 'IllegalDefaults')
    def testTableRequestSetDefaults(self):
        """set_defaults copies the handle config's request timeout."""
        self.table_request.set_defaults(self.handle_config)
        self.assertEqual(self.table_request.get_timeout(),
                         table_request_timeout)
    def testTableRequestNoStatementAndTableName(self):
        # A request with neither a statement nor a table name is invalid.
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          self.table_request)
    def testTableRequestBothStatementAndTableName(self):
        # Statement and table name are mutually exclusive.
        self.table_request.set_statement(
            self.create_tb_statement).set_table_name(table_name)
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          self.table_request)
    def testTableRequestOnlyTableName(self):
        # A table name alone (without limits to modify) is not a valid request.
        self.table_request.set_table_name(table_name)
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          self.table_request)
    def testTableRequestGets(self):
        """Getters must return exactly what the setters stored."""
        self.table_request.set_table_name(table_name).set_statement(
            self.create_tb_statement).set_table_limits(self.table_limits)
        self.assertEqual(self.table_request.get_statement(),
                         self.create_tb_statement)
        self.assertIsNone(self.table_request.get_compartment())
        self.assertEqual(self.table_request.get_table_limits(),
                         self.table_limits)
        self.assertEqual(self.table_request.get_table_name(), table_name)
    def testTableRequestIllegalRequest(self):
        self.assertRaises(IllegalArgumentException, self.handle.table_request,
                          'IllegalRequest')
    def testTableRequestCreateDropTable(self):
        """Full create/drop lifecycle, including idempotent DROP IF EXISTS."""
        # create table failed without TableLimits set
        self.table_request.set_statement(self.create_tb_statement)
        if not is_onprem():
            self.assertRaises(IllegalArgumentException,
                              self.handle.table_request, self.table_request)
        # create table succeed with TableLimits set
        self.table_request.set_table_limits(self.table_limits)
        result = self.handle.table_request(self.table_request)
        if is_onprem():
            self.check_table_result(result, State.CREATING, has_schema=False)
        else:
            self.check_table_result(
                result, State.CREATING, self.table_limits, False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, self.table_limits)
        # drop table by resetting the statement
        self.table_request.set_statement(self.drop_tb_statement)
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.DROPPING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.DROPPED, has_schema=False)
        # ensure that this succeeds if run again
        result = self.handle.table_request(self.table_request)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.DROPPED, has_schema=False, has_operation_id=False)
        # ensure that dropping without "if exists" results in not found
        self.table_request.set_statement(self.drop_tb_statement1)
        try:
            result = self.handle.table_request(self.table_request)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.fail('TNFE should have been raised')
        except TableNotFoundException:
            pass
    def testTableRequestCreateDropIndex(self):
        """Create and drop an index on an existing table."""
        # create table before creating index
        request = TableRequest().set_statement(
            self.create_tb_statement).set_table_limits(self.table_limits)
        self._do_table_request(request)
        # create index by resetting the statement
        self.table_request.set_statement(self.create_idx_statement)
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.UPDATING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, self.table_limits)
        # drop index by resetting the statement
        self.table_request.set_statement(self.drop_idx_statement)
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.UPDATING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, self.table_limits)
        # drop table after dropping index
        self.table_request.set_statement(self.drop_tb_statement)
        self._do_table_request(self.table_request)
    def testTableRequestAlterTable(self):
        """ALTER TABLE must be sent without TableLimits attached."""
        # create table before altering table
        request = TableRequest().set_statement(
            self.create_tb_statement).set_table_limits(self.table_limits)
        self._do_table_request(request)
        # alter table failed with TableLimits set
        if not is_onprem():
            request.set_statement(self.alter_fld_statement)
            self.assertRaises(IllegalArgumentException,
                              self.handle.table_request, request)
        # alter table succeed without TableLimits set
        self.table_request.set_statement(self.alter_fld_statement)
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.UPDATING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, self.table_limits)
        # drop table after altering table
        request.set_statement(self.drop_tb_statement)
        self._do_table_request(request)
    def testTableRequestAlterTableTTL(self):
        """Alter the table's default row TTL."""
        # create table before altering table
        request = TableRequest().set_statement(
            self.create_tb_statement).set_table_limits(self.table_limits)
        self._do_table_request(request)
        # alter table ttl
        self.table_request.set_statement(self.alter_ttl_statement)
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.UPDATING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, self.table_limits)
        # drop table after altering table
        request.set_statement(self.drop_tb_statement)
        self._do_table_request(request)
    def testTableRequestModifyTableLimits(self):
        """Change limits on an existing table (unsupported on-premise)."""
        # create table before modifying the table limits
        request = TableRequest().set_statement(
            self.create_tb_statement).set_table_limits(self.table_limits)
        self._do_table_request(request)
        # modify the table limits
        table_limits = TableLimits(50, 50, 1)
        self.table_request.set_table_name(table_name).set_table_limits(
            table_limits)
        if is_onprem():
            self.assertRaises(OperationNotSupportedException,
                              self.handle.table_request, self.table_request)
            return
        result = self.handle.table_request(self.table_request)
        self.check_table_result(
            result, State.UPDATING, check_limit=False, check_schema=False)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
        self.check_table_result(result, State.ACTIVE, table_limits)
        # drop table after modifying the table limits
        request.set_statement(self.drop_tb_statement)
        self._do_table_request(request)
    def _do_table_request(self, request):
        """Issue *request* and wait for completion, throttling on pods."""
        #
        # Optionally delay to handle the 4 DDL ops/minute limit
        # in the real service
        #
        if is_pod():
            sleep(30)
        result = self.handle.table_request(request)
        result.wait_for_completion(self.handle, wait_timeout, 1000)
# Run the table-request test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# import kivy module
import os
import kivy
import cv2
import numpy as np
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# base Class of your App inherits from the App class.
# app:always refers to the instance of your application
from gtts import gTTS
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
# From graphics module we are importing
# Rectangle and Color as they are
# basic building of canvas.
from kivy.graphics import Rectangle, Color
# The Label widget is for rendering text.
from kivy.uix.label import Label
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.widget import Widget
import time
from playsound import playsound
class About(Screen):
    # "About" page; its layout presumably comes from the kv file -- verify.
    pass
class WindowManager(ScreenManager):
    # Screen manager switching between the app's screens (kv-defined, presumably).
    pass
class Home(Screen):
    # Landing screen. NOTE(review): image_detect/live_detection below take
    # ``self`` despite being module-level -- presumably wired from the kv file.
    pass
def image_detect(self):
    """Run YOLOv3 on a user-chosen image, draw boxes and speak the labels.

    Opens a Tk file dialog, runs the network, shows an annotated window and
    reads the detected object names aloud via gTTS/playsound.
    """
    net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
    classes = []
    with open("coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    layer_names = net.getLayerNames()
    # Assumes an OpenCV version where getUnconnectedOutLayers() returns Nx1
    # arrays (hence i[0]); newer versions return a flat vector -- TODO confirm.
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    # Loading image
    Tk().withdraw()
    filename = askopenfilename()
    if filename!="":
        img = cv2.imread(filename)
        img = cv2.resize(img, None, fx=0.4, fy=0.4)
        height, width, channels = img.shape
        # Detecting objects
        blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(output_layers)
        # Showing informations on the screen
        class_ids = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        print(indexes)
        font = cv2.FONT_HERSHEY_PLAIN
        labels=""
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                labels+=" a "+label + " and"
                # NOTE(review): colors indexed by box index here but by class
                # id in live_detection -- likely meant colors[class_ids[i]].
                color = colors[i]
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
        # Drop the trailing "and" before building the spoken sentence.
        labels='I see '+' '.join(labels.split(' ')[:-1])
        cv2.imshow("Image", img)
        language = 'en-us'
        # NOTE(review): with zero detections the spoken text is just "I see" --
        # confirm gTTS handles this as intended.
        myobj = gTTS(text=labels, lang=language, slow=False)
        myobj.save("sound.mp3")
        playsound("sound.mp3")
        os.remove("sound.mp3")
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def live_detection(self):
    """Run YOLOv3-tiny on webcam frames, draw boxes and speak labels.

    Loops until the Esc key (27) is pressed; each frame with detections is
    narrated via gTTS/playsound, which blocks the capture loop while playing.
    """
    net = cv2.dnn.readNet("yolov3-tiny.weights", "yolov3-tiny.cfg")
    classes = []
    with open("coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    layer_names = net.getLayerNames()
    # Assumes an OpenCV version where getUnconnectedOutLayers() returns Nx1
    # arrays (hence i[0]); newer versions return a flat vector -- TODO confirm.
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    # Loading image
    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_PLAIN
    starting_time = time.time()
    frame_id = 0
    while True:
        _, frame = cap.read()
        frame_id += 1
        height, width, channels = frame.shape
        # Detecting objects
        blob = cv2.dnn.blobFromImage(frame, 0.00392, (600, 600), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(output_layers)
        # Showing information on the screen
        class_ids = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.2:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.8, 0.3)
        labels = ""
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                labels += " a "+label + " and "
                confidence = confidences[i]
                color = colors[class_ids[i]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                cv2.putText(frame, label + " " + str(round(confidence, 2)), (x, y + 30), font, 3, color, 3)
        elapsed_time = time.time() - starting_time
        fps = frame_id / elapsed_time
        cv2.putText(frame, "FPS: " + str(round(fps, 2)), (10, 50), font, 4, (0, 0, 0), 3)
        cv2.imshow("Image", frame)
        key = cv2.waitKey(1)
        language = 'en-us'
        if labels != "":
            # Drop the trailing "and" before speaking the detected labels.
            labels = 'I see ' + ' '.join(labels.split(' ')[:-1])
            # playsound blocks the capture loop until the speech finishes.
            myobj = gTTS(text=labels, lang=language, slow=True)
            myobj.save("sound.mp3")
            playsound("sound.mp3")
            os.remove("sound.mp3")
        if key == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
# Create the App Class
class GlanceApp(App):
    def build(self):
        # Returning None makes Kivy fall back to the root widget declared in
        # the matching kv file (glance.kv, presumably) -- TODO confirm.
        return
# run the App
# Entry point: start the Kivy event loop.
if __name__=="__main__":
    GlanceApp().run()
|
"""
Parallel & Distributed Algorithms - laboratory
Examples:
- Launch 8 workers with default parameter values:
> python arir.py 8
- Launch 12 workers with custom parameter values:
> python arir.py 12 --shared-memory-size 128 --delay-connect 2.0 --delay-transmit 0.5 --delay-process 0.75
"""
__author__ = 'moorglade'
import multiprocessing
import time
import datetime
import sys
import argparse
import random
import math
def _parse_args():
parser = argparse.ArgumentParser()
# specify command line options
parser.add_argument(
'n_workers',
help='number of workers in the distributed system',
type=int
)
parser.add_argument(
'--shared-memory-size',
help='size of the shared memory array [number of ints]',
type=int,
default=16
)
parser.add_argument(
'--delay-connect',
help='network connection delay [s]',
type=float,
default=0.1
)
parser.add_argument(
'--delay-transmit',
help='network transmission delay [s]',
type=float,
default=0.1
)
parser.add_argument(
'--delay-process',
help='processing delay [s]',
type=float,
default=0.1
)
parser.add_argument(
'--array-size',
help='size of array to be sorted',
type=int,
default=24
)
return argparse.Namespace(**{
key.replace('-', '_'): value
for key, value in vars(parser.parse_args()).items()
})
class DistributedSystem(object):
    # Builds the shared state, the simulated network and one Worker process
    # per id, then runs them all to completion while timing the computation.
    # NOTE: print statements -- this module targets Python 2.
    def __init__(self, configuration):
        object.__init__(self)
        shared = SharedState(configuration.n_workers, configuration.shared_memory_size)
        network = Network(configuration)
        self.__workers = [
            Worker(worker_id, configuration, shared, network.get_endpoint(worker_id))
            for worker_id in range(configuration.n_workers)
        ]
    def run(self):
        # Start every worker process, then block until all have finished.
        print 'Launching {} workers...'.format(len(self.__workers))
        start = datetime.datetime.now()
        for worker in self.__workers:
            worker.start()
        print 'Waiting for the workers to terminate...'
        for worker in self.__workers:
            worker.join()
        stop = datetime.datetime.now()
        print 'All workers terminated.'
        print 'Processing took {} seconds.'.format((stop - start).total_seconds())
class SharedState(object):
    # State shared by all workers: a reusable barrier and an integer array.
    def __init__(self, n_workers, shared_memory_size):
        object.__init__(self)
        self.__barrier = Barrier(n_workers)
        self.__memory = multiprocessing.Array('i', shared_memory_size)
    @property
    def barrier(self):
        # Synchronization barrier sized to the worker count.
        return self.__barrier
    @property
    def memory(self):
        # Shared integer array of the configured size.
        return self.__memory
class Barrier(object):
    """Reusable barrier: wait() blocks until n processes have arrived."""

    def __init__(self, n):
        object.__init__(self)
        self.__counter = multiprocessing.Value('i', 0, lock=False)
        self.__n = n
        self.__condition = multiprocessing.Condition()

    def wait(self):
        """Block until all n participants have called wait(), then release them."""
        with self.__condition:
            self.__counter.value += 1
            if self.__counter.value < self.__n:
                # Not everyone has arrived yet -- sleep until the last one does.
                self.__condition.wait()
            else:
                # Last arrival: reset for reuse and wake every waiter.
                self.__counter.value = 0
                self.__condition.notify_all()
class SharedMemory(object):
    # Thin wrapper around a shared integer array (appears unused in this view).
    def __init__(self, shared_memory_size):
        object.__init__(self)
        self.__array = multiprocessing.Array('i', shared_memory_size)
class Network(object):
    """Simulated network: one inbox channel per worker plus per-worker endpoints."""

    # Wildcard id meaning "accept a message from any sender".
    any_id = -1

    def __init__(self, configuration):
        object.__init__(self)
        n = configuration.n_workers
        channels = [NetworkChannel(configuration) for _ in range(n)]
        self.__endpoints = [NetworkEndpoint(worker_id, channels) for worker_id in range(n)]

    def get_endpoint(self, index):
        """Return the endpoint worker *index* communicates through."""
        return self.__endpoints[index]
class NetworkChannel(object):
    # Rendezvous channel with simulated connection/transmission delays.
    # Both locks start acquired: __enter_lock is released by a receiver to
    # admit a sender, __exit_lock is released by the sender to let the
    # receiver finish. The queue holds at most one in-flight message.
    def __init__(self, configuration):
        self.__configuration = configuration
        self.__source_id = multiprocessing.Value('i', Network.any_id, lock=False)
        self.__queue = multiprocessing.Queue(maxsize=1)
        self.__enter_lock = multiprocessing.Lock()
        self.__exit_lock = multiprocessing.Lock()
        self.__enter_lock.acquire()
        self.__exit_lock.acquire()
    def send(self, source_id, data):
        # Spin until the receiver accepts messages from us (or from anyone).
        while True:
            self.__enter_lock.acquire()
            if self.__source_id.value in [source_id, Network.any_id]:
                self.__source_id.value = source_id
                self.__queue.put(data)
                # Simulated network latency proportional to message length.
                time.sleep(self.__configuration.delay_connect + len(data) * self.__configuration.delay_transmit)
                self.__exit_lock.release()
                break
            else:
                # Receiver is waiting for a different sender; let others try.
                self.__enter_lock.release()
    def receive(self, source_id=Network.any_id):
        # Advertise which sender we accept, then wait for the hand-off.
        self.__source_id.value = source_id
        self.__enter_lock.release()
        data = self.__queue.get()
        self.__exit_lock.acquire()
        return self.__source_id.value, data
class NetworkEndpoint(object):
    """A worker-facing view of the network: its own inbox plus all outboxes."""

    def __init__(self, channel_id, channels):
        self.__my_id = channel_id
        self.__channels = channels
        self.__my_channel = channels[channel_id]

    def send(self, destination_id, data):
        """Send *data* to another worker; sending to oneself is an error."""
        if destination_id == self.__my_id:
            raise RuntimeError('Worker {} tried to send data to itself.'.format(self.__my_id))
        self.__channels[destination_id].send(self.__my_id, data)

    def receive(self, worker_id=Network.any_id):
        """Receive from a specific worker, or from anyone by default."""
        return self.__my_channel.receive(worker_id)
class Worker(multiprocessing.Process):
    def __init__(self, worker_id, configuration, shared, network_endpoint):
        # Each worker is a separate OS process communicating via its endpoint.
        multiprocessing.Process.__init__(self)
        self.__worker_id = worker_id
        self.__configuration = configuration
        self.__shared = shared
        self.__network_endpoint = network_endpoint
        self.__my_data = None  # this worker's chunk, assigned during distribution
        # Chunk size per worker; assumes array_size divides evenly -- TODO confirm.
        self.__data_per_process_size = int(math.floor(self.__configuration.array_size / self.__configuration.n_workers))
        # Hypercube dimensions; assumes n_workers is a power of two -- TODO confirm.
        self.__dimensions = int(math.floor(math.log(self.__configuration.n_workers, 2)))
    @property
    def __n_workers(self):
        # Convenience accessor for the configured number of workers.
        return self.__configuration.n_workers
    def __barrier(self):
        """Block until every worker has reached the shared barrier."""
        self.__shared.barrier.wait()
    def _send(self, worker_id, data):
        """Send *data* to the worker with the given id."""
        self.__network_endpoint.send(worker_id, data)
    def _receive(self, worker_id=Network.any_id):
        """Receive (source_id, data), from a specific worker or from anyone."""
        return self.__network_endpoint.receive(worker_id)
@staticmethod
def __generate_random_data(length):
return [random.randint(-2048, 2048) for _ in range(length)]
    def __log(self, message):
        # Print a message tagged with this worker's id (Python 2 print statement).
        print '[WORKER {}] {}'.format(self.__worker_id, message)
    def __process(self, data):
        """Simulate the cost of processing *data* by sleeping proportionally."""
        # simulates data processing delay by sleeping
        time.sleep(len(data) * self.__configuration.delay_process)
# MY METHODS
def quick_sort(self, items):
"""
Implementation of quick sort
"""
if len(items) > 1:
pivot_index = len(items) / 2
smaller_items = []
larger_items = []
for i, val in enumerate(items):
if i != pivot_index:
if val < items[pivot_index]:
smaller_items.append(val)
else:
larger_items.append(val)
self.quick_sort(smaller_items)
self.quick_sort(larger_items)
items[:] = smaller_items + [items[pivot_index]] + larger_items
return items
    def handle_data_creation_and_distribution(self):
        """Master generates the array and hands equal chunks to every worker."""
        # Master process
        if self.__worker_id == 0:
            data_to_be_sorted = self.__generate_random_data(self.__configuration.array_size)
            self.__log('Transmitting parts of data to other workers. Size of each chunk: {}'.format(
                self.__data_per_process_size))
            # Self assign first chunk of data
            start_chunk_index = 0
            self.__my_data = data_to_be_sorted[start_chunk_index:(start_chunk_index + self.__data_per_process_size)]
            start_chunk_index += self.__data_per_process_size
            # Broadcast data chunks
            for worker_id in range(1, self.__n_workers):
                self._send(worker_id,
                           data_to_be_sorted[start_chunk_index:(start_chunk_index + self.__data_per_process_size)])
                start_chunk_index += self.__data_per_process_size
        # Slave
        else:
            source_id, data = self._receive()
            self.__my_data = data
            self.__log('Received data from worker {}: {}'.format(source_id, data))
        # Wait for all
        self.__barrier()
def handle_data_gather_and_print(self):
sorted_data_complete = []
# Master process
if self.__worker_id == 0:
self.__log('Receiving data from workers...')
for worker_id in range(1, self.__n_workers):
source_id, data = self._receive(worker_id)
self.__log('Received data from worker {}: {}'.format(source_id, data))
sorted_data_complete.extend(data)
else:
self._send(0, self.__my_data)
self.__barrier()
if self.__worker_id == 0:
self.__log('Receivied all data!')
self.__log('Sorted list content')
for number in sorted_data_complete:
self.__log('{}'.format(number))
    def compare_low(self, x):
        """Bitonic compare-low exchange with the partner in dimension *x*.

        Keeps the smaller values on this worker; the partner is
        worker_id XOR (1 << x). Sends/receives are split by id order so the
        paired processes never both block on the same operation.
        """
        minimum = None
        counter = 0
        # Pre-sized scratch buffers; "i % 1" just fills them with zeros.
        list_of_received = [i % 1 for i in range(0, self.__data_per_process_size + 1)]
        list_to_send = [i % 1 for i in range(0, self.__data_per_process_size + 1)]
        # Paired process id
        # Calculated by XOR with 1 shifted left x positions
        paired_process_id = self.__worker_id ^ (1 << x)
        # Send the biggest of the list and receive the smallest of the list
        # We need to split it into two parts
        # For the communication to take place
        # Otherwise both processes would want to
        # Send or receive at the same time - be blocked
        # Process with lower id receives first
        if self.__worker_id < paired_process_id:
            # Receive new minimum of sorted numbers
            source_id, minimum_marshalled = self._receive(paired_process_id)
            minimum = minimum_marshalled[0]
        # Process with higher id sends first
        else:
            # Send my biggest number to paired neighbour
            # ( Last element is the biggest element, because of bitonic sequence)
            # Marshalling because that is how i roll
            self._send(paired_process_id, [self.__my_data[self.__data_per_process_size - 1]])
        # Now it's time for the process with lower id to send
        if self.__worker_id < paired_process_id:
            # Send my biggest number to paired neighbour
            # ( Last element is the biggest element, because of bitonic sequence)
            # Marshalling because that is how i roll
            self._send(paired_process_id, [self.__my_data[self.__data_per_process_size - 1]])
        # This time process with higher id receives
        else:
            # Receive new minimum of sorted numbers
            source_id, minimum_marshalled = self._receive(paired_process_id)
            minimum = minimum_marshalled[0]
        # Store all values which are bigger than minimum received from paired_process
        # NOTE(review): the early break assumes elements > minimum appear
        # before the first element <= minimum -- confirm this holds for the
        # chunk ordering produced by the bitonic phases.
        for i in range(0, self.__data_per_process_size):
            if self.__my_data[i] > minimum:
                list_to_send[counter + 1] = self.__my_data[i]
                counter += 1
            else:
                # This helps us to save big number of cycles
                break
        # First element in array, will be it's size
        list_to_send[0] = counter
        # Send all values that are greater than minimum, and receive
        # Once again, we need to split it into two parts
        # For the communication to take place
        # Otherwise both processes would want to
        # Send or receive at the same time - be blocked
        # Process with lower id receives first
        if self.__worker_id < paired_process_id:
            # Receive partition from paired process
            source_id, list_of_received = self._receive(paired_process_id)
        # Process with higher id sends first
        else:
            # Send all elements that are bigger than minimum to
            # The paired process
            self._send(paired_process_id, list_to_send)
        # Now it's time for the process with lower id to send
        if self.__worker_id < paired_process_id:
            # Send all elements that are bigger than minimum to
            # The paired process
            self._send(paired_process_id, list_to_send)
        # This time process with higher id receives
        else:
            # Receive partition from paired process
            source_id, list_of_received = self._receive(paired_process_id)
        # Take all received values which are smaller than
        # the current biggest element
        # NOTE(review): only the last slot is overwritten on each iteration;
        # verify against the reference bitonic-sort merge step.
        for i in range(1, list_of_received[0] + 1):
            if self.__my_data[self.__data_per_process_size - 1] < list_of_received[i]:
                self.__my_data[self.__data_per_process_size - 1] = list_of_received[i]
            else:
                # This helps us to save big number of cycles
                break
        self.__my_data = self.quick_sort(self.__my_data)
def compare_high(self, x):
    """Bitonic-exchange step in which this worker keeps the HIGH values.

    Pairs this worker with the one whose id differs in bit ``x`` and
    exchanges boundary values and partitions so that afterwards this
    worker's data holds the larger elements of the pair.  Every
    send/receive pair is ordered by worker id so the two partners never
    both block on the same operation.

    NOTE(review): the early ``break``s assume self.__my_data and the
    received payload are sorted ascending (bitonic-phase invariant) --
    confirm against the caller in run().
    """
    maximum = None
    receiving_counter = 0
    sending_counter = 0
    # Scratch buffers of size data_per_process_size + 1; slot 0 carries
    # the element count of the payload that follows.
    list_of_received = [i % 1 for i in range(0, self.__data_per_process_size + 1)]
    list_to_send = [i % 1 for i in range(0, self.__data_per_process_size + 1)]
    # Paired process id: XOR with 1 shifted left x positions.
    paired_process_id = self.__worker_id ^ (1 << x)
    # Receive maximum from the paired process and send our minimum to it.
    # Split into two id-ordered phases so the two partners never try to
    # send (or receive) at the same time and deadlock.
    # Process with lower id receives first.
    if self.__worker_id < paired_process_id:
        # Receive new maximum of sorted numbers
        source_id, maximum_marshalled = self._receive(paired_process_id)
        maximum = maximum_marshalled[0]
    # Process with higher id sends first
    else:
        # Send my smallest number to paired neighbour
        # (wrapped in a list: the transport marshals sequences)
        self._send(paired_process_id, [self.__my_data[0]])
    # Now it's time for the process with lower id to send
    if self.__worker_id < paired_process_id:
        # Send my smallest number to paired neighbour
        self._send(paired_process_id, [self.__my_data[0]])
    # This time process with higher id receives
    else:
        # Receive new maximum of sorted numbers
        source_id, maximum_marshalled = self._receive(paired_process_id)
        maximum = maximum_marshalled[0]
    # Store all values which are smaller than the maximum received from
    # the paired process (payload starts at slot 1).
    for i in range(0, self.__data_per_process_size):
        if self.__my_data[i] < maximum:
            list_to_send[sending_counter + 1] = self.__my_data[i]
            sending_counter += 1
        else:
            # Data is ordered, so no later element can qualify either.
            break
    # Send all values that are smaller than maximum, and receive the
    # partner's partition -- again split by worker id to avoid deadlock.
    # Process with lower id receives first.
    if self.__worker_id < paired_process_id:
        # Receive greater-than-min partition from paired process
        source_id, list_of_received = self._receive(paired_process_id)
        receiving_counter = list_of_received[0]
    # Process with higher id sends first
    else:
        # Send all elements that are smaller than maximum to the paired process
        self._send(paired_process_id, list_to_send)
    # Now it's time for the process with lower id to send
    if self.__worker_id < paired_process_id:
        # Send all elements that are smaller than maximum to the paired process
        self._send(paired_process_id, list_to_send)
    # This time process with higher id receives
    else:
        # Receive greater-than-min partition from paired process
        source_id, list_of_received = self._receive(paired_process_id)
        receiving_counter = list_of_received[0]
    # Take received values that are greater than our current minimum.
    for i in range(1, receiving_counter + 1):
        if list_of_received[i] > self.__my_data[0]:
            self.__my_data[0] = list_of_received[i]
        else:
            # Payload is ordered; remaining values cannot beat it.
            break
    # Restore sorted order after the in-place replacements.
    self.__my_data = self.quick_sort(self.__my_data)
def run(self):
    """Entry point for one worker: distribute data, bitonic-sort, gather.

    For each dimension i, walks j from i down to 0 and performs a
    compare-low or compare-high exchange depending on whether bit
    (i + 1) of the worker id matches bit j.
    """
    self.__log('Started.')
    self.handle_data_creation_and_distribution()
    self.__my_data = self.quick_sort(self.__my_data)
    for i in range(0, self.__dimensions):
        for j in range(i, -1, -1):
            # (window_id is even AND jth bit of process is 0)
            # OR (window_id is odd AND jth bit of process is 1)
            # Bug fix: the first test used to read
            # `self.__worker_id >> (i + 1) % 2`, which -- because `%`
            # binds tighter than `>>` -- shifted by (i + 1) % 2 instead
            # of testing bit (i + 1).  It now matches the parenthesised
            # form the original used in the second half of the same
            # expression.
            window_bit = (self.__worker_id >> (i + 1)) % 2
            j_bit = (self.__worker_id >> j) % 2
            if window_bit == j_bit:
                self.compare_low(j)
            else:
                self.compare_high(j)
    self.__barrier()
    self.handle_data_gather_and_print()
    self.__log('Terminated.')
def main():
    """Parse CLI arguments, build the DistributedSystem and run it."""
    random.seed()  # seed from system entropy for the generated data
    configuration = _parse_args()
    system = DistributedSystem(configuration)
    system.run()
if __name__ == '__main__':
    sys.exit(main())
|
"""
admintools.py
utilities to add users, courses, etc.
--- courses_jan2018.csv ----
name,name_as_title,path,faculty
Programming Workshop,Programming<br>Workshop,spring2018/workshop,mahoney
Jim's Tutorials,Jim's<br>Tutorials,spring2018/jims_tutorials,mahoney
...
--- students_2018.csv ---
name,username,course
<NAME>,ct,modern_physics
<NAME>,jeffa,modern_physics
...
--- users.csv ---------
name,username
<NAME>,jsmith
interactively create users.csv from nook html listing
and then add them to umber's sql database :
$ cd /var/www/umber
$ . env/production
$ umber_console
>>> from admintools import *
>>> os.chdir('/var/www/cours/etc/misc') # folder for .html , .csv
>>> users = parse_directory('nookfile.html')
>>> make_csv(users, 'users.csv')
>>> add_users('users.csv')
"""
from model import Course, Person
import csv, random, re, sys, os
term = '2020-09-01'      # CHANGEME : first day of the current term (ISO date)
termfolder = 'fall2020'  # CHANGEME : course folder name for the current term
def read_csv(filename):
    """Return the rows of *filename* as a list of dicts, one per line,
    keyed by the CSV header."""
    with open(filename) as handle:
        return list(csv.DictReader(handle))
def pwd():
    """Console convenience: return the current working directory."""
    return os.getcwd()
def cd(folder):
    """Console convenience: chdir to *folder*; always returns None."""
    os.chdir(folder)
    return None
def make_faculty():
    """Create the admin/faculty account in the umber database.

    Bug fix: the password assignment was not a valid string literal
    (an unterminated, scrubbed value), so this module failed to parse.
    The placeholder below must be replaced before running.
    """
    Person.create_person(
        username='mahoney',
        name='<NAME>',
        email='<EMAIL>',
        password='<PASSWORD>',  # RESETME!
        is_admin=True
    )
def make_courses(csvfilename='courses_jan2018.csv'):
    """Create courses from a .csv file defining them and enroll each
    course's faculty member.

    The csv columns are: name, name_as_title, path, faculty.
    """
    for row in csv.DictReader(open(csvfilename)):
        course = Course.create_course(
            name=row['name'],
            name_as_title=row['name_as_title'],
            path=row['path'],
            start=term,
            copy_generic=True
        )
        faculty = Person.by_username(row['faculty'])
        # Bug fix: the enroll date was the undefined name `spring2018`;
        # use the module-level `term` date constant, consistent with the
        # `start=term` used above.
        course.enroll(faculty, 'faculty', term)
def read_populi_csv(csvfilename):
    """Return user data ({'name', 'email'} dicts) from a populi student
    csv export.

    Populi export fields include: "Student ID", Prefix, "First Name",
    "Preferred Name", "Middle Name", "Last Name", "Former Name", Email,
    address fields, Type, "Receives Mail".  When 'Preferred Name' is
    non-empty it is used as the first name; prefix/middle/former names
    are ignored, and rows whose Type is 'faculty' are skipped.
    """
    users = []
    with open(csvfilename) as handle:
        for record in csv.DictReader(handle):
            if record['Type'] == 'faculty':
                continue
            first = record['Preferred Name'] or record['First Name']
            users.append({'name': first + ' ' + record['Last Name'],
                          'email': record['Email']})
    return users
def email_to_username(email):
    """Return the part of *email* before the first '@' (the whole string
    when there is no '@')."""
    username, _, _ = email.partition('@')
    return username
def _add_users(users, course=None, date='2020-09-01'):
    """Create Person records for *users* ({'name','email'} dicts) and,
    when *course* is given, enroll each as a student on *date*."""
    for entry in users:
        # create_person behaves as get_or_create: existing users are
        # returned unchanged.
        person = Person.create_person(
            name=entry['name'],
            email=entry['email'],
            username=email_to_username(entry['email']),
            password='')
        if course:
            course.enroll(person, 'student',
                          datestring=date, create_work=True)
def add_users(csvfilename, read_csv=read_populi_csv,
              course=None, date='2020-09-01'):
    """Create users from *csvfilename* (parsed by *read_csv*) and
    optionally enroll them in *course* on *date*."""
    _add_users(read_csv(csvfilename), course=course, date=date)
def parse_directory(filename):
    """Read an html file (nook directory listing) and return users as
    [{'name': ..., 'username': ...}, ...], sorted by username.

    Returns [] when the file cannot be read.

    Bug fix: the original used a bare ``except:`` that swallowed every
    error (including KeyboardInterrupt) and leaked the file handle;
    this narrows it to OSError and uses a ``with`` block.
    """
    try:
        with open(filename) as html_file:
            html = html_file.read()
    except OSError:
        # Missing or unreadable listing: nothing to parse.
        return []
    users = []
    # Each entry looks like: <b>Full Name</b> ... mailto:username@...
    for match in re.finditer(r'<b>([^<]+)</b>.*?mailto:([^@]+)@', html):
        name, username = match.groups()
        users.append({'name': name, 'username': username})
    users.sort(key=lambda u: u['username'])
    return users
def make_csv(userdict, csvfilename):
    """Write *userdict* (a list of {'name','username'} dicts) to
    *csvfilename* with a ``name,username`` header line.

    Bug fix: the header was written through the undefined name ``cs``
    (a NameError on every call), and the file handle was named ``csv``,
    shadowing the csv module.  The handle is now managed by ``with``.
    """
    with open(csvfilename, 'w') as f:
        f.write('name,username\n')
        for u in userdict:
            f.write("{},{}\n".format(u['name'], u['username']))
|
class Levenshtein:
    """Weighted Levenshtein distance over a configurable alphabet.

    Every character carries a ``(delete, insert, substitute)`` cost
    triple, defaulting to ``(1, 1, 1)``.  Characters encountered during
    a comparison that are not yet known are added on the fly with the
    default costs.
    """

    def __init__(self, alphabet, symbols, weight_dict=None, language=None):
        """Set up the alphabet, symbols and per-character weights.

        Parameters
        ----------
        alphabet: string
            Letters used by the language; lower- and upper-case variants
            are derived automatically.  Example: 'abcd\u00e1'.
        symbols: string
            Non-letter characters used by the language.  Example: '?! '.
        weight_dict: dict of str -> tuple(delete, insert, substitute), optional
            Per-character cost overrides, e.g. {'a': (1, 2, 3)}.
            Defaults to (1, 1, 1) for every character.
            (Bug fix: this used to be a mutable default argument ``{}``.)
        language: string, optional
            Free-text description of the intended language (default None).
        """
        self.lower_alphabet = alphabet.lower()
        self.upper_alphabet = alphabet.upper()
        self.symbols = symbols
        self.alphabet = self.lower_alphabet + self.upper_alphabet + self.symbols
        # Unit costs for every known character, then apply the overrides.
        self.weight_dict = {char: (1, 1, 1) for char in self.alphabet}
        if weight_dict:
            self.weight_dict.update(weight_dict)
        self.language = language

    def __update_alphabet(self, letter, weights):
        """Add *letter* (both cases) to the alphabet, or update its
        existing weights to *weights*."""
        if letter.lower() not in self.lower_alphabet:
            self.lower_alphabet += letter.lower()
            self.upper_alphabet += letter.upper()
        self.weight_dict.update({letter.lower(): weights,
                                 letter.upper(): weights})
        self.alphabet = self.lower_alphabet + self.upper_alphabet + self.symbols
        return None

    def __update_symbols(self, symbol, weights):
        """Add *symbol* to the symbol set, or update its existing
        weights to *weights*."""
        if symbol not in self.symbols:
            self.symbols += symbol
        self.weight_dict.update({symbol: weights})
        self.alphabet = self.lower_alphabet + self.upper_alphabet + self.symbols
        return None

    def __update_weights(self, item, weights=(1, 1, 1)):
        """Register *item* (letter or symbol) with *weights*, routing to
        the appropriate updater."""
        if item.isalpha():
            self.__update_alphabet(item, weights)
        else:
            self.__update_symbols(item, weights)
        return None

    def iterative_matrix(self, source_input, target_input):
        """Return ``(dist, row, col)`` for the weighted edit-distance DP.

        ``dist[i][j]`` holds the cost of transforming the first ``i``
        characters of *source_input* into the first ``j`` characters of
        *target_input*; ``(row, col)`` index the bottom-right cell and
        stay 0 for an empty input.  Core algorithm adapted from
        https://www.python-course.eu/levenshtein_distance.php

        Bug fixes: bare ``except:`` clauses replaced by explicit
        membership checks; the dead ``else: subs = subs`` branch removed.
        """
        w = self.weight_dict
        rows = len(source_input) + 1
        cols = len(target_input) + 1
        dist = [[0] * cols for _ in range(rows)]
        # Final cell indices; initialising them here replaces the
        # original post-hoc "empty string" patch.
        row = col = 0
        # Source prefixes can be transformed into empty strings by
        # deletions.  Unknown characters are registered with default
        # weights before use.
        for row in range(1, rows):
            ch = source_input[row - 1]
            if ch not in w:
                self.__update_weights(ch)
            dist[row][0] = dist[row - 1][0] + w[ch][0]
        # Target prefixes can be created from an empty source string by
        # inserting the characters.
        for col in range(1, cols):
            ch = target_input[col - 1]
            if ch not in w:
                self.__update_weights(ch)
            dist[0][col] = dist[0][col - 1] + w[ch][1]
        for col in range(1, cols):
            for row in range(1, rows):
                src_ch = source_input[row - 1]
                tgt_ch = target_input[col - 1]
                # Matching characters are free; otherwise substitution
                # costs the dearer of the two characters' sub weights.
                if src_ch == tgt_ch:
                    subs = 0
                else:
                    subs = max(w[src_ch][2], w[tgt_ch][2])
                dist[row][col] = min(dist[row - 1][col] + w[src_ch][0],    # delete
                                     dist[row][col - 1] + w[tgt_ch][1],    # insert
                                     dist[row - 1][col - 1] + subs)        # substitute
        return dist, row, col

    def distance(self, source_input, target_input):
        """Return the minimum weighted cost of insertions, deletions and
        substitutions transforming one string into the other."""
        dist, row, col = self.iterative_matrix(source_input, target_input)
        return dist[row][col]

    def similarity(self, source_input, target_input):
        """Return ``1 - distance / max(len)``, a similarity in [0, 1]
        for unit weights.

        Bug fix: two empty strings now yield 1.0 instead of raising
        ZeroDivisionError.
        """
        dist, row, col = self.iterative_matrix(source_input, target_input)
        longest = max(row, col)
        if longest == 0:
            return 1.0  # two empty strings are identical
        return 1 - dist[row][col] / longest
|
<gh_stars>1-10
import os
import sys
import io
import filecmp
import pytest
from pathlib import Path
from pyneid.neid import Neid
from astropy.table import Table,Column
# These tests are designed to be run inside the
# Docker container built with the Dockerfile
# at the top level of the repo.
# dummy user pyneidprop with limited access
# dummy user pyneidprop with limited access:
# "userid_password" -> expected server reply prefix
userdict = {
    "pyneidprop_pielemonquietyellow":"Successfully login as pyneidprop",
    "xxpyneidprop_pielemonquietyellow":"Failed to login: invalid userid = xxpyneidprop",
    "pyneidprop_xxpielemonquietyellow":"Failed to login: invalid password"
}


#
# test login method: correct credentials, wrong userid, wrong password
#
@pytest.mark.parametrize("user, expected", list(userdict.items()),
                         ids=list(userdict.keys()))
def test_login(user, expected, capsys):
    """Log in with each userid/password pair and check the reply."""
    userid, _, password = user.partition('_')
    Neid.login(cookiepath='./neidtestcookie.txt',
               userid=userid,
               password=password)
    captured = capsys.readouterr()
    assert captured.out.startswith(expected)
#
# test query_datetime method for all datalevel;
# but currently only l0 and l1 contains data for the test user.
#
# returned metadata files are compared with the truth data for validation.
#
# datalevel -> datetime range; currently only l0 and l1 contain data
# for the test user.
datetimedict = {
    "l0":"2021-01-16 06:10:55/2021-01-16 23:59:59", \
    "l1":"2021-01-16 06:10:55/2021-01-16 23:59:59"
}


@pytest.mark.parametrize ("datalevel,datetime", list(datetimedict.items()), \
                          ids=list(datetimedict.keys()))
def test_query_datetime (datalevel, datetime, capsys):
    """Query metadata by datetime range and validate the result file
    against the truth data."""
    outpath = './datetime.' + datalevel + '.tbl'
    datapath = './truth_data/datetime.' + datalevel + '.tbl'
    Neid.query_datetime (datalevel, \
                         datetime, \
                         cookiepath='./neidtestcookie.txt', \
                         format='ipac', \
                         outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    if (datalevel == 'l0'):
        # l0 output is stable enough for a byte-level comparison.
        assert (filecmp.cmp (outpath, datapath, shallow=False))
    elif (datalevel == 'l1'):
        astropytbl = Table.read (outpath, format='ascii.ipac')
        # Bug fix: the assertion messages below were plain strings
        # beginning with a literal "f{...}" -- the f was inside the
        # quotes.  They are now real f-strings.
        assert (astropytbl is not None), \
            f'{outpath:s} cannot be read by astropy'
        astropytbl_truth = Table.read (datapath, format='ascii.ipac')
        assert (astropytbl_truth is not None), \
            f'{datapath:s} cannot be read by astropy'
        assert (len(astropytbl) == len(astropytbl_truth)), \
            f'Number of records in {outpath:s} is incorrect'
#
# test query_position method for all datalevel;
# but currently only l0 and l1 contains data for the test user.
#
# returned metadata files are compared with the truth data for validation.
#
# datalevel -> cone-search position; currently only l0 and l1 contain
# data for the test user.
posdict = {
    "l0": "circle 23.634 68.95 1.0", \
    "l1": "circle 23.634 68.95 1.0"
}


@pytest.mark.parametrize ("datalevel,pos", list(posdict.items()), \
                          ids=list(posdict.keys()))
def test_query_position (datalevel, pos, capsys):
    """Query metadata by sky position and validate the record count
    against the truth data."""
    outpath = './pos.' + datalevel + '.tbl'
    datapath = './truth_data/pos.' + datalevel + '.tbl'
    Neid.query_position (datalevel, \
                         pos, \
                         cookiepath='./neidtestcookie.txt', \
                         format='ipac',
                         outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_object method using l1 data
#
def test_query_object():
    """Query l1 metadata by object name and validate the record count
    against the truth data."""
    outpath = './object.l1.tbl'
    datapath = './truth_data/object.l1.tbl'
    Neid.query_object ('l1', \
                       'HD 9407', \
                       cookiepath='./neidtestcookie.txt', \
                       format='ipac', \
                       outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_qobject method using l1 data
#
def test_query_qobject():
    """Query l1 metadata by qobject name and validate the record count
    against the truth data."""
    outpath = './qobject.l1.tbl'
    datapath = './truth_data/qobject.l1.tbl'
    Neid.query_qobject ('l1', \
                        'Gaia DR2', \
                        cookiepath='./neidtestcookie.txt', \
                        format='ipac', \
                        outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_program method using l1 data
#
def test_query_program():
    """Query l1 metadata by program id and validate the record count
    against the truth data."""
    outpath = './program.l1.tbl'
    datapath = './truth_data/program.l1.tbl'
    Neid.query_program ('l1', \
                        '2021A-2014', \
                        cookiepath='./neidtestcookie.txt', \
                        format='ipac', \
                        outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_criteria method using l1 data
#
def test_query_criteria():
    """Query l1 metadata by a multi-criteria dict (datetime + object)
    and validate the record count against the truth data."""
    outpath = './criteria.l1.tbl'
    datapath = './truth_data/criteria.l1.tbl'
    param = dict()
    param['datalevel'] = 'l1'
    param['datetime'] = '2021-01-01 00:00:00/2021-04-19 23:59:59'
    param['object'] = 'HD 9407'
    Neid.query_criteria (param, \
                         cookiepath='./neidtestcookie.txt', \
                         format='ipac', \
                         outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_adql method using l1 data
#
def test_qeury_adql():
    """Run a raw ADQL query against the l1 table and validate the
    record count against the truth data.

    NOTE: the function name keeps the historic 'qeury' typo so existing
    test selections (-k filters, test ids) stay valid.
    """
    outpath = './adql.l1.tbl'
    datapath = './truth_data/adql.l1.tbl'
    query = "select l1filename, l1filepath, l1propint, qobject, object, qra, qdec, to_char(obsdate,'YYYY-MM-DD HH24:MI:SS.FF3') as date_obs, exptime, obsmode, obstype, program, piname, datalvl, seeing, airmass, moonagl, qrad as ra, qdecd as dec from neidl1 where ((obsdate >= to_date('2020-01-01 06:10:55', 'yyyy-mm-dd HH24:MI:SS') and obsdate <= to_date('2021-04-19 23:59:59', 'yyyy-mm-dd HH24:MI:SS')) and (qdecd >= -90.)) order by obsdate"
    Neid.query_adql (query, \
                     cookiepath='./neidtestcookie.txt', \
                     format='ipac', \
                     outpath=outpath)
    assert os.path.exists(outpath), \
        f'Result not downloaded to file [{outpath:s}]'
    astropytbl = Table.read (outpath, format='ascii.ipac')
    # Bug fix: the assertion messages below were plain strings beginning
    # with a literal "f{...}"; they are now real f-strings.
    assert (astropytbl is not None), \
        f'{outpath:s} cannot be read by astropy'
    astropytbl_truth = Table.read (datapath, format='ascii.ipac')
    assert (astropytbl_truth is not None), \
        f'{datapath:s} cannot be read by astropy'
    assert (len(astropytbl) >= len(astropytbl_truth)), \
        f'Number of records in {outpath:s} is incorrect'
#
# test query_adql method:
# download the first two files from metadata file criteria.l1.tbl
#
# datalevel -> metadata table produced by the query tests above
dnloaddict = {
    "l0":"./datetime.l0.tbl", \
    "l1":"./criteria.l1.tbl"
}


@pytest.mark.parametrize ("datalevel,metatbl", list(dnloaddict.items()), \
                          ids=list(dnloaddict.keys()))
def test_download(datalevel, metatbl, capsys):
    """Download the first rows listed in a metadata table and check the
    files arrive with a plausible size."""
    # The metadata table must contain a '<datalevel>filepath' column.
    astropytbl = Table.read (metatbl, format='ascii.ipac')
    colname = datalevel + 'filepath'   # loop-invariant, hoisted
    ind_filepathcol = -1
    for i, name in enumerate(astropytbl.colnames):
        if name.lower() == colname:
            ind_filepathcol = i
    # Bug fix: message said "doesn't exit".
    assert (ind_filepathcol >= 0), \
        "filepath column doesn't exist in metadata table"
    # Make sure ./dnload_dir is empty before downloading.
    dnloaddir = './dnload_dir'
    srow = 0
    erow = 1
    if (os.path.exists (dnloaddir)):
        for f in os.listdir(dnloaddir):
            os.remove(dnloaddir + '/' + f)
    Neid.download(metatbl, \
                  datalevel, \
                  'ipac', \
                  dnloaddir, \
                  cookiepath='./neidtestcookie.txt', \
                  start_row=srow, \
                  end_row=erow)
    for i in range (srow, erow):
        filepath = astropytbl[i][ind_filepathcol]
        filename = filepath[filepath.rindex('/') + 1:]
        print (f'filename= {filename:s}')
        print (f'filepath= {filepath:s}')
        dnloaded = dnloaddir + '/' + filename
        assert (os.path.exists (dnloaded))
        # Downloaded data files should be well over 100 kB.
        filesize = Path (dnloaded).stat().st_size
        assert (filesize > 100000)
|
<gh_stars>0
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from fabric.api import *
from fabric.contrib.files import exists
import logging
import argparse
import json
import sys
import os
from dlab.ssn_lib import *
from dlab.fab import *
# Command-line contract for the SSN configuration script; most values
# are forwarded verbatim to start_ss() below.
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--additional_config', type=str, default='{"empty":"string"}')
parser.add_argument('--dlab_path', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--cloud_provider', type=str, default='')
parser.add_argument('--os_family', type=str, default='')
parser.add_argument('--request_id', type=str, default='')
parser.add_argument('--resource', type=str, default='')
parser.add_argument('--service_base_name', type=str, default='')
parser.add_argument('--tag_resource_id', type=str, default=None)
# AWS billing parameters
parser.add_argument('--account_id', type=str, default=None)
parser.add_argument('--billing_bucket', type=str, default=None)
parser.add_argument('--aws_job_enabled', type=str, default=None)
parser.add_argument('--report_path', type=str, default=None)
parser.add_argument('--authentication_file', type=str, default=None)
parser.add_argument('--offer_number', type=str, default=None)
parser.add_argument('--currency', type=str, default=None)
parser.add_argument('--locale', type=str, default=None)
parser.add_argument('--region_info', type=str, default=None)
parser.add_argument('--billing_enabled', type=str, default=False)
# Azure / LDAP parameters
parser.add_argument('--ldap_login', type=str, default=None)
parser.add_argument('--tenant_id', type=str, default=None)
parser.add_argument('--application_id', type=str, default=None)
parser.add_argument('--subscription_id', type=str, default=None)
parser.add_argument('--datalake_store_name', type=str, default=None)
parser.add_argument('--validate_permission_scope', type=str, default=None)
parser.add_argument('--mongo_parameters', type=str, default='')
# Billing report field names
parser.add_argument('--dlab_id', type=str, default=None)
parser.add_argument('--usage_date', type=str, default=None)
parser.add_argument('--product', type=str, default=None)
parser.add_argument('--usage_type', type=str, default=None)
parser.add_argument('--usage', type=str, default=None)
parser.add_argument('--cost', type=str, default=None)
parser.add_argument('--resource_id', type=str, default=None)
parser.add_argument('--tags', type=str, default=None)
args = parser.parse_args()
# Derived locations inside the DLab installation.
dlab_conf_dir = args.dlab_path + 'conf/'
web_path = args.dlab_path + 'webapp/lib/'
# Per-request UI log written under /logs/<resource>/.
local_log_filename = "{}_UI.log".format(args.request_id)
local_log_filepath = "/logs/" + args.resource + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                    level=logging.INFO,
                    filename=local_log_filepath)
# Random one-time credentials for MongoDB and the keystore.
mongo_passwd = id_generator()
keystore_passwd = id_generator()
def copy_ssn_libraries():
    """Copy the local DLab python libraries onto the SSN host.

    Pushes /usr/lib/python2.7/dlab/* to the remote machine via scp
    (staged through /tmp/dlab_libs) and, when /usr/lib64 exists on the
    remote host, symlinks the package into the lib64 tree as well.
    Always returns True; fabric aborts the run if a command fails.
    """
    sudo('mkdir -p /usr/lib/python2.7/dlab/')
    run('mkdir -p /tmp/dlab_libs/')
    # `local` runs on the provisioning machine: push libs to the remote
    # staging directory over scp.
    local('scp -i {} /usr/lib/python2.7/dlab/* {}:/tmp/dlab_libs/'.format(args.keyfile, env.host_string))
    run('chmod a+x /tmp/dlab_libs/*')
    sudo('mv /tmp/dlab_libs/* /usr/lib/python2.7/dlab/')
    if exists('/usr/lib64'):
        sudo('ln -fs /usr/lib/python2.7/dlab /usr/lib64/python2.7/dlab')
    return True
def configure_mongo(mongo_passwd):
    """Install the mongod systemd unit and run the MongoDB setup scripts
    on the SSN host.

    Templates and scripts are edited locally with sed (baking in
    *mongo_passwd*), copied to the remote host via scp, and executed
    there.  Returns True on success, False on any failure.
    """
    try:
        if not exists("/lib/systemd/system/mongod.service"):
            # The mongod service user differs between distro families.
            if os.environ['conf_os_family'] == 'debian':
                local('sed -i "s/MONGO_USR/mongodb/g" /root/templates/mongod.service_template')
            elif os.environ['conf_os_family'] == 'redhat':
                local('sed -i "s/MONGO_USR/mongod/g" /root/templates/mongod.service_template')
            local('scp -i {} /root/templates/mongod.service_template {}:/tmp/mongod.service'.format(args.keyfile,
                                                                                                    env.host_string))
            sudo('mv /tmp/mongod.service /lib/systemd/system/mongod.service')
            sudo('systemctl daemon-reload')
            sudo('systemctl enable mongod.service')
        # Bake the generated password into the helper scripts, then ship
        # them (plus the roles file) to the remote tmp/ directory.
        local('sed -i "s|PASSWORD|{}|g" /root/scripts/resource_status.py'.format(mongo_passwd))
        local('scp -i {} /root/scripts/resource_status.py {}:/tmp/resource_status.py'.format(args.keyfile,
                                                                                            env.host_string))
        sudo('mv /tmp/resource_status.py ' + os.environ['ssn_dlab_path'] + 'tmp/')
        local('sed -i "s|PASSWORD|{}|g" /root/scripts/configure_mongo.py'.format(mongo_passwd))
        local('scp -i {} /root/scripts/configure_mongo.py {}:/tmp/configure_mongo.py'.format(args.keyfile,
                                                                                            env.host_string))
        sudo('mv /tmp/configure_mongo.py ' + args.dlab_path + 'tmp/')
        local('scp -i {} /root/files/mongo_roles.json {}:/tmp/mongo_roles.json'.format(args.keyfile,
                                                                                      env.host_string))
        sudo('mv /tmp/mongo_roles.json ' + args.dlab_path + 'tmp/')
        mongo_parameters = json.loads(args.mongo_parameters)
        # Run the remote configuration script with the parsed parameters.
        sudo("python " + args.dlab_path + "tmp/configure_mongo.py --dlab_path {} --mongo_parameters '{}'".format(
            args.dlab_path, json.dumps(mongo_parameters)))
        return True
    except Exception as err:
        # Deliberate best-effort: report the error and let the caller
        # decide (it exits with a status code).
        print(err)
        return False
##############
# Run script #
##############
if __name__ == "__main__":
    # Orchestrates SSN configuration: connect, copy libs, install
    # supervisor + MongoDB, configure Mongo, then start the UI.
    print("Configure connections")
    try:
        env['connection_attempts'] = 100
        env.key_filename = [args.keyfile]
        env.host_string = args.os_user + '@' + args.hostname
        deeper_config = json.loads(args.additional_config)
    except Exception:
        # Bad arguments or unparsable JSON config: distinct exit code.
        # (Was a bare `except:`.)
        sys.exit(2)
    print("Copying DLab libraries to SSN")
    if not copy_ssn_libraries():
        logging.error('Failed to copy DLab libraries')
        sys.exit(1)
    print("Installing Supervisor")
    if not ensure_supervisor():
        logging.error('Failed to install Supervisor')
        sys.exit(1)
    print("Installing MongoDB")
    if not ensure_mongo():
        logging.error('Failed to install MongoDB')
        sys.exit(1)
    print("Configuring MongoDB")
    if not configure_mongo(mongo_passwd):
        logging.error('MongoDB configuration script has failed.')
        sys.exit(1)
    # Export the conf dir for subsequent shell sessions on the host.
    sudo('echo DLAB_CONF_DIR={} >> /etc/profile'.format(dlab_conf_dir))
    sudo('echo export DLAB_CONF_DIR >> /etc/profile')
    print("Starting Self-Service(UI)")
    if not start_ss(args.keyfile, env.host_string, dlab_conf_dir, web_path,
                    args.os_user, mongo_passwd, keystore_passwd, args.cloud_provider,
                    args.service_base_name, args.tag_resource_id, args.account_id,
                    args.billing_bucket, args.aws_job_enabled, args.dlab_path, args.billing_enabled,
                    args.authentication_file, args.offer_number, args.currency, args.locale,
                    args.region_info, args.ldap_login, args.tenant_id, args.application_id,
                    args.hostname, args.datalake_store_name, args.subscription_id, args.validate_permission_scope,
                    args.dlab_id, args.usage_date, args.product, args.usage_type,
                    args.usage, args.cost, args.resource_id, args.tags):
        logging.error('Failed to start UI')
        # Bug fix: this message read 'Failed to UI'.
        print('Failed to start UI')
        sys.exit(1)
    sys.exit(0)
|
<gh_stars>1-10
import server as s
import functions as f
import accounts as a
import os
import json
def _clear_screen():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    os.system('cls' if os.name == 'nt' else 'clear')
def mainMenu():
    """
    Displays the main menu of the setup script and dispatches the
    user's choice to the matching configuration function.
    """
    _clear_screen()
    print('Main Menu')
    print('1 - Set Canvas Instance URL')
    print('2 - Set District Account ID')
    print('3 - Set Canvas Access Token')
    print('4 - Select Accounts to Sync')
    print('5 - Select Terms to Sync')
    print('6 - Set Ignored Courses (If specific courses should be skipped.)')
    print('7 - Set Courses to Sync (If only select courses are synced)')
    print('8 - Set Run Path')
    print('9 - Help')
    print('10 - Exit')
    userInput = input('Select an option: ')
    # Normalise the choice: trim whitespace and strip any pasted quotes.
    userInput = userInput.strip().replace('"', "").replace("'", '')
    # Every branch cleared the screen first, so do it once here.
    _clear_screen()
    if userInput == '1':
        setURL()
    elif userInput == '2':
        setDistrictID()
    elif userInput == '3':
        setAccessToken()
    elif userInput == '4':
        setAccountsToSync()
    elif userInput == '8':
        setPath()
    elif userInput in ('5', '6', '7', '9'):
        # Not implemented yet; show the placeholder and return to the menu
        # (previously the function simply returned, silently ending the program).
        print('Do Something')
        mainMenu()
    elif userInput == '10':
        print('Goodbye!')
        exit(0)
    else:
        print('Invalid choice. Please input a valid choice.')
        mainMenu()
def writeJSON(variables):
    """
    Persist *variables* to ``variables.json`` in the working directory,
    pretty-printed with a 4-space indent.
    """
    serialized = json.dumps(variables, indent=4)
    with open('variables.json', 'w') as out_file:
        out_file.write(serialized)
def setURL():
    """
    Prompt for the full Canvas instance URL, save it to variables.json,
    and return to the main menu.
    """
    print('Please enter the url for your Canvas instance. '
          'It should be complete, from the https to the .com. '
          'i.e https://yourschool.instructure.com'
          )
    # Trim whitespace and strip any quotes the user pasted in.
    url = input("Your url: ").strip().replace('"', "").replace("'", '')
    config = f.getVariables()
    config['canvasURL'] = url
    writeJSON(config)
    print(f'URL set to {url}')
    mainMenu()
def setDistrictID():
    """
    Sets the Canvas district account id variable. This is the base
    account id for your Canvas instance.
    """
    print('Please enter the district account id for the base account '
          'of your Canvas instance. It should be a number.'
          )
    userInput = input("District Account ID: ")
    # Trim whitespace and strip any quotes the user pasted in.
    userInput = userInput.strip().replace('"', "").replace("'", '')
    variables = f.getVariables()
    variables['districtAccountID'] = userInput
    writeJSON(variables)
    # Fixed typo in the confirmation message: 'iID' -> 'ID'.
    print(f'District account ID set to {userInput}')
    mainMenu()
def setAccessToken():
    """
    Sets the Canvas access token. The token must belong to an admin account.
    """
    print('Please enter the access token for Canvas. This needs to be '
          'for an admin account.'
          )
    # Fixed copy-paste bug: the prompt previously read "District Account ID: ".
    userInput = input("Access Token: ")
    # Trim whitespace and strip any quotes the user pasted in.
    userInput = userInput.strip().replace('"', "").replace("'", '')
    variables = f.getVariables()
    variables['accessToken'] = userInput
    writeJSON(variables)
    print(f'Canvas access token set to {userInput}')
    mainMenu()
def setAccountsToSync():
    """
    Interactive sub-menu for choosing which Canvas sub-accounts to sync.
    Reads the current selection from variables.json, lets the user list,
    add or remove accounts, then persists the selection and re-displays
    this menu.
    """
    variables = f.getVariables()
    accounts = a.getAccounts()
    sAccounts = variables.get('accounts', [])
    sAccountIDs = [account['id'] for account in sAccounts]
    print('Accounts Menu')
    print('1 - See Currently Syncing Accounts')
    print('2 - Add Account')
    print('3 - Remove Account')
    print('4 - Main Menu')
    userInput = input("Option: ").strip().replace('"', "").replace("'", '')
    if userInput == '1':
        print('')
        print('Currently Syncing Accounts')
        if sAccounts:
            # Fixed: header was printed once per row instead of once.
            print('Account ID - Account Name')
            for account in sAccounts:
                print(f'{account["id"]} - {account["name"]}')
        else:
            print('There are not any accounts set up to sync.')
        print('')
        setAccountsToSync()
        # Fixed: previously fell through and rewrote variables.json.
        return
    elif userInput == '2':
        accountIDs = []
        print('')
        print('Account ID - Account Name')
        for account in accounts:
            if account['id'] in sAccountIDs:
                continue  # already selected; don't offer it again
            print(f'{account["id"]} - {account["name"]}')
            accountIDs.append(account['id'])
        userInput = input("Account to add: ").strip().replace('"', "").replace("'", '')
        if not userInput.isdigit() or int(userInput) not in accountIDs:
            # Fixed: previously recursed and then KEPT executing with the
            # bad input, raising ValueError on int(userInput).
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
            return
        for account in accounts:
            if account['id'] == int(userInput):
                sAccounts.append(account)
        print('')
    elif userInput == '3':
        print('')
        print('Account ID - Account Name')
        for account in sAccounts:
            print(f'{account["id"]} - {account["name"]}')
        userInput = input("Account to remove: ").strip().replace('"', "").replace("'", '')
        if not userInput.isdigit() or int(userInput) not in sAccountIDs:
            print('Invalid choice. Please input a valid choice.')
            setAccountsToSync()
            return
        # Iterate over a copy: we mutate sAccounts inside the loop.
        for account in list(sAccounts):
            if account['id'] == int(userInput):
                sAccounts.remove(account)
        print('')
    elif userInput == '4':
        mainMenu()
        # Fixed: previously continued and re-saved after leaving the menu.
        return
    else:
        os.system('cls' if os.name == 'nt' else 'clear')
        print('Invalid choice. Please input a valid choice.')
        setAccountsToSync()
        return
    # An account was added or removed: persist and show the menu again.
    variables['accounts'] = sAccounts
    writeJSON(variables)
    setAccountsToSync()
def setPath():
    """
    Prompt for the directory the sync script runs from, persist it to
    variables.json, and return to the main menu.
    """
    print('If you are running this script as a scheduled task, you '
          'need to put the full path to the script folder. i.e. '
          '/home/sync/Canvas-to-Infinite-Campus-Grade-Sync/python/'
          )
    # Trim whitespace and strip any quotes the user pasted in.
    run_path = input("Path: ").strip().replace('"', "").replace("'", '')
    config = f.getVariables()
    config['path'] = run_path
    writeJSON(config)
    print(f'Path set to {run_path}')
    mainMenu()
# Entry point: launch the interactive setup menu.
mainMenu()
|
### Copyright 2014, MTA SZTAKI, www.sztaki.hu
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
"""
Redis_ implementation of the OCCO
:class:`~occo.infobroker.kvstore.KeyValueStore`.
.. moduleauthor:: <NAME> <<EMAIL>>
.. _Redis: http://redis.io/
"""
__all__ = ['RedisKVStore']
import occo.infobroker.kvstore as kvs
import occo.exceptions as exc
import occo.util.factory as factory
import occo.util as util
from ruamel import yaml
import logging
import redis
log = logging.getLogger('occo.infobroker.kvstore.redis')
class RedisConnectionData(object):
    """Value object identifying a single Redis endpoint: host, port, db.
    Hashable and comparable, so instances can serve as dictionary keys
    for the shared connection-pool cache.
    """
    def __init__(self, host, port, db):
        self.host = host
        self.port = port
        self.db = db
    def _as_tuple(self):
        # Canonical identity used by both __hash__ and __eq__.
        return (self.host, self.port, self.db)
    def __hash__(self):
        return hash(self._as_tuple())
    def __eq__(self, other):
        return self._as_tuple() == (other.host, other.port, other.db)
    def __repr__(self):
        return '{0.__class__.__name__!s}({0.host!r}, {0.port!r}, {0.db!r})' \
            .format(self)
    def __str__(self):
        return '{0}:{1}/{2}'.format(self.host, self.port, self.db)
class RedisConnectionPools:
    """Process-wide cache of ``redis.ConnectionPool`` objects.
    Pools are shared between stores and keyed by RedisConnectionData.
    """
    connection_pools = dict()
    @staticmethod
    def get(rcd):
        """Return the shared pool for *rcd*, creating it on first use."""
        # EAFP: the pool usually exists already; build it on a miss.
        try:
            return RedisConnectionPools.connection_pools[rcd]
        except KeyError:
            pool = redis.ConnectionPool(
                host=rcd.host, port=rcd.port, db=rcd.db, decode_responses=True)
            RedisConnectionPools.connection_pools[rcd] = pool
            return pool
class DBSelectorKey(object):
    """Resolve a raw key of the form ``<altdb>:<rest>`` to a concrete db.
    If the prefix names one of the store's alternative databases, the
    remainder of the key is routed there; otherwise the whole key goes
    to the store's default database.
    """
    def __init__(self, key, kvstore):
        dbname, newkey = self.splitkey(key)
        if dbname in kvstore.altdbs:
            self.db, self.key = kvstore.altdbs[dbname], newkey
        else:
            self.db, self.key = kvstore.default_db, key
        self.rcd = RedisConnectionData(kvstore.host, kvstore.port, self.db)
    def splitkey(self, key):
        # Split only on the first ':'; no prefix -> (None, whole key).
        prefix, sep, rest = key.partition(':')
        if sep:
            return [prefix, rest]
        return None, key
    def get_connection(self):
        # Client objects are cheap; the pooled sockets beneath are shared.
        client = redis.StrictRedis(
            connection_pool=RedisConnectionPools.get(self.rcd))
        return client, self.key
    def __str__(self):
        return '{0}::{1}'.format(self.rcd, self.key)
@factory.register(kvs.KeyValueStore, 'redis')
class RedisKVStore(kvs.KeyValueStore):
    """
    Redis implementation of :class:`~occo.infobroker.kvstore.KeyValueStore`.
    :param str host: Redis parameter: hostname.
    :param str port: Redis parameter: port.
    :param int db: Redis parameter: default database id.
    :param dict altdbs: List of alternative databases. Some of the functions
        can use other redis databases than the default.
    :param serialize: Serialization function. Used to convert objects to
        storable representation (JSON, YAML, etc.)
    :type serialize: :class:`object` ``->`` :class:`str`
    :param deserialize: Deserialization function. Used to convert stored data
        to run-time objects. Must accept a single string argument.
    :type deserialize: :class:`str` -> :class:`object`
    """
    def __init__(self, host='localhost', port='6379', db=0, altdbs=None,
                 serialize=yaml.dump, deserialize=yaml.load,
                 **kwargs):
        super(RedisKVStore, self).__init__(**kwargs)
        self.host, self.port, self.default_db = host, port, db
        self.altdbs = util.coalesce(altdbs, dict())
        # altdbs must be invertible so listed keys can be mapped back to
        # their '<altdb>:' prefixed form.
        self.inverse_altdbs = dict((v, k) for k, v in list(self.altdbs.items()))
        if len(self.altdbs) != len(self.inverse_altdbs):
            raise exc.ConfigurationError('The specified altdbs is not a bijection',
                                         self.altdbs)
        self.serialize = serialize
        # Bind the Loader for the default yaml.load here, once, so that
        # query_item can call ANY deserializer with a single argument.
        # (Previously query_item passed Loader= unconditionally, which
        # crashed custom deserializers that don't take that keyword.)
        if deserialize is yaml.load:
            deserialize = lambda data: yaml.load(data, Loader=yaml.Loader)
        self.deserialize = deserialize
    def transform_key(self, key):
        """Resolve *key* to a (connection, real key) pair via DBSelectorKey."""
        tkey = DBSelectorKey(key, self)
        log.debug("Accessing key: %s", tkey)
        return tkey.get_connection()
    def inverse_transform(self, backend, key):
        """Re-attach the altdb prefix to a key listed from *backend*."""
        db = backend.connection_pool.connection_kwargs['db']
        return key if db == 0 \
            else '{0}:{1}'.format(self.inverse_altdbs[db], key)
    def query_item(self, key, default=None):
        log.debug('Querying %r', key)
        backend, key = self.transform_key(key)
        data = backend.get(key)
        retval = self.deserialize(data) if data else None
        return util.coalesce(retval, default)
    def set_item(self, key, value):
        log.debug('Setting %r', key)
        backend, key = self.transform_key(key)
        # NOTE(review): falsy values are stored as None -- presumably
        # intentional, but redis-py rejects None values; confirm.
        backend.set(key, self.serialize(value) if value else None)
    def _contains_key(self, key):
        log.debug('Checking %r', key)
        backend, key = self.transform_key(key)
        return backend.exists(key)
    def _enumerate(self, pattern, **kwargs):
        log.debug('Listing keys against pattern %r', pattern)
        if callable(pattern):
            # Fixed: itertools.ifilter is Python-2-only and raised
            # AttributeError here on Python 3; use a comprehension.
            backend, _ = self.transform_key('')
            return [key for key in backend.keys() if pattern(key)]
        else:
            backend, pattern = self.transform_key(pattern)
            return [self.inverse_transform(backend, key)
                    for key in backend.keys(pattern)]
    def delete_key(self, key):
        log.debug('Deleting %r', key)
        backend, key = self.transform_key(key)
        backend.delete(key)
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
IO - bpRNA
================
It help process the result file from the tool - 'bpRNA'.
"""
import os
from typing import Tuple, Optional
from neoRNA.sequence.sequence import Sequence
from neoRNA.sequence.base_pair import BasePair
from neoRNA.structure.secondary_structure_element import SecondaryStructureElement, SecondaryStructureElementType
from neoRNA.structure.secondary_structure import SecondaryStructure
class BpRnaIO(object):
    r"""
    Parse the "result" file of 'bpRNA'.
    The result file defines the "RNA secondary structure".
    The file is a "plaintext" file.
    - Refer to the example file `/tests/io/example_files/bprna_example.st` for its format.
    - ref: bpRNA - http://bprna.cgrb.oregonstate.edu/download.php#bpRNA
    Current Format:
    - "#" line - references, may have multiple lines
    - Line 1 - RNA sequence
    - Line 2 - Dot-Bracket string
    - Line 3 - Dot-Bracket annotation string
    - Line 4 - Dot-Bracket annotation validation string
    - Other lines - Secondary structure elements, like stem, bulge, etc.
    """
    # The "marker" for each of the "Record"
    # - A record usually starts with a "comment".
    RECORD_MARKER = '#'
    # Used for storing parsing results
    # NOTE(review): class-level (shared) state, reset per record inside
    # `parse()` -- parsing with this class is therefore not re-entrant.
    INFO = {}
    ELEMENTS = []
    # Temp location for "Interior Loop"
    # bpRNA emits each interior loop as TWO lines; the first ("left") half
    # is buffered here until its partner line arrives.
    interior_left_raw_string = None
    interior_left_sequence = None
    interior_left_base_pair = None
    # Helper variables for "Multi-loop"
    # Consecutive multiloop lines sharing the same element no# are merged
    # into one element; these attributes track the merge in progress.
    multiloop_current_no = 0
    multiloop_current_element: SecondaryStructureElement = None
    multiloop_current_element_raw_str_list = list()
    # ----------------------------------
    # region Iterator Generator
    @classmethod
    def parse_iterator(cls, handle):
        r"""
        Iterate over records and parse it as objects.
        Parameters
        ----------
        handle: any
            input file.
        Returns
        -------
        parsed_objects: SecondaryStructure
            One parsed :class:`SecondaryStructure` per record.
        Usage
        -------
        >>> with open("bp-rna.st") as handle:
        ...     for record in BpRnaIO.parse_iterator(handle):
        ...         print(record.comment)
        ...
        """
        for comment, info, elements in cls.parse(handle):
            yield SecondaryStructure(comment,
                                     info['sequence'],
                                     info['dot_bracket'],
                                     info['dot_bracket_annotation'],
                                     info['dot_bracket_validation'],
                                     elements)
    # endregion
    # ----------------------------------
    # region Parser
    @classmethod
    def parse(cls, handle):
        """
        Parse the file.
        Parameters
        ----------
        handle: handle
            input file.
        Returns
        -------
        Yields one (comment, info_dict, elements_list) tuple per record.
        """
        # Skip any text before the first record (e.g. blank lines, comments)
        while True:
            line = handle.readline()
            if line == "":
                return
            if line[0] == cls.RECORD_MARKER: # Find the "first line" of a record.
                break
        while True:
            #
            if line[0] != cls.RECORD_MARKER:
                raise ValueError(
                    "Records should start with '{}' character!".format(cls.RECORD_MARKER))
            # Reset
            cls.INFO = {}
            cls.ELEMENTS = []
            # Helper variables for "Interior loop"
            cls.interior_left_raw_string = None
            cls.interior_left_sequence = None
            cls.interior_left_base_pair = None
            # Helper variable for "Multiloop"
            cls.multiloop_current_no = 0
            cls.multiloop_current_element = None
            cls.multiloop_current_element_raw_str_list = []
            # Get "Reference Info" from the first line
            # Ex: #Name: 001_with_reactivity
            comment = line[7:].rstrip() # Remove the first part - "#Name: ".
            # Skip other 2 lines
            for index in range(1, 3):
                handle.readline()
            # Parse the "Sequence Info"
            # It has "4" lines
            for index in range(1, 5):
                cls.parse_basic_info(handle.readline(), index)
            # Parse the "elements" till the next "record marker"
            line = handle.readline()
            while True:
                if not line:
                    break
                if line[0] == cls.RECORD_MARKER:
                    break
                #
                cls.parse_element(line)
                #
                line = handle.readline()
            # Check if there is any "leftover" before "return"
            # (the last multiloop of a record is only flushed here).
            if cls.multiloop_current_element:
                cls.multiloop_current_element.raw_string = ' | '.join(cls.multiloop_current_element_raw_str_list)
                cls.ELEMENTS.append(cls.multiloop_current_element)
                #
                cls.multiloop_current_element = None
                cls.multiloop_current_element_raw_str_list = []
                cls.multiloop_current_no = 0
            yield comment, cls.INFO, cls.ELEMENTS
            if not line:
                return # StopIteration
    # endregion
    # ----------------------------------
    # region Methods - Parsing Functions
    @classmethod
    def parse_basic_info(cls, line, line_parsed):
        """
        Parse the basic info from the "read line".
        :param line: the raw line just read from the file.
        :param line_parsed: 1-based index within the 4-line basic-info
            section (1=sequence, 2=dot-bracket, 3=annotation, 4=validation).
        :return:
        """
        if line_parsed == 1:
            # sequence
            cls.INFO['sequence'] = line.strip()
        elif line_parsed == 2:
            # Dot-bracket string
            cls.INFO['dot_bracket'] = line.strip()
        elif line_parsed == 3:
            # Dot-bracket annotation
            cls.INFO['dot_bracket_annotation'] = line.strip()
        elif line_parsed == 4:
            # Dot-bracket validation
            cls.INFO['dot_bracket_validation'] = line.strip()
        else:
            return
    @classmethod
    def parse_element(cls, line):
        """
        Parse the "secondary structure element" info.
        :param line: one element line, e.g. 'S1 1..5 "GCAUC" 25..29 "GAUGC"'.
        :return:
        """
        if not line.strip():
            return
        import re
        # NOTE(review): '\s' splits on EVERY single whitespace char, so
        # consecutive spaces would produce empty parts -- this assumes
        # single-space-delimited lines; confirm against the input format.
        __delimiters = '\s'
        parts = re.split(__delimiters, line.strip())
        if not len(parts) >= 2:
            raise ValueError('The line must have at least 2 parts', line)
        # Decode the element index info
        element_type, element_no, element_index = cls.decode_element_index(parts[0])
        element = SecondaryStructureElement(element_type, line.strip())
        if element_type == SecondaryStructureElementType.Stem:
            # It has "2" sequences
            if len(parts) == 5:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                # 2
                start_position, end_position = cls.determine_position_pair(parts[3])
                sequence_str = cls.determine_sequence(parts[4])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                #
                cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.Hairpin:
            # It has "1" sequence and "1" base pair
            if len(parts) == 5:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                # 2
                position_pair = cls.determine_position_pair(parts[3])
                nt_str = cls.determine_nt_pair(parts[4])
                element.add_base_pair(BasePair(nt_str, position_pair))
                #
                cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.Bulge:
            # It has "1" sequence and "2" base pairs
            if len(parts) == 7:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                # 2
                position_pair = cls.determine_position_pair(parts[3])
                nt_str = cls.determine_nt_pair(parts[4])
                element.add_base_pair(BasePair(nt_str, position_pair))
                # 3
                position_pair = cls.determine_position_pair(parts[5])
                nt_str = cls.determine_nt_pair(parts[6])
                element.add_base_pair(BasePair(nt_str, position_pair))
                #
                cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.Unpaired:
            # It has "1" sequence and "2" base pairs
            if len(parts) == 7:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                # 2
                position_pair = cls.determine_position_pair(parts[3])
                nt_str = cls.determine_nt_pair(parts[4])
                element.add_base_pair(BasePair(nt_str, position_pair))
                # 3
                position_pair = cls.determine_position_pair(parts[5])
                nt_str = cls.determine_nt_pair(parts[6])
                element.add_base_pair(BasePair(nt_str, position_pair))
                #
                cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.Multiloop:
            # It has "1" sequence and "2" base pairs
            # Multiloop lines sharing one no# are accumulated into a single
            # element; the element is flushed when the no# changes (or, for
            # the last one, inside `parse()`).
            if len(parts) == 7:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                # 2
                position_pair_1 = cls.determine_position_pair(parts[3])
                nt_str_1 = cls.determine_nt_pair(parts[4])
                # 3
                position_pair_2 = cls.determine_position_pair(parts[5])
                nt_str_2 = cls.determine_nt_pair(parts[6])
                # Check if it is a "new" multiloop element
                if cls.multiloop_current_no == 0 or cls.multiloop_current_no != element_no:
                    # Save the previous element
                    if cls.multiloop_current_no != 0:
                        cls.multiloop_current_element.raw_string = ' | '.join(
                            cls.multiloop_current_element_raw_str_list)
                        cls.ELEMENTS.append(cls.multiloop_current_element)
                        #
                        cls.multiloop_current_element = None
                        cls.multiloop_current_element_raw_str_list = []
                        cls.multiloop_current_no = 0
                    # A "new" multiloop
                    cls.multiloop_current_no = element_no
                    cls.multiloop_current_element = element
                    #
                    cls.multiloop_current_element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                    cls.multiloop_current_element.add_base_pair(BasePair(nt_str_1, position_pair_1))
                    cls.multiloop_current_element.add_base_pair(BasePair(nt_str_2, position_pair_2))
                    cls.multiloop_current_element_raw_str_list.append(element.raw_string)
                else:
                    # For an "exist" multiloop
                    cls.multiloop_current_element.add_sequence(
                        Sequence(sequence_str, range(start_position, end_position + 1)))
                    cls.multiloop_current_element.add_base_pair(BasePair(nt_str_1, position_pair_1))
                    cls.multiloop_current_element.add_base_pair(BasePair(nt_str_2, position_pair_2))
                    cls.multiloop_current_element_raw_str_list.append(element.raw_string)
        elif element_type == SecondaryStructureElementType.Interior:
            # Each "Interior Loop" includes "TWO" lines to present
            # Each line includes "1" sequence and "1" base pair
            if len(parts) == 5:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                sequence = Sequence(sequence_str, range(start_position, end_position + 1))
                # 2
                position_pair = cls.determine_position_pair(parts[3])
                nt_str = cls.determine_nt_pair(parts[4])
                base_pair = BasePair(nt_str, position_pair)
                # Determine if needs to add a new element or just save it as a "temp" data'
                if cls.interior_left_sequence is None:
                    # First (left) half: buffer until the partner line arrives.
                    cls.interior_left_raw_string = element.raw_string
                    cls.interior_left_sequence = sequence
                    cls.interior_left_base_pair = base_pair
                else:
                    # Ready to add "Interior Loop" as a new element
                    element.add_sequence(cls.interior_left_sequence)
                    element.add_sequence(sequence)
                    element.add_base_pair(cls.interior_left_base_pair)
                    element.add_base_pair(base_pair)
                    element.raw_string = '{} | {}'.format(cls.interior_left_raw_string, element.raw_string)
                    cls.interior_left_raw_string = None
                    cls.interior_left_sequence = None
                    cls.interior_left_base_pair = None
                    #
                    cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.End:
            # It has "1" sequence
            if len(parts) == 3:
                # 1
                start_position, end_position = cls.determine_position_pair(parts[1])
                sequence_str = cls.determine_sequence(parts[2])
                element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                #
                cls.ELEMENTS.append(element)
        elif element_type == SecondaryStructureElementType.Segment:
            # It has "1" sequence
            if len(parts) > 2 and len(parts) % 2 == 0:
                # 1
                count = cls.determine_base_pair_count(parts[1])
                element.base_pair_count = count
                # Loop till the end to add all sequences
                index = 2
                while index < len(parts):
                    start_position, end_position = cls.determine_position_pair(parts[index])
                    sequence_str = cls.determine_sequence(parts[index+1])
                    element.add_sequence(Sequence(sequence_str, range(start_position, end_position + 1)))
                    index += 2
                #
                cls.ELEMENTS.append(element)
    # endregion
    # ----------------------------------
    # region Methods - Static
    @staticmethod
    def decode_element_index(element_index_str: str) -> Tuple[Optional[str], Optional[int], Optional[int]]:
        r"""
        Decode the "element index string" to retrieve the following parts:
        - element type
        - element no#
        - element internal index (Optional)
        Example "element index string":
        - S1
        - I1.1
        - segment1
        NOTE: no# and index come straight from regex groups, so they are
        returned as strings (or None), not ints.
        Parameters
        ----------
        element_index_str: str
        Returns
        -------
        """
        if not element_index_str.strip():
            return None, None, None
        import re
        # regex = '([\D]+)' # Non-digit string
        regex = '^(?P<type>[\D]+)(?P<no>[\d]+)?\D*(?P<index>[\d]+)?$'
        found = re.search(regex, element_index_str.strip())
        # Decode the results
        result_dict = found.groupdict() if found else {}
        element_type = None
        element_no = None
        element_index = None
        if 'type' in result_dict:
            element_type = SecondaryStructureElementType.get_type(result_dict['type'].upper())
        if 'no' in result_dict:
            element_no = result_dict['no']
        if 'index' in result_dict:
            element_index = result_dict['index']
        return element_type, element_no, element_index
    @staticmethod
    def determine_sequence(sequence_str: str):
        r"""
        Determine the "sequence" from a given string.
        Example format:
        - "CUU"
        - ""
        Parameters
        ----------
        sequence_str: str
        Returns
        -------
        The bare letter string, or None for an empty/non-matching input.
        """
        if not sequence_str.strip():
            return None
        import re
        regex = '[\W]*([a-zA-Z]+)' # Retrieve one string
        matched = re.match(regex, sequence_str.strip())
        if not matched:
            return None
        if len(matched.groups()) != 1:
            raise ValueError('The string does not contain 1 string', sequence_str)
        return matched.group(1)
    @staticmethod
    def determine_position_pair(position_pair_str: str):
        r"""
        Determine the "position pair" ( 2 digits) from a given string.
        The pair could be "start" and "end" of a sequence, or two positions of a base pair.
        Example format:
        - 48..50
        - (47,30)
        Parameters
        ----------
        position_pair_str: str
        Returns
        -------
        A (int, int) tuple, or None for an empty input.
        """
        if not position_pair_str.strip():
            return None
        import re
        regex = '[\D]*([\d]+)[\D]+([\d]+)' # Retrieve two numbers
        matched = re.match(regex, position_pair_str.strip())
        if len(matched.groups()) != 2:
            raise ValueError('The string does not contain 2 numbers', position_pair_str)
        return int(matched.group(1)), int(matched.group(2))
    @staticmethod
    def determine_nt_pair(string):
        """
        Determine the "nt pair" ( 2 letters) from a given string.
        Example format:
        - G:C
        :param string:
        :return: A (letter, letter) tuple, or None for an empty input.
        """
        if not string.strip():
            return None
        import re
        regex = '[\W]*([\w]):([\w])' # Retrieve two letters
        matched = re.match(regex, string.strip())
        if len(matched.groups()) != 2:
            raise ValueError('The string does not contain 2 letters', string)
        return matched.group(1), matched.group(2)
    @staticmethod
    def determine_base_pair_count(string):
        """
        Determine the "Base Pair Count" based on the given "string".
        The format of the string may be like:
        - 25bp
        :param string:
        :return: The count as an int, or None for an empty input.
        """
        if not string.strip():
            return None
        import re
        regex = '([\d]+)' # Retrieve a number
        matched = re.match(regex, string.strip())
        if len(matched.groups()) != 1:
            raise ValueError('The string does not contain 1 number', string)
        return int(matched.group(1))
    # endregion
|
<reponame>jhover/rubygem-rpmbuild
#!/bin/env python
__author__ = "<NAME>"
__copyright__ = "2017 <NAME>"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import argparse
import datetime
import logging
import os
import subprocess
import sys
import time
from ConfigParser import ConfigParser
def _runtimedcommand(cmd):
'''
@param string: Command shell string to be run. Can contain semicolons for compound commands.
'''
log = logging.getLogger()
before = time.time()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out = None
(out, err) = p.communicate()
delta = time.time() - before
log.debug('%s seconds to perform command' % delta)
if p.returncode == 0:
log.debug('Leaving with OK return code. Err is "%s"' % err)
else:
log.warning('Leaving with bad return code. rc=%s err=%s out=%s' %(p.returncode, err, out ))
return (p.returncode, out, err)
class GemBuildException(Exception):
    '''
    Raised when fetching or building a gem RPM fails. The offending
    detail is kept in ``value`` (and, via the base class, in ``args``).
    '''
    def __init__(self, value):
        # Also initialize the base Exception so e.args is populated
        # (the original left args empty, breaking generic handlers/pickling).
        super(GemBuildException, self).__init__(value)
        self.value = value
    def __str__(self):
        return repr(self.value)
class GemHandler(object):
    '''
    Fetch a rubygem, generate an RPM spec via gem2rpm, build the RPM, and
    (unless skipdeps) recursively handle the gem's dependencies.
    '''
    # Shared across all instances: gems already processed / failed this run.
    handledgems = set()
    problemgems = set()
    def __init__(self, cp, gemname, skipdeps=False, rebuild=False):
        '''
        @param cp: ConfigParser with a [global] section providing
                   rpmbuilddir, gemtemplate and tempdir.
        @param gemname: name of the gem to handle.
        @param skipdeps: if True, do not process dependencies.
        @param rebuild: if True, rebuild even when the RPM already exists.
        '''
        self.log = logging.getLogger()
        self.log.info("Handling gem %s" % gemname)
        self.config = cp
        self.gemname = gemname
        self.rpmbuilddir = os.path.expanduser(cp.get('global','rpmbuilddir'))
        self.gemtemplate = os.path.expanduser(cp.get('global','gemtemplate'))
        self.tempdir = os.path.expanduser(cp.get('global','tempdir'))
        self.skipdeps = skipdeps
        self.rebuild = rebuild
        # Filled in by fetchGem() from the 'gem fetch' output.
        self.version = None
    def setupDirs(self):
        '''
        Create the rpmbuild directory tree (SOURCES, SPECS, RPMS, ...).
        '''
        dirs = [ '%s/SOURCES' % self.rpmbuilddir,
                 '%s/SPECS' % self.rpmbuilddir,
                 '%s/BUILD' % self.rpmbuilddir,
                 '%s/RPMS/noarch' % self.rpmbuilddir,
                 '%s/RPMS/x86_64' % self.rpmbuilddir,
                 '%s/BUILDROOT' % self.rpmbuilddir,
                 '%s/%s' % (self.tempdir, self.gemname),
               ]
        for d in dirs:
            try:
                os.makedirs(d)
            except OSError:
                # Directory already exists -- best effort, ignore.
                # ('except OSError, e' was Python-2-only syntax.)
                pass
        self.log.debug("Various dirs made OK.")
    def fetchGem(self):
        '''
        Fetch this gem into rpmbuilddir/SOURCES and record its version.
        Raises GemBuildException on failure.
        '''
        cmd = "cd %s/SOURCES ; gem fetch %s " % (self.rpmbuilddir, self.gemname)
        self.log.debug("Command is %s" % cmd )
        (r, o, e) = _runtimedcommand(cmd)
        # 'r is 0' relied on CPython small-int caching; compare by value.
        if r == 0 and not e.startswith('ERR'):
            self.log.debug("Out is %s" % o)
            fields = o.split()
            nv = fields[1]
            # Output looks like 'Downloaded <name>-<version>'; strip the
            # gem name plus the joining '-' to get the bare version.
            self.version = nv[len(self.gemname)+1:]
            self.log.debug("Version is %s" % self.version)
            self.log.info("Gem %s-%s fetched." % (self.gemname, self.version) )
        else:
            self.log.warning("Error/problem fetching %s" % self.gemname)
            raise GemBuildException('Problem fetching...')
    def makeSpec(self):
        '''
        gem2rpm -t $TEMPLATE $gem-[0-9]*.gem > $RPMBUILDDIR/SPECS/rubygem-$gem.spec
        '''
        cmd = "gem2rpm -t %s %s/SOURCES/%s-%s.gem > %s/SPECS/rubygem-%s.spec " % (
            self.gemtemplate,
            self.rpmbuilddir,
            self.gemname,
            self.version,
            self.rpmbuilddir,
            self.gemname)
        self.log.debug("Command is %s" % cmd )
        (r, o, e) = _runtimedcommand(cmd)
        self.log.info("Created rubygem-%s.spec " % self.gemname)
        self.specfile = "%s/SPECS/rubygem-%s.spec" % ( self.rpmbuilddir, self.gemname)
    def fixSpec(self):
        '''
        Fix empty URL Tag value (rpmbuild rejects an empty 'URL:' line).
        '''
        sf = open(self.specfile, 'r')
        linelist = sf.readlines()
        sf.close()
        sf2 = open(self.specfile, 'w')
        for line in linelist:
            if line.strip() == 'URL:':
                line = 'URL: http://rubygems.org/\n'
            sf2.write(line)
        sf2.close()
    def parseDeps(self):
        '''
        Populate self.deps with the rubygem dependency names reported by
        'gem2rpm -d' for the fetched gem.
        '''
        self.deps = []
        depset = set()
        cmd = "cd %s/SOURCES ; gem2rpm -d %s-%s.gem" % (self.rpmbuilddir, self.gemname, self.version)
        self.log.debug("Command is %s" % cmd )
        (r, o, e) = _runtimedcommand(cmd)
        if r == 0:
            self.log.debug("Out is %s" % o)
            o = o.strip()
            if len(o) > 3:
                lines = o.split('\n')
                for line in lines:
                    if len(line) > 3:
                        fields = line.split()
                        dep = fields[0]
                        if len(fields) > 2:
                            op = fields[1]
                            ver = fields[2]
                        self.log.debug("Dep is %s" % dep)
                        if dep.startswith('rubygem'):
                            # Entries look like 'rubygem(<name>)'.
                            depname = dep[8:-1]
                            self.log.debug("Adding dependency %s" % depname)
                            depset.add(depname)
            else:
                self.log.debug("No dependencies.")
        self.deps = list(depset)
    def isBuilt(self):
        '''
        Checks to see if RPM for this specific gem and version is already built in
        rpmbuilddir (either noarch or native x86_64).
        '''
        naf = "%s/RPMS/noarch/rubygem-%s-%s-1.noarch.rpm" % (self.rpmbuilddir,
                                                             self.gemname,
                                                             self.version)
        nf = "%s/RPMS/x86_64/rubygem-%s-%s-1.x86_64.rpm" % (self.rpmbuilddir,
                                                            self.gemname,
                                                            self.version)
        rpmbuilt = False
        if os.path.isfile(naf):
            rpmbuilt = True
            self.log.debug('Noarch RPM for rubygem-%s-%s exists.' % (self.gemname,
                                                                     self.version))
        if os.path.isfile(nf):
            rpmbuilt = True
            self.log.debug('Native RPM for rubygem-%s-%s exists.' % (self.gemname,
                                                                     self.version))
        return rpmbuilt
    def buildRPM(self):
        '''
        rpmbuild -bb $RPMBUILDDIR/SPECS/rubygem-$gem.spec
        Falls back to a native (x86_64) build when the gem compiles
        native extensions. Raises GemBuildException on failure.
        '''
        if not self.isBuilt() or self.rebuild:
            self.log.debug("Building gem %s" % self.gemname)
            cmd = "rpmbuild -bb %s/SPECS/rubygem-%s.spec" % (self.rpmbuilddir,
                                                             self.gemname)
            self.log.debug("Command is %s" % cmd )
            (r, o, e) = _runtimedcommand(cmd)
            if r == 0:
                self.log.info("RPM for rubygem-%s built OK." % self.gemname)
            elif 'Building native extensions.' in o:
                self.log.warning('Native package, fixing and building...')
                self.buildNativeRPM()
            else:
                self.log.error("Problem building RPM for rubygem-%s." % self.gemname)
                GemHandler.problemgems.add(self.gemname)
                raise GemBuildException('Problem building RPM.')
        else:
            self.log.info("RPM for %s-%s already built. Skipping..." % (self.gemname, self.version))
    def convertSpecNative(self):
        '''
        Fixes spec for this gem to Arch: x86_64
        '''
        self.log.debug("Converting %s spec to native..." % self.gemname)
        sf = open(self.specfile, 'r')
        linelist = sf.readlines()
        sf.close()
        sf2 = open(self.specfile, 'w')
        for line in linelist:
            line = line.replace('BuildArch: noarch' , 'BuildArch: x86_64')
            sf2.write(line)
        sf2.close()
    def buildNativeRPM(self):
        '''
        Converts spec to Arch: x86-64 and re-builds.
        '''
        self.log.debug("Building gem %s native..." % self.gemname)
        self.convertSpecNative()
        self.log.debug("Building gem %s" % self.gemname)
        cmd = "rpmbuild -bb %s/SPECS/rubygem-%s.spec" % (self.rpmbuilddir,
                                                         self.gemname)
        self.log.debug("Command is %s" % cmd )
        (r, o, e) = _runtimedcommand(cmd)
        if r == 0:
            self.log.info("RPM for rubygem-%s built OK." % self.gemname)
        else:
            self.log.error("Problem building RPM for rubygem-%s." % self.gemname)
            GemHandler.problemgems.add(self.gemname)
            raise GemBuildException('Problem building RPM.')
    def handleDeps(self):
        '''
        Recursively handle each parsed dependency that has not been
        processed (successfully or unsuccessfully) yet.
        '''
        for dep in self.deps:
            self.log.debug('Processing dep %s' % dep)
            # Fixed: was 'or', which re-processed gems already in
            # handledgems whenever they were not also in problemgems.
            if dep not in GemHandler.handledgems and dep not in GemHandler.problemgems:
                gh = GemHandler(self.config, dep)
                gh.handleGem()
            else:
                self.log.debug("Gem %s already done." % dep)
        self.log.info("Finished handling deps for %s" % self.gemname)
    def handleGem(self):
        '''
        Top-level workflow: fetch, generate/fix spec, build, then deps.
        Failures are recorded in problemgems rather than propagated.
        '''
        self.setupDirs()
        try:
            if (self.gemname not in GemHandler.handledgems) and (self.gemname not in GemHandler.problemgems):
                self.fetchGem()
                if not self.isBuilt() or self.rebuild:
                    self.makeSpec()
                    self.fixSpec()
                    self.buildRPM()
                    self.log.debug("Adding gem %s to done list." % self.gemname)
                    GemHandler.handledgems.add(self.gemname)
                else:
                    self.log.info("RPM for %s-%s already built. Skipping..." % (self.gemname, self.version))
                    self.log.debug("Adding gem %s to done list." % self.gemname)
                    GemHandler.handledgems.add(self.gemname)
                if not self.skipdeps:
                    self.parseDeps()
                    self.handleDeps()
        except GemBuildException as e:
            # 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
            self.log.error('Problem building gem %s: Error: %s' % (self.gemname, e) )
            GemHandler.problemgems.add(self.gemname)
class GemRPMCLI(object):
    """Command-line driver: parses options, configures logging, then
    builds an RPM for every gem named on the command line."""

    def __init__(self):
        # Options must be parsed first: setuplogging() reads self.results.
        self.parseopts()
        self.setuplogging()

    def setuplogging(self):
        """Attach a stream handler to the root logger; -d/-v set level."""
        FORMAT = '%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d %(funcName)s(): %(message)s'
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(FORMAT))
        self.log = logging.getLogger()
        self.log.addHandler(handler)
        # Default WARN; note -v is applied after -d, so INFO wins if both
        # flags are given (preserves the original precedence).
        self.log.setLevel(logging.WARN)
        if self.results.debug:
            self.log.setLevel(logging.DEBUG)
        if self.results.info:
            self.log.setLevel(logging.INFO)
        self.log.info('Logging initialized.')

    def parseopts(self):
        """Parse command-line arguments into self.results."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--config', action="store", dest='configpath',
                            default='~/etc/rubygemrpm.conf',
                            help='Configuration file path.')
        parser.add_argument('-d', '--debug', action="store_true", dest='debug',
                            help='Debug level logging')
        parser.add_argument('-v', '--verbose', action="store_true", dest='info',
                            help='Info level logging')
        parser.add_argument('-s', '--skipdeps', action="store_true",
                            default=False, dest='skipdeps',
                            help='Skip building deps recursively')
        parser.add_argument('-r', '--rebuild', action="store_true",
                            default=False, dest='rebuild',
                            help='Rebuild even if RPM exists')
        parser.add_argument('gemnames', nargs='*', metavar='GEMNAME',
                            action="store")
        self.results = parser.parse_args()
        print(self.results)

    def invoke(self):
        """Build every requested gem and report totals and failures."""
        begin = datetime.datetime.now()
        ns = self.results
        cp = ConfigParser()
        self.log.info("Config is %s" % ns.configpath)
        cp.read(os.path.expanduser(ns.configpath))
        for gemname in ns.gemnames:
            handler = GemHandler(cp, gemname, skipdeps=ns.skipdeps,
                                 rebuild=ns.rebuild)
            handler.handleGem()
        self.log.info("Handled %d gems: %s" % (len(GemHandler.handledgems),
                                               list(GemHandler.handledgems)))
        self.log.error("Problems with %d gems: %s" % (len(GemHandler.problemgems),
                                                      list(GemHandler.problemgems)))
        delta = datetime.datetime.now() - begin
        self.log.info("Total time: %s " % str(delta))
if __name__ == '__main__':
    # Script entry point: parse CLI options and build the requested gems.
    rgcli = GemRPMCLI()
    rgcli.invoke()
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import tempfile
from copy import deepcopy
from itertools import chain
from unittest import TestCase
import torch
from mmcv.runner import load_checkpoint, save_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmcls.models.backbones import SwinTransformer
from mmcls.models.backbones.swin_transformer import SwinBlock
from .utils import timm_resize_pos_embed
def check_norm_state(modules, train_state):
    """Check if norm layer is in correct train state.

    Returns False as soon as any batch-norm module's ``training`` flag
    differs from ``train_state``; True otherwise.
    """
    return all(mod.training == train_state
               for mod in modules if isinstance(mod, _BatchNorm))
class TestSwinTransformer(TestCase):
    """Tests for the SwinTransformer backbone: arch validation, weight
    initialization / checkpoint-version compatibility, forward-pass
    output shapes, and structural options (drop-path decay, norm_eval,
    frozen stages)."""

    def setUp(self):
        # Base config ('b' = Swin-Base); each test deepcopies and tweaks it.
        self.cfg = dict(
            arch='b', img_size=224, patch_size=4, drop_path_rate=0.1)

    def test_arch(self):
        """Invalid arch strings/dicts are rejected; custom dicts apply."""
        # Test invalid default arch
        with self.assertRaisesRegex(AssertionError, 'not in default archs'):
            cfg = deepcopy(self.cfg)
            cfg['arch'] = 'unknown'
            SwinTransformer(**cfg)
        # Test invalid custom arch
        with self.assertRaisesRegex(AssertionError, 'Custom arch needs'):
            cfg = deepcopy(self.cfg)
            cfg['arch'] = {
                'embed_dims': 96,
                'num_heads': [3, 6, 12, 16],
            }
            SwinTransformer(**cfg)
        # Test custom arch
        cfg = deepcopy(self.cfg)
        depths = [2, 2, 4, 2]
        num_heads = [6, 12, 6, 12]
        cfg['arch'] = {
            'embed_dims': 256,
            'depths': depths,
            'num_heads': num_heads
        }
        model = SwinTransformer(**cfg)
        # Embed dims double at every stage; depths/heads follow the dict.
        for i, stage in enumerate(model.stages):
            self.assertEqual(stage.embed_dims, 256 * (2**i))
            self.assertEqual(len(stage.blocks), depths[i])
            self.assertEqual(stage.blocks[0].attn.w_msa.num_heads,
                             num_heads[i])

    def test_init_weights(self):
        """init_cfg is honored and v1/v3 checkpoints both load strictly."""
        # test weight init cfg
        cfg = deepcopy(self.cfg)
        cfg['use_abs_pos_embed'] = True
        cfg['init_cfg'] = [
            dict(
                type='Kaiming',
                layer='Conv2d',
                mode='fan_in',
                nonlinearity='linear')
        ]
        model = SwinTransformer(**cfg)
        ori_weight = model.patch_embed.projection.weight.clone().detach()
        # The pos_embed is all zero before initialize
        self.assertTrue(
            torch.allclose(model.absolute_pos_embed, torch.tensor(0.)))
        model.init_weights()
        initialized_weight = model.patch_embed.projection.weight
        self.assertFalse(torch.allclose(ori_weight, initialized_weight))
        self.assertFalse(
            torch.allclose(model.absolute_pos_embed, torch.tensor(0.)))
        pretrain_pos_embed = model.absolute_pos_embed.clone().detach()
        tmpdir = tempfile.gettempdir()
        # Save v3 checkpoints
        # NOTE(review): variable is named checkpoint_v2 but holds the
        # v3-format checkpoint file -- confirm intended naming.
        checkpoint_v2 = os.path.join(tmpdir, 'v3.pth')
        save_checkpoint(model, checkpoint_v2)
        # Save v1 checkpoints
        # Emulate the v1 layout: single 'norm' layer, persistent attn_mask
        # buffer, and _version forced to 1.
        setattr(model, 'norm', model.norm3)
        setattr(model.stages[0].blocks[1].attn, 'attn_mask',
                torch.zeros(64, 49, 49))
        model._version = 1
        del model.norm3
        checkpoint_v1 = os.path.join(tmpdir, 'v1.pth')
        save_checkpoint(model, checkpoint_v1)
        # test load v1 checkpoint
        cfg = deepcopy(self.cfg)
        cfg['use_abs_pos_embed'] = True
        model = SwinTransformer(**cfg)
        load_checkpoint(model, checkpoint_v1, strict=True)
        # test load v3 checkpoint
        cfg = deepcopy(self.cfg)
        cfg['use_abs_pos_embed'] = True
        model = SwinTransformer(**cfg)
        load_checkpoint(model, checkpoint_v2, strict=True)
        # test load v3 checkpoint with different img_size
        cfg = deepcopy(self.cfg)
        cfg['img_size'] = 384
        cfg['use_abs_pos_embed'] = True
        model = SwinTransformer(**cfg)
        load_checkpoint(model, checkpoint_v2, strict=True)
        # The loaded pos_embed must equal a timm-style resize of the saved
        # 224-px embedding.
        resized_pos_embed = timm_resize_pos_embed(
            pretrain_pos_embed, model.absolute_pos_embed, num_tokens=0)
        self.assertTrue(
            torch.allclose(model.absolute_pos_embed, resized_pos_embed))
        os.remove(checkpoint_v1)
        os.remove(checkpoint_v2)

    def test_forward(self):
        """Forward returns a tuple of feature maps with expected shapes."""
        imgs = torch.randn(3, 3, 224, 224)
        cfg = deepcopy(self.cfg)
        model = SwinTransformer(**cfg)
        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))
        # test with window_size=12
        cfg = deepcopy(self.cfg)
        cfg['window_size'] = 12
        model = SwinTransformer(**cfg)
        outs = model(torch.randn(3, 3, 384, 384))
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 12, 12))
        # A 224 input yields 7x7 maps, smaller than the window: must raise.
        with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'):
            model(torch.randn(3, 3, 224, 224))
        # test with pad_small_map=True
        cfg = deepcopy(self.cfg)
        cfg['window_size'] = 12
        cfg['pad_small_map'] = True
        model = SwinTransformer(**cfg)
        outs = model(torch.randn(3, 3, 224, 224))
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))
        # test multiple output indices
        cfg = deepcopy(self.cfg)
        cfg['out_indices'] = (0, 1, 2, 3)
        model = SwinTransformer(**cfg)
        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 4)
        for stride, out in zip([2, 4, 8, 8], outs):
            self.assertEqual(out.shape,
                             (3, 128 * stride, 56 // stride, 56 // stride))
        # test with checkpoint forward
        cfg = deepcopy(self.cfg)
        cfg['with_cp'] = True
        model = SwinTransformer(**cfg)
        for m in model.modules():
            if isinstance(m, SwinBlock):
                self.assertTrue(m.with_cp)
        model.init_weights()
        model.train()
        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))
        # test with dynamic input shape
        imgs1 = torch.randn(3, 3, 224, 224)
        imgs2 = torch.randn(3, 3, 256, 256)
        imgs3 = torch.randn(3, 3, 256, 309)
        cfg = deepcopy(self.cfg)
        model = SwinTransformer(**cfg)
        for imgs in [imgs1, imgs2, imgs3]:
            outs = model(imgs)
            self.assertIsInstance(outs, tuple)
            self.assertEqual(len(outs), 1)
            feat = outs[-1]
            # Overall downsampling is 32x (patch 4 x three 2x merges).
            expect_feat_shape = (math.ceil(imgs.shape[2] / 32),
                                 math.ceil(imgs.shape[3] / 32))
            self.assertEqual(feat.shape, (3, 1024, *expect_feat_shape))

    def test_structure(self):
        """Drop-path decay, norm_eval and frozen_stages behave as set."""
        # test drop_path_rate decay
        cfg = deepcopy(self.cfg)
        cfg['drop_path_rate'] = 0.2
        model = SwinTransformer(**cfg)
        depths = model.arch_settings['depths']
        blocks = chain(*[stage.blocks for stage in model.stages])
        for i, block in enumerate(blocks):
            # Stochastic depth rises linearly from 0 to drop_path_rate.
            expect_prob = 0.2 / (sum(depths) - 1) * i
            self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob,
                                   expect_prob)
            self.assertAlmostEqual(block.attn.drop.drop_prob, expect_prob)
        # test Swin-Transformer with norm_eval=True
        cfg = deepcopy(self.cfg)
        cfg['norm_eval'] = True
        cfg['norm_cfg'] = dict(type='BN')
        cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN')))
        model = SwinTransformer(**cfg)
        model.init_weights()
        model.train()
        self.assertTrue(check_norm_state(model.modules(), False))
        # test Swin-Transformer with first stage frozen.
        cfg = deepcopy(self.cfg)
        frozen_stages = 0
        cfg['frozen_stages'] = frozen_stages
        cfg['out_indices'] = (0, 1, 2, 3)
        model = SwinTransformer(**cfg)
        model.init_weights()
        model.train()
        # the patch_embed and first stage should not require grad.
        self.assertFalse(model.patch_embed.training)
        for param in model.patch_embed.parameters():
            self.assertFalse(param.requires_grad)
        for i in range(frozen_stages + 1):
            stage = model.stages[i]
            for param in stage.parameters():
                self.assertFalse(param.requires_grad)
        for param in model.norm0.parameters():
            self.assertFalse(param.requires_grad)
        # the second stage should require grad.
        for i in range(frozen_stages + 1, 4):
            stage = model.stages[i]
            for param in stage.parameters():
                self.assertTrue(param.requires_grad)
            norm = getattr(model, f'norm{i}')
            for param in norm.parameters():
                self.assertTrue(param.requires_grad)
|
#!/usr/bin/python
import time
import pprint
import json
import os
import copy
import shutil
import requesocks as requests
#import requests
from scrapy.selector import Selector
import urllib3
import sys
from datetime import datetime, timedelta
import mr_gov_il
class exemption_extended_data_web_page(mr_gov_il.extended_data_web_page_base):
    """Detail page for a single exemption message on mr.gov.il.

    Scrapes the labelled fields and attached documents out of the page
    HTML held in ``self.response`` (set by the base class -- presumably
    a requests-style response; confirm against mr_gov_il).
    """

    def extract_page_data(self):
        """Return a dict of the page's fields, URL and document links.

        Raises mr_gov_il.NoSuchElementException when no known field is
        present or the document link/update-time lists are mismatched.
        """
        sel = Selector(text=self.response.text)
        ret = {}
        if self.publication_id is not None:
            ret['publication_id'] = self.publication_id
        found_fields = 0
        # NOTE(review): several of these id strings contain '<EMAIL>'
        # placeholders -- they look corrupted by an anonymization pass and
        # will never match real element ids; recover the original ids.
        for field_name, xpath in [('description', '//*[@id="ctl00_PlaceHolderMain_lbl_PublicationName"]'),
                                  ('supplier_id', '//*[@id="ctl<EMAIL>_lbl_Supplier<EMAIL>"]'),
                                  ('supplier', '//*[@id="ctl<EMAIL>Main_lbl_Supplier<EMAIL>"]'),
                                  ('contact', '//*[@id="ctl00_PlaceHolderMain_lbl_ContactPersonName"]'),
                                  ('publisher', '//*[@id="ctl00_PlaceHolderMain_lbl_PUBLISHER"]'),
                                  ('contact_email', '//*[@id="ctl00_<EMAIL>_lbl_<EMAIL>"]'),
                                  ('claim_date', '//*[@id="ctl00_PlaceHolderMain_lbl_ClaimDate"]'),
                                  ('last_update_date', '//*[@id="ctl00_PlaceHolderMain_lbl_UpdateDate"]'),
                                  ('reason', '//*[@id="ctl<EMAIL>_<EMAIL>tor<EMAIL>"]'),
                                  ('source_currency', '//*[@id="ctl<EMAIL>"]'),
                                  ('regulation', '//*[@id="ctl<EMAIL>_<EMAIL>_<EMAIL>"]'),
                                  ('volume', '//*[@id="ctl00_PlaceHolderMain_lbl_TotalAmount"]'),
                                  ('subjects', '//*[@id="ctl00_<EMAIL>Main_lbl_Publication<EMAIL>"]'),
                                  ('start_date', '//*[@id="ctl00_PlaceHolderMain_lbl_StartDate"]'),
                                  ('end_date', '//*[@id="ctl00_PlaceHolderMain_lbl_EndDate"]'),
                                  ('decision', '//*[@id="ctl00_PlaceHolderMain_lbl_Decision"]'),
                                  ('page_title', '//*[@id="ctl00_PlaceHolderMain_lbl_PublicationType"]')]:
            if len(sel.xpath(xpath + '/text()')) == 0:
                ret[field_name] = None
            else:
                found_fields += 1
                try:
                    ret[field_name] = sel.xpath(xpath + '/text()')[0].extract()
                except:
                    # BUG FIX: the original referenced an undefined local
                    # ``url`` (NameError masked the real error); report
                    # self.url instead. Parenthesized print works on both
                    # Python 2 and 3.
                    print("failed %s %s" % (field_name, self.url))
                    raise
        if found_fields == 0:
            raise mr_gov_il.NoSuchElementException('found_fields == 0')
        ret['url'] = self.url
        ret['documents'] = []
        links = sel.xpath('//*[@id="ctl00_PlaceHolderMain_pnl_Files"]/div/div/div[2]/a')
        update_times = sel.xpath('//*[@id="ctl00_PlaceHolderMain_pnl_Files"]/div/div/div[1]')
        if len(links) != len(update_times):
            raise mr_gov_il.NoSuchElementException('len(links) != len(update_times)')
        # ``range`` instead of Python-2-only ``xrange`` (same iteration
        # behavior on Python 2, and Python-3 compatible).
        for i in range(len(links)):
            ret['documents'].append({'description': links[i].xpath('text()')[0].extract(),
                                     'link': 'http://www.mr.gov.il' + links[i].xpath('@href')[0].extract(),
                                     'update_time': update_times.xpath('text()')[0].extract()
                                     })
        return ret
class exemption_search_web_page(mr_gov_il.search_web_page_base):
    """Search-results page for exemption messages on mr.gov.il.

    Drives the ASP.NET search form and post-processes scraped records.
    """

    extended_data_web_page_cls = exemption_extended_data_web_page
    search_page_url = 'http://www.mr.gov.il/ExemptionMessage/Pages/SearchExemptionMessages.aspx'
    publisher_option_xpath = '//*[@id="ctl00_m_g_cf609c81_a070_46f2_9543_e90c7ce5195b_ctl00_ddlPublisher"]/option'
    results_table_base_xpath = '//*[@id="ctl00_m_g_cf609c81_a070_46f2_9543_e90c7ce5195b_ctl00_grvMichrazim"]'
    url_xpath = results_table_base_xpath + '/tr[%d]/td[1]/a/@href'
    expected_table_columns = 8

    def fill_form(self, d=None):
        """POST the ASP.NET search form, merging ``d`` over the defaults.

        BUG FIX: the default was a shared mutable dict (``d={}``); use the
        ``None`` sentinel idiom instead (backward compatible -- the dict
        was only read, never mutated, so behavior is unchanged).
        """
        if d is None:
            d = {}
        form_data = {
            '__EVENTTARGET': '',
            '__EVENTARGUMENT': '',
        }
        # Carry over the hidden ASP.NET state inputs (__VIEWSTATE etc.).
        for form_data_elem in self.must_exist_xpath('//*[@id="aspnetForm"]/input'):
            form_data[form_data_elem.xpath('@name')[0].extract()] = form_data_elem.xpath('@value')[0].extract()
        for form_data_elem in self.must_exist_xpath('//*[@id="WebPartWPQ3"]//select'):
            form_data[form_data_elem.xpath('@name')[0].extract()] = 0
        for form_data_elem in self.must_exist_xpath('//*[@id="WebPartWPQ3"]//input'):
            form_data[form_data_elem.xpath('@name')[0].extract()] = ''
        if 'publisher_index' in self.search_params:
            form_data['ctl00$m$g_cf609c81_a070_46f2_9543_e90c7ce5195b$ctl00$ddlPublisher'] = self.search_params['publisher_index']
        form_data.update(d)
        # the clear button was not clicked
        form_data.pop('ctl00$m$g_cf609c81_a070_46f2_9543_e90c7ce5195b$ctl00$btnClear')
        if form_data['__EVENTTARGET']:
            # if a page number was presses, the search button was not clicked
            form_data.pop('ctl00$m$g_cf609c81_a070_46f2_9543_e90c7ce5195b$ctl00$btnSearch')
        self.request('post', data=form_data)

    @classmethod
    def split_subjects(cls, record):
        """Normalize record['subjects'] to a list (split on '; ')."""
        if type(record['subjects']) is list:
            return
        if record['subjects'] is None:
            record['subjects'] = []
            return
        record['subjects'] = record['subjects'].split('; ')

    @classmethod
    def add_publication_id(cls, record):
        """Derive record['publication_id'] from the pID query parameter."""
        if 'publication_id' in record:
            return
        url = record['url']
        publication_id = None
        for k, v in [x.split('=', 1) for x in url.split('?', 1)[1].split('&')]:
            if k == 'pID':
                publication_id = int(v)
                break
        if publication_id is None:
            raise AssertionError('pID not in url %s' % url)
        record['publication_id'] = publication_id

    @classmethod
    def process_record(cls, record):
        """Clean one scraped record in place and return it."""
        cls.add_publication_id(record)
        cls.empty_str_is_none(record, 'supplier_id')
        cls.field_to_int(record, 'supplier_id')
        cls.zero_is_none(record, 'supplier_id')
        cls.format_documents_time(record)
        cls.split_subjects(record)
        cls.field_to_int(record, 'volume')
        cls.zero_is_none(record, 'volume')
        return record
if __name__ == "__main__":
    from optparse import OptionParser

    # CLI: scrape exemption messages, or re-scrape the URLs of an
    # earlier run, into the given output file.
    parser = OptionParser(usage='usage: %prog [options] <output filename>')
    parser.add_option("--rescrape", dest="rescrape", action='store',
                      help='rescrape the urls of a previous scrape',
                      metavar='old_json', default=None)
    parser.add_option("--since", dest="since", action='store',
                      help='since a date or one of: yesterday, last_week, last_year, all_time',
                      default=None)
    opts, positional = parser.parse_args()
    if len(positional) != 1:
        parser.error('must provide an output filename')
    output_filename = positional[0]
    if opts.rescrape:
        exemption_search_web_page.rescrape(input_filename=opts.rescrape,
                                           output_filename=output_filename)
    else:
        exemption_search_web_page.scrape(output_filename=output_filename,
                                         since=opts.since)
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
async def test_should_dispatch_click_event(page, server):
    """dispatch_event('click') runs the page's click handler."""
    await page.goto(server.PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    assert await page.evaluate("() => result") == "Clicked"
async def test_should_dispatch_click_event_properties(page, server):
    """The synthesized click bubbles, is cancelable and composed."""
    await page.goto(server.PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    assert await page.evaluate("() => bubbles")
    assert await page.evaluate("() => cancelable")
    assert await page.evaluate("() => composed")
async def test_should_dispatch_click_svg(page):
    """dispatch_event works on SVG elements (inline onclick fires)."""
    await page.set_content(
        """
      <svg height="100" width="100">
        <circle onclick="javascript:window.__CLICKED=42" cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" />
      </svg>
    """
    )
    await page.dispatch_event("circle", "click")
    assert await page.evaluate("() => window.__CLICKED") == 42
async def test_should_dispatch_click_on_a_span_with_an_inline_element_inside(page):
    """Clicking a span whose content comes from a ::before rule works."""
    await page.set_content(
        """
      <style>
      span::before {
        content: 'q';
      }
      </style>
      <span onclick='javascript:window.CLICKED=42'></span>
    """
    )
    await page.dispatch_event("span", "click")
    assert await page.evaluate("() => window.CLICKED") == 42
async def test_should_dispatch_click_after_navigation(page, server):
    """dispatch_event still works after a same-origin re-navigation."""
    await page.goto(server.PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    await page.goto(server.PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    assert await page.evaluate("() => result") == "Clicked"
async def test_should_dispatch_click_after_a_cross_origin_navigation(page, server):
    """dispatch_event still works after navigating to another origin."""
    await page.goto(server.PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    await page.goto(server.CROSS_PROCESS_PREFIX + "/input/button.html")
    await page.dispatch_event("button", "click")
    assert await page.evaluate("() => result") == "Clicked"
async def test_should_not_fail_when_element_is_blocked_on_hover(page, server):
    """dispatch_event bypasses hit-testing, so an overlay that would
    intercept a real click does not block the synthesized one."""
    await page.set_content(
        """<style>
      container { display: block; position: relative; width: 200px; height: 50px; }
      div, button { position: absolute; left: 0; top: 0; bottom: 0; right: 0; }
      div { pointer-events: none; }
      container:hover div { pointer-events: auto; background: red; }
    </style>
    <container>
      <button onclick="window.clicked=true">Click me</button>
      <div></div>
    </container>"""
    )
    await page.dispatch_event("button", "click")
    assert await page.evaluate("() => window.clicked")
async def test_should_dispatch_click_when_node_is_added_in_shadow_dom(page, server):
    """dispatch_event waits for the selector: start it before the target
    exists, create the span inside a shadow root, then await it."""
    await page.goto(server.EMPTY_PAGE)
    # Deliberately not awaited yet -- the span does not exist.
    watchdog = page.dispatch_event("span", "click")
    await page.evaluate(
        """() => {
        const div = document.createElement('div');
        div.attachShadow({mode: 'open'});
        document.body.appendChild(div);
      }"""
    )
    await page.evaluate("() => new Promise(f => setTimeout(f, 100))")
    await page.evaluate(
        """() => {
        const span = document.createElement('span');
        span.textContent = 'Hello from shadow';
        span.addEventListener('click', () => window.clicked = true);
        document.querySelector('div').shadowRoot.appendChild(span);
      }"""
    )
    await watchdog
    assert await page.evaluate("() => window.clicked")
async def test_should_be_atomic(selectors, page, utils):
    """The selector engine strips onclick in a microtask right after
    query(); dispatch_event must act on the element state captured at
    query time, so the click handler still fires."""
    # NOTE(review): in queryAll the callback assigns ``result.onclick``
    # (the array) rather than ``e.onclick`` -- looks like a typo, but this
    # test only exercises the single-element query() path; confirm.
    await utils.register_selector_engine(
        selectors,
        "dispatch_event",
        """{
            create(root, target) { },
            query(root, selector) {
              const result = root.querySelector(selector);
              if (result)
                Promise.resolve().then(() => result.onclick = "");
              return result;
            },
            queryAll(root, selector) {
              const result = Array.from(root.querySelectorAll(selector));
              for (const e of result)
                Promise.resolve().then(() => result.onclick = "");
              return result;
            }
        }""",
    )
    await page.set_content('<div onclick="window._clicked=true">Hello</div>')
    await page.dispatch_event("dispatch_event=div", "click")
    assert await page.evaluate("() => window._clicked")
async def test_should_dispatch_drag_drop_events(page, server):
    """dragstart + drop with a shared DataTransfer moves the element."""
    await page.goto(server.PREFIX + "/drag-n-drop.html")
    dataTransfer = await page.evaluate_handle("() => new DataTransfer()")
    await page.dispatch_event("#source", "dragstart", {"dataTransfer": dataTransfer})
    await page.dispatch_event("#target", "drop", {"dataTransfer": dataTransfer})
    assert await page.evaluate(
        """() => {
        return source.parentElement === target;
    }"""
    )
async def test_should_dispatch_drag_and_drop_events_element_handle(page, server):
    """Same drag/drop flow via ElementHandle.dispatch_event."""
    await page.goto(server.PREFIX + "/drag-n-drop.html")
    dataTransfer = await page.evaluate_handle("() => new DataTransfer()")
    source = await page.query_selector("#source")
    await source.dispatch_event("dragstart", {"dataTransfer": dataTransfer})
    target = await page.query_selector("#target")
    await target.dispatch_event("drop", {"dataTransfer": dataTransfer})
    assert await page.evaluate(
        """() => {
        return source.parentElement === target;
    }"""
    )
async def test_should_dispatch_click_event_element_handle(page, server):
    """ElementHandle.dispatch_event('click') runs the click handler."""
    await page.goto(server.PREFIX + "/input/button.html")
    button = await page.query_selector("button")
    await button.dispatch_event("click")
    assert await page.evaluate("() => result") == "Clicked"
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Net(neutron.NeutronResource):
    """A resource for managing Neutron net.
    A network is a virtual isolated layer-2 broadcast domain which is typically
    reserved to the tenant who created it, unless the network has been
    explicitly configured to be shared.
    """

    # Entity name used by the NeutronResource base for generic show calls.
    entity = 'network'

    # Heat template property names.
    PROPERTIES = (
        NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
        DHCP_AGENT_IDS, PORT_SECURITY_ENABLED, QOS_POLICY,
        DNS_DOMAIN, TAGS,
    ) = (
        'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
        'dhcp_agent_ids', 'port_security_enabled', 'qos_policy',
        'dns_domain', 'tags',
    )

    # Resolvable attribute names (note QOS_POLICY_ATTR maps to the
    # 'qos_policy_id' API field).
    ATTRIBUTES = (
        STATUS, NAME_ATTR, SUBNETS, ADMIN_STATE_UP_ATTR, TENANT_ID_ATTR,
        PORT_SECURITY_ENABLED_ATTR, MTU_ATTR, QOS_POLICY_ATTR, L2_ADJACENCY,
        SEGMENTS,
    ) = (
        "status", "name", "subnets", "admin_state_up", "tenant_id",
        "port_security_enabled", "mtu", 'qos_policy_id', 'l2_adjacency',
        'segments',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('A string specifying a symbolic name for the network, which is '
              'not required to be unique.'),
            update_allowed=True
        ),
        VALUE_SPECS: properties.Schema(
            properties.Schema.MAP,
            _('Extra parameters to include in the request. Parameters are '
              'often specific to installed hardware or extensions.'),
            default={},
            update_allowed=True
        ),
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('A boolean value specifying the administrative status of the '
              'network.'),
            default=True,
            update_allowed=True
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('The ID of the tenant which will own the network. Only '
              'administrative users can set the tenant identifier; this '
              'cannot be changed using authorization policies.')
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether this network should be shared across all tenants. '
              'Note that the default policy setting restricts usage of this '
              'attribute to administrative users only.'),
            default=False,
            update_allowed=True
        ),
        DHCP_AGENT_IDS: properties.Schema(
            properties.Schema.LIST,
            _('The IDs of the DHCP agent to schedule the network. Note that '
              'the default policy setting in Neutron restricts usage of this '
              'property to administrative users only.'),
            update_allowed=True
        ),
        PORT_SECURITY_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Flag to enable/disable port security on the network. It '
              'provides the default value for the attribute of the ports '
              'created on this network.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='5.0.0')
        ),
        QOS_POLICY: properties.Schema(
            properties.Schema.STRING,
            _('The name or ID of QoS policy to attach to this network.'),
            constraints=[
                constraints.CustomConstraint('neutron.qos_policy')
            ],
            update_allowed=True,
            support_status=support.SupportStatus(version='6.0.0')
        ),
        DNS_DOMAIN: properties.Schema(
            properties.Schema.STRING,
            _('DNS domain associated with this network.'),
            constraints=[
                constraints.CustomConstraint('dns_domain')
            ],
            update_allowed=True,
            support_status=support.SupportStatus(version='7.0.0')
        ),
        TAGS: properties.Schema(
            properties.Schema.LIST,
            _('The tags to be added to the network.'),
            schema=properties.Schema(properties.Schema.STRING),
            update_allowed=True,
            support_status=support.SupportStatus(version='9.0.0')
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _("The status of the network."),
            type=attributes.Schema.STRING
        ),
        NAME_ATTR: attributes.Schema(
            _("The name of the network."),
            type=attributes.Schema.STRING
        ),
        SUBNETS: attributes.Schema(
            _("Subnets of this network."),
            type=attributes.Schema.LIST
        ),
        ADMIN_STATE_UP_ATTR: attributes.Schema(
            _("The administrative status of the network."),
            type=attributes.Schema.STRING
        ),
        TENANT_ID_ATTR: attributes.Schema(
            _("The tenant owning this network."),
            type=attributes.Schema.STRING
        ),
        PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
            _("Port security enabled of the network."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.BOOLEAN
        ),
        MTU_ATTR: attributes.Schema(
            _("The maximum transmission unit size(in bytes) for the network."),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.INTEGER
        ),
        QOS_POLICY_ATTR: attributes.Schema(
            _("The QoS policy ID attached to this network."),
            type=attributes.Schema.STRING,
            support_status=support.SupportStatus(version='6.0.0'),
        ),
        L2_ADJACENCY: attributes.Schema(
            _("A boolean value for L2 adjacency, True means that you can "
              "expect L2 connectivity throughout the Network."),
            type=attributes.Schema.BOOLEAN,
            support_status=support.SupportStatus(version='9.0.0'),
        ),
        SEGMENTS: attributes.Schema(
            _("The segments of this network."),
            type=attributes.Schema.LIST,
            support_status=support.SupportStatus(version='11.0.0'),
        ),
    }

    def translation_rules(self, properties):
        """Resolve the QoS policy name/ID property to a Neutron ID."""
        return [translation.TranslationRule(
            properties,
            translation.TranslationRule.RESOLVE,
            [self.QOS_POLICY],
            client_plugin=self.client_plugin(),
            finder='get_qos_policy_id')]

    def handle_create(self):
        """Create the network, then apply DHCP agent scheduling and tags."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        # These three are not part of the create_network request body; they
        # are applied via separate API calls below.
        dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
        qos_policy = props.pop(self.QOS_POLICY, None)
        tags = props.pop(self.TAGS, [])
        if qos_policy:
            # Already resolved to an ID by translation_rules.
            props['qos_policy_id'] = qos_policy
        net = self.client().create_network({'network': props})['network']
        self.resource_id_set(net['id'])
        if dhcp_agent_ids:
            self._replace_dhcp_agents(dhcp_agent_ids)
        if tags:
            self.set_tags(tags)

    def check_create_complete(self, *args):
        """Poll until the network reaches a built state."""
        attributes = self._show_resource()
        self._store_config_default_properties(attributes)
        return self.is_built(attributes)

    def handle_delete(self):
        """Delete the network; a missing network counts as success."""
        try:
            self.client().delete_network(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
        else:
            return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply property changes.

        DHCP agents, QoS policy and tags use dedicated API calls and are
        popped out of prop_diff; any remaining keys go through a single
        update_network call.
        """
        if prop_diff:
            self.prepare_update_properties(prop_diff)
        if self.DHCP_AGENT_IDS in prop_diff:
            dhcp_agent_ids = prop_diff.pop(self.DHCP_AGENT_IDS) or []
            self._replace_dhcp_agents(dhcp_agent_ids)
        if self.QOS_POLICY in prop_diff:
            qos_policy = prop_diff.pop(self.QOS_POLICY)
            # None detaches the policy; otherwise resolve name/ID to an ID.
            prop_diff[
                'qos_policy_id'] = self.client_plugin().get_qos_policy_id(
                qos_policy) if qos_policy else None
        if self.TAGS in prop_diff:
            self.set_tags(prop_diff.pop(self.TAGS))
        if prop_diff:
            self.client().update_network(self.resource_id,
                                         {'network': prop_diff})

    def check_update_complete(self, *args):
        """Poll until the updated network reaches a built state."""
        attributes = self._show_resource()
        return self.is_built(attributes)

    def _replace_dhcp_agents(self, dhcp_agent_ids):
        """Reconcile DHCP agent scheduling to exactly dhcp_agent_ids."""
        ret = self.client().list_dhcp_agent_hosting_networks(
            self.resource_id)
        old = set([agent['id'] for agent in ret['agents']])
        new = set(dhcp_agent_ids)
        for dhcp_agent_id in new - old:
            try:
                self.client().add_network_to_dhcp_agent(
                    dhcp_agent_id, {'network_id': self.resource_id})
            except Exception as ex:
                # if 409 is happened, the agent is already associated.
                if not self.client_plugin().is_conflict(ex):
                    raise
        for dhcp_agent_id in old - new:
            try:
                self.client().remove_network_from_dhcp_agent(
                    dhcp_agent_id, self.resource_id)
            except Exception as ex:
                # assume 2 patterns about status_code following:
                #  404: the network or agent is already gone
                #  409: the network isn't scheduled by the dhcp_agent
                if not (self.client_plugin().is_conflict(ex) or
                        self.client_plugin().is_not_found(ex)):
                    raise
    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map observed (live) network data back onto template properties."""
        result = super(Net, self).parse_live_resource_data(
            resource_properties, resource_data)
        # 'shared' is dropped from live data; qos_policy comes back under
        # its API name 'qos_policy_id'.
        result.pop(self.SHARED)
        result[self.QOS_POLICY] = resource_data.get('qos_policy_id')
        try:
            dhcp = self.client().list_dhcp_agent_hosting_networks(
                self.resource_id)
            dhcp_agents = set([agent['id'] for agent in dhcp['agents']])
            result.update({self.DHCP_AGENT_IDS: list(dhcp_agents)})
        except self.client_plugin().exceptions.Forbidden:
            # Just don't add dhcp_clients if we can't get values.
            pass
        return result

    def _resolve_attribute(self, name):
        """Resolve an attribute; SEGMENTS needs the openstacksdk client."""
        if self.resource_id is None:
            return
        if name == self.SEGMENTS:
            return [segment.to_dict() for segment in list(self.client(
                'openstack').network.segments(network_id=self.resource_id))]
        attributes = self._show_resource()
        return attributes[name]
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    return {'OS::Neutron::Net': Net}
|
# Source: volodymyrss/astromodels -- astromodels/tests/test_tree.py
import pytest
import os
os.environ["ASTROMODELS_DEBUG"] = "debug"
from astromodels.core.tree import Node
from astromodels.core import node_ctype
import gc
def clean():
    """Force a full garbage-collection pass between tests.

    Returns the number of unreachable objects found, as reported by
    gc.collect() (previously discarded; existing callers ignore it, so
    this is backward compatible and useful for debugging leaks).
    """
    return gc.collect()
class _SimpleInheritance(Node):
    """Minimal Node subclass used by the tests below.

    Sets an instance attribute *before* calling the Node constructor --
    order may matter if Node restricts attribute creation after init;
    TODO confirm against Node's implementation.
    """
    def __init__(self, name):
        print("INIT of _SimpleInheritance")
        self._placeholder = 2
        super(_SimpleInheritance, self).__init__(name)
    @property
    def placeholder(self):
        # Read-only view of the attribute set in __init__.
        return self._placeholder
class _ComplexInheritance(Node):
    """Node subclass that stores extra state after calling Node.__init__."""
    def __init__(self, name, min_value, max_value):
        super(_ComplexInheritance, self).__init__(name)
        self._min_value = min_value
        self._max_value = max_value
    @property
    def min_value(self):
        """Lower bound stored at construction time."""
        return self._min_value
    @property
    def max_value(self):
        """Upper bound stored at construction time."""
        return self._max_value
def test_constructor():
    """A Node keeps its name, and construction without a name must fail."""
    node = Node(name='node1')
    assert node.name == 'node1'
    # The name argument is mandatory.
    with pytest.raises(TypeError):
        Node()
    clean()
def test_inheritance():
    """A Node subclass keeps the name given at construction."""
    instance = _SimpleInheritance('test1')
    assert instance.name == 'test1'
    clean()
def test_add_child():
    """Adding a child wires it up and protects it from reassignment."""
    parent = _SimpleInheritance('test_add_child')
    # Only Node instances are accepted as children.
    with pytest.raises(TypeError):
        parent._add_child("clara")
    clara = Node("clara")
    baseline = node_ctype._get_reference_counts(clara)
    parent._add_child(clara)
    # Linking the child adds exactly one reference to it.
    assert node_ctype._get_reference_counts(clara) == baseline + 1
    assert parent.clara == clara
    # The child attribute cannot be overwritten...
    with pytest.raises(AttributeError):
        parent.clara = 'argh'
    # ...and the failed assignment leaves it untouched.
    assert parent.clara == clara
    clean()
def test_add_children():
    """Several children can be attached in a single call."""
    parent = _SimpleInheritance("children")
    parent._add_children([Node("node1"), Node("node2")])
    clean()
def test_get_child():
    """_get_child returns a child by name and raises for unknown names."""
    parent = _SimpleInheritance("test")
    child = Node("node")
    parent._add_child(child)
    assert parent._get_child("node") == child
    with pytest.raises(AttributeError):
        parent._get_child("not existing")
    clean()
def test_get_children():
    """_get_children reports direct children only, in insertion order."""
    root = Node('node1')
    first = Node('node2')
    second = Node('node22')
    grandchild = Node('node3')
    root._add_child(first)
    root._add_child(second)
    first._add_child(grandchild)
    # Grandchildren are not included in a node's children.
    assert root._get_children() == (first, second)
    assert first._get_children() == (grandchild,)
    clean()
def test_remove_child():
    """After removal, the child is reachable neither as an attribute nor by name."""
    t = _SimpleInheritance("test")
    n = Node("node")
    # NOTE(review): adds the same node 1000 times - presumably a stress test;
    # confirm _add_child is idempotent for an already-attached child.
    for i in range(1000):
        t._add_child(n)
    assert t._get_child("node") == n
    t._remove_child("node")
    with pytest.raises(AttributeError):
        # Fixed: Python-2-only print statement -> print() function, matching
        # the rest of the file and keeping the module importable on Python 3.
        # (The attribute access raises before print runs, so behavior in the
        # passing case is unchanged.)
        print(t.node)
    with pytest.raises(AttributeError):
        t._get_child("node")
    clean()
def test_get_path():
    """A node's path is the dot-joined chain of ancestor names."""
    # Make a small tree
    node1 = Node('node1')
    node2 = Node('node2')
    node3 = Node('node3')
    node1._add_child(node2)
    node2._add_child(node3)
    # Fixed: Python-2-only print statement -> print() function for
    # Python 3 compatibility (rest of the file already uses print()).
    print(node3.path)
    assert node3.path == "node1.node2.node3"
    clean()
def test_get_child_from_path():
    """A dotted path addresses a nested descendant from the root."""
    root = Node('node1')
    middle = Node('node2')
    leaf = Node('node3')
    root._add_child(middle)
    middle._add_child(leaf)
    assert root._get_child_from_path("node2.node3") == leaf
    clean()
def test_change_name():
    """Direct assignment to .name is rejected; _change_name works."""
    instance = _SimpleInheritance("name1")
    assert instance.name == "name1"
    with pytest.raises(AttributeError):
        instance.name = "name2"
    # The failed assignment must not have altered the name.
    assert instance.name == "name1"
    instance._change_name("name2")
    assert instance.name == "name2"
    clean()
def test_pickle():
    """Nodes and Node subclasses survive a pickle round trip."""
    print("\n\n\n\n")
    import pickle
    root = Node("root")
    node = Node("node")
    root._add_child(node)
    d = pickle.dumps(root)
    root2 = pickle.loads(d)
    assert root2.node.path == 'root.node'
    assert root2.node.name == 'node'
    # Now test pickling a subclass of Node
    root = _SimpleInheritance("root")
    root._placeholder = 5.3
    node = _SimpleInheritance("node")
    root._add_child(node)
    d = pickle.dumps(root)
    root2 = pickle.loads(d)
    assert root2.node.path == 'root.node'
    assert root2.node.name == 'node'
    # Fixed: Python-2-only print statements -> print() function so the module
    # also parses on Python 3 (rest of the file already uses print()).
    print(root.placeholder)
    # Extra instance state must survive the round trip as well.
    assert root2.placeholder == root.placeholder
    # A subclass with additional constructor arguments must also round-trip.
    rootc = _ComplexInheritance("root", -1.0, 1.0)
    nodec = _ComplexInheritance("node", -5.0, 5.0)
    rootc._add_child(nodec)
    d = pickle.dumps(rootc)
    root2c = pickle.loads(d)
    print(root2c.min_value)
    clean()
def test_memory_leaks_setters():
    """Linking and unlinking children must not leak references."""
    root = Node("root")
    node = Node("node")
    refc_before_link = node_ctype._get_reference_counts(node)
    root._add_child(node)
    # Adding a node adds 1 reference
    assert node_ctype._get_reference_counts(node) == refc_before_link + 1
    # Remove the node and verify that the reference counts goes back to what it was
    root._remove_child("node")
    # Now we should go back to the original
    assert node_ctype._get_reference_counts(node) == refc_before_link
    # Now add a second node nested under the first one (root.node.node2)
    node2 = Node("node2")
    refc_before_link2 = node_ctype._get_reference_counts(node2)
    root._add_child(node)  # +1 for node
    assert node_ctype._get_reference_counts(node) == refc_before_link + 1
    node._add_child(node2)  # +1 for node2 and +1 for node
    assert node_ctype._get_reference_counts(node2) == refc_before_link2 + 1
    # "node" is now also parent of node2, so its reference are now 2 more than the original
    assert node_ctype._get_reference_counts(node) == refc_before_link + 2
    # Clean up and verify that we go back to the original number of references
    node._remove_child("node2")  # -1 for node2, -1 for node
    assert node_ctype._get_reference_counts(node2) == refc_before_link2
    assert node_ctype._get_reference_counts(node) == refc_before_link + 1
    root._remove_child("node")  # -1 for node
    assert node_ctype._get_reference_counts(node) == refc_before_link
    # Now test add_children
    node3 = Node("node3")
    node4 = Node("node4")
    refc_before_link3 = node_ctype._get_reference_counts(node3)
    refc_before_link4 = node_ctype._get_reference_counts(node4)
    root._add_children([node3, node4])  # +1 for both nodes
    assert node_ctype._get_reference_counts(node3) == refc_before_link3 + 1
    # Fixed copy-paste bug: this assertion previously compared node3's count
    # against node4's baseline; it must check node4 itself.
    assert node_ctype._get_reference_counts(node4) == refc_before_link4 + 1
def test_memory_leaks_getters():
    """Accessor methods must borrow/return references without leaking.

    The expected reference-count deltas are annotated on each call; the
    assertions verify the running totals.
    """
    # Now test the getters
    root = Node("root")
    node1 = Node("node1")
    node2 = Node("node2")
    node3 = Node("node3")
    refc_before_link_root = node_ctype._get_reference_counts(root)
    refc_before_link1 = node_ctype._get_reference_counts(node1)
    refc_before_link2 = node_ctype._get_reference_counts(node2)
    refc_before_link3 = node_ctype._get_reference_counts(node3)
    # NOTE(review): this baseline is never asserted against - dead binding?
    None_counts_before = node_ctype._get_reference_counts(None)
    # Add 3 nodes to root
    root._add_children([node1, node2, node3]) # node1 +1, node2 +1, node3 + 1, root +3
    node_again = root._get_child("node1") # node1 +1
    assert node_ctype._get_reference_counts(node1) == refc_before_link1 + 2
    del node_again # node1 -1
    assert node_ctype._get_reference_counts(node1) == refc_before_link1 + 1
    children = root._get_children() #node1 +1, node2 +1, node3 +1
    assert len(children) == 3
    assert node_ctype._get_reference_counts(node1) == refc_before_link1 + 2
    assert node_ctype._get_reference_counts(node2) == refc_before_link2 + 2
    assert node_ctype._get_reference_counts(node3) == refc_before_link3 + 2
    assert node_ctype._get_reference_counts(root) == refc_before_link_root + 3
    # test get_parent
    root_again = node1._get_parent() # root +1
    assert node_ctype._get_reference_counts(root) == refc_before_link_root + 4
    del root_again # root -1
    assert node_ctype._get_reference_counts(root) == refc_before_link_root + 3
    # test _get_path
    path = node2._get_path() # this shouldn't change any ref count
    assert node_ctype._get_reference_counts(node1) == refc_before_link1 + 2
    assert node_ctype._get_reference_counts(node2) == refc_before_link2 + 2
    assert node_ctype._get_reference_counts(node3) == refc_before_link3 + 2
    assert node_ctype._get_reference_counts(root) == refc_before_link_root + 3
    # test _get_child_from_path
    node4 = Node("node4")
    refc_before_link4 = node_ctype._get_reference_counts(node4)
    node3._add_child(node4) # node3 +1, node4 + 1
    node4_again = root._get_child_from_path("node3.node4") # node4 +1
    assert node_ctype._get_reference_counts(node4) == refc_before_link4 + 2
    assert node_ctype._get_reference_counts(node3) == refc_before_link3 + 3
    del node4_again # node4 -1
    assert node_ctype._get_reference_counts(node4) == refc_before_link4 + 1
def test_memory_leaks_destructors():
    """Repeatedly build and tear down a small tree, checking refcounts.

    Runs many rounds so that a per-round leak accumulates and becomes
    visible.  The trailing commented-out assertions are scaffolding for
    destructor checks that are not currently enabled.
    """
    print("\n\n\n\n\n")
    for i in range(1000):
        print("\n\nRound %i" % (i+1))
        # Test destructors
        root = Node("root")
        node1 = Node("node1")
        node2 = Node("node2")
        node3 = Node("node3")
        refc_before_link_root = node_ctype._get_reference_counts(root)
        refc_before_link1 = node_ctype._get_reference_counts(node1)
        refc_before_link2 = node_ctype._get_reference_counts(node2)
        refc_before_link3 = node_ctype._get_reference_counts(node3)
        # root._add_children([node1, node2, node3]) # node1 +1, node2 + 1, node3 + 1, root +3
        root._add_child(node1)
        root._add_child(node2)
        root._add_child(node3)
        assert node_ctype._get_reference_counts(root) == refc_before_link_root + 3
        assert node_ctype._get_reference_counts(node1) == refc_before_link1 + 1
        assert node_ctype._get_reference_counts(node2) == refc_before_link2 + 1
        assert node_ctype._get_reference_counts(node3) == refc_before_link3 + 1
        #
        # Let's destroy the tree
        root._remove_child("node1")
        root._remove_child("node2")
        root._remove_child("node3")
        #
        # assert node_ctype._get_reference_counts(root) == refc_before_link_root + 3
        #
        # # Now deleting root should have decreased all nodes by 3, i..e, should have got them back to the initial
        # # reference counts
        # assert node_ctype._get_reference_counts(node1) == refc_before_link1
        # assert node_ctype._get_reference_counts(node2) == refc_before_link2
        # assert node_ctype._get_reference_counts(node3) == refc_before_link3
|
<reponame>hidaruma/caty<gh_stars>0
#coding: utf-8
import xjson
import urllib
import cgi
# HTML elements treated as block-level content: when one of these is emitted
# by XJXMarkup.markup a newline is appended after the element.
BLOCK = [
    'p',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'table',
    'tr',
    'li',
    'ul',
    'ol',
    'pre',
    'dt',
    'dd',
    'dl',
    'div',
    'blockquote',
]
def markup(input, mode):
    """Render the XJX document *input* to markup in the given mode."""
    transformer = XJXMarkup(input, mode)
    return transformer.transform()
def strip(input):
    """Reduce the XJX document *input* to plain text (mode is irrelevant)."""
    transformer = XJXMarkup(input, None)
    return transformer.strip()
class XJXMarkup(object):
    """Renders an XJX (tagged-JSON) document tree to markup or plain text.

    `transform()` produces HTML-like markup; `strip()` reduces the document
    to the plain text of its first paragraph (used e.g. for summaries).
    """
    def __init__(self, input, mode):
        self.input = input
        # mode u'html' switches empty li/ul/ol elements to the
        # open-tag/close-tag form instead of the self-closing form.
        self.mode = mode
    def transform(self):
        """Render the whole input document to a markup string."""
        return self.markup(self.input)
    def strip(self):
        """Return the input reduced to plain text."""
        return self._strip(self.input)
    def markup(self, input):
        """Recursively render a node list (or a single node) to markup."""
        r = []
        if isinstance(input, (basestring, xjson.TaggedValue)):
            return self.markup([input])
        for node in input:
            if xjson.tag(node) == 'string':
                r.append(cgi.escape(node))
            else:
                tag, data = xjson.split_tag(node)
                if tag == 'charref':
                    # Character references are emitted verbatim.
                    r.append(u''.join(data['']))
                elif tag == 'section':
                    # Sections render as div elements followed by a newline.
                    tag = 'div'
                    body = []
                    attrs = {}
                    for k, v in data.items():
                        if k == '':
                            body = self.markup(self.compress(v))
                        else:
                            attrs[k] = cgi.escape(v)
                    # (Removed an unused local that read attrs['class'].)
                    elem = self._to_element(tag, attrs, body)
                    r.append(elem)
                    r.append(u'\n')
                elif tag == 'ruby':
                    r.append(self._transform_ruby(data))
                else:
                    attrs = {}
                    body = []
                    for k, v in data.items():
                        if k == '':
                            body = self.markup(self.compress(v))
                        else:
                            attrs[k] = v
                    elem = self._to_element(tag, attrs, body)
                    r.append(elem)
                    if tag in BLOCK:
                        r.append(u'\n')
        return u''.join(r)
    def _attrs_to_str(self, data):
        """Render all non-body entries of *data* as an attribute string."""
        attrs = {}
        for k, v in data.items():
            if k:
                attrs[k] = cgi.escape(v)
        return self._to_attr(attrs)
    def _to_element(self, tag, attrs, body):
        """Serialize one element; empty li/ul/ol get special HTML handling."""
        tag = tag.lstrip()
        if body:
            if attrs:
                return u'<%(tag)s %(attrs)s>%(body)s</%(tag)s>' % ({'tag': tag,
                                                                    'attrs':self._to_attr(attrs),
                                                                    'body': body})
            else:
                return u'<%(tag)s>%(body)s</%(tag)s>' % ({'tag': tag, 'body': body})
        else:
            if attrs:
                if tag in ('li', 'ul', 'ol') and self.mode == u'html':
                    return u'<%s %s></%s>' % (tag, self._to_attr(attrs), tag)
                return u'<%s %s />' % (tag, self._to_attr(attrs))
            elif tag in ('li', 'ul', 'ol') and self.mode == u'html':
                return u'<%s></%s>' % (tag, tag)
            else:
                return u'<%s />' % (tag)
    def _to_attr(self, attrs):
        """Serialize an attribute dict to 'name="value"' pairs."""
        r = []
        for k, v in attrs.items():
            r.append('%s="%s"' % (k, cgi.escape(v)))
        return ' '.join(r)
    def compress(self, seq):
        """Join adjacent plain strings in *seq*, keeping tagged nodes apart."""
        r = []
        t = []
        for s in seq:
            if isinstance(s, basestring):
                t.append(s)
            else:
                r.append(u''.join(t))
                r.append(s)
                t = []
        if t:
            r.append(u''.join(t))
        return r
    def _transform_ruby(self, data):
        """Render a ruby annotation: base, opening rp, rt, closing rp."""
        rb = self._transform_rb(data[''][0])
        rp1 = self._transform_rp(data[''][1])
        rt = self._transform_rt(data[''][2])
        rp2 = self._transform_rp(data[''][3])
        return u'<ruby>%s%s%s%s</ruby>' % (rb, rp1, rt, rp2)
    def _transform_rb(self, obj):
        c = xjson.untagged(obj)['']
        return u'%s' % self.markup(c)
    def _transform_rp(self, obj):
        c = xjson.untagged(obj)['']
        return u'<rp>%s</rp>' % self.markup(c)
    def _transform_rt(self, obj):
        c = xjson.untagged(obj)['']
        return u'<rt>%s</rt>' % self.markup(c)
    def _strip(self, input, paragraphfound=False):
        """Recursively reduce *input* to plain text (first paragraph only)."""
        r = []
        if isinstance(input, (basestring, xjson.TaggedValue)):
            return self._strip([input])
        for node in input:
            if xjson.tag(node) == 'string':
                r.append(node)
            else:
                tag, data = xjson.split_tag(node)
                if tag in ('div', 'span') and '__mathjaxsection__' in data.get('class', '').split():
                    # Math: collapse the delimiter to a single '$' and keep
                    # the formula text itself untouched.
                    r.append(u'$%s$' % (''.join(data[''])))
                elif tag in BLOCK and tag != 'p':
                    # Drop block elements other than paragraphs.
                    continue
                elif tag == 'p':
                    if paragraphfound:
                        # Only the first paragraph is kept.
                        continue
                    else:
                        paragraphfound = True
                        r.extend(self._strip(data[''], paragraphfound))
                elif tag == 'charref':
                    r.append(u''.join(data['']))
                elif tag == 'br':
                    # Drop line-break tags.
                    continue
                elif tag in ('sup', 'sub', 'em', 'strong', 'a'):
                    # Strip the tag but keep its content.
                    r.extend(self._strip(data[''], paragraphfound))
                elif tag == 'span' and self._is_special_span(data.get('class', '').split()):
                    # Special spans: strip only the tag.
                    r.extend(self._strip(data[''], paragraphfound))
                elif tag == 'img':
                    # For img, keep the alt text.
                    if data.get('ah_filled') != u'auto':
                        r.append(data.get('alt', u''))
                elif tag == 'ruby':
                    # Render as plain text: base + parenthesized reading.
                    # Fixed NameError: the module is imported as `xjson`, not
                    # `json` (cf. the same calls in _transform_rb above).
                    rb = u''.join(self._strip(xjson.untagged(data[''][0])['']))
                    rp1 = u''.join(self._strip(xjson.untagged(data[''][1])['']))
                    rt = u''.join(self._strip(xjson.untagged(data[''][2])['']))
                    rp2 = u''.join(self._strip(xjson.untagged(data[''][3])['']))
                    r.append(u'%s%s%s%s' % (rb, rp1, rt, rp2))
        return u''.join(r)
    def _is_special_span(self, classes):
        """True for spans whose markup should be dropped in strip()."""
        if not classes:
            return True
        for c in ['tt', 'note', 'notice', 'gext']:
            if c in classes:
                return True
        return False
def find_mathjax(input):
    """Collect a truthy marker for every MathJax-related node in *input*."""
    found = []
    if isinstance(input, (basestring, xjson.TaggedValue)):
        return find_mathjax([input])
    for node in input:
        if xjson.tag(node) == 'string':
            continue
        _tag, data = xjson.split_tag(node)
        for key, value in data.items():
            if key == '':
                # Recurse into the element body.
                found.extend(find_mathjax(value))
            elif key == 'class':
                classes = value.split()
                if '__mathjaxsection__' in classes or '__rawmathml__' in classes:
                    found.append(True)
    return found
def extract_image(input):
    """Yield the src attribute of every img element in *input*, recursively."""
    if isinstance(input, (basestring, xjson.TaggedValue)):
        for i in extract_image([input]):
            yield i
        # Fixed: stop after handling the wrapped scalar. Previously execution
        # fell through to the loop below and iterated the scalar itself
        # (character by character for strings, or over a TaggedValue), unlike
        # the early return used by find_mathjax for the same case.
        return
    for node in input:
        if xjson.tag(node) == 'string':
            pass
        else:
            tag, data = xjson.split_tag(node)
            for k, v in data.items():
                if k == 'src' and tag == 'img':
                    yield v
                elif k == '':
                    # Recurse into the element body.
                    for i in extract_image(v):
                        yield i
|
#
# Copyright 2021 Mobvista
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
import re
import sys
import subprocess
import yaml
class JobRunnder(object):
def __init__(self):
self._debug_mode = None
self._agent_class = None
self._user_name = None
self._spark_log_level = None
self._job_config = None
self._cmdline_args = None
self._is_local_mode = None
self._batch_size = None
self._worker_count = None
self._server_count = None
self._worker_cpu = None
self._server_cpu = None
self._worker_memory = None
self._server_memory = None
self._python_env = None
self._python_ver = None
self._archives = None
self._py_files = None
self._files = None
self._jars = []
def parse_args(self):
parser = argparse.ArgumentParser(description="job runner for PS on PySpark")
parser.add_argument('-d', '--debug-mode', action='store_true',
help="log commands for debugging purpose")
parser.add_argument('-a', '--agent-class', type=str,
help="PS agent class to use")
parser.add_argument('-u', '--user-name', type=str, required=True,
help="PySpark job user name; required for cluster job")
parser.add_argument('-j', '--job-name', type=str,
help="PySpark job name; generate one if not specified")
parser.add_argument('-c', '--job-config', type=str,
help="job config YAML file path; relative to -C if specified")
parser.add_argument('-b', '--batch-size', type=int,
help="override batch size specified in config file")
parser.add_argument('-w', '--worker-count', type=int,
help="override worker count specified in config file")
parser.add_argument('-s', '--server-count', type=int,
help="override server count specified in config file")
parser.add_argument('--worker-cpu', type=int,
help="override worker cpu specified in config file")
parser.add_argument('--server-cpu', type=int,
help="override server cpu specified in config file")
parser.add_argument('--worker-memory', type=str,
help="override worker memory specified in config file")
parser.add_argument('--server-memory', type=str,
help="override server memory specified in config file")
parser.add_argument('-e', '--python-env', type=str,
help="override python-env.tgz specified in config file")
parser.add_argument('-v', '--python-ver', type=str,
help="override Python version specified in config file; "
"default to the version of the current Python interpreter")
parser.add_argument('--conf', type=str, action='append',
help="pass NAME=VALUE option to PS agent as attributes")
parser.add_argument('--spark-conf', type=str, action='append',
help="pass extra NAME=VALUE --conf options to spark-submit")
parser.add_argument('--spark-env', type=str, action='append',
help="pass extra NAME=VALUE environment options to spark-submit")
parser.add_argument('--spark-archives', type=str, action='append',
help="pass extra --archives options to spark-submit")
parser.add_argument('--spark-py-files', type=str, action='append',
help="pass extra --py-files options to spark-submit")
parser.add_argument('--spark-files', type=str, action='append',
help="pass extra --files options to spark-submit")
parser.add_argument('--spark-jars', type=str, action='append',
help="pass extra --jars options to spark-submit")
SPARK_LOG_LEVELS = 'ALL', 'DEBUG', 'ERROR', 'FATAL', 'INFO', 'OFF', 'TRACE', 'WARN'
parser.add_argument('-L', '--spark-log-level', type=str, default='WARN',
choices=SPARK_LOG_LEVELS, help="set Spark log level; default to WARN")
parser.add_argument('-C', '--chdir', type=str,
help="execute 'chdir' before run the job")
parser.add_argument('--local', action='store_true',
help="run local mode job")
parser.add_argument('--cluster', action='store_true',
help="run cluster mode job")
parser.add_argument('extra_args', type=str, nargs='*',
help="pass extra arguments to spark-submit, "
"only available when no agent class is specified; "
"the '--' argument separator may be used")
args = parser.parse_args()
if not args.local and not args.cluster:
message = "one of --local and --cluster must be specified"
raise RuntimeError(message)
if args.local and args.cluster:
message = "only one of --local and --cluster can be specified"
raise RuntimeError(message)
if not args.agent_class and not args.extra_args:
message = "one of --agent-class and extra_args must be specified"
raise RuntimeError(message)
if args.agent_class and args.extra_args:
message = "only one of --agent-class and extra_args can be specified"
raise RuntimeError(message)
if args.chdir:
os.chdir(args.chdir)
self._debug_mode = args.debug_mode
self._agent_class = args.agent_class
self._user_name = args.user_name
self._job_name = args.job_name
self._spark_log_level = args.spark_log_level
if args.job_config is None:
self._job_config = dict()
else:
with open(args.job_config) as fin:
self._job_config = yaml.full_load(fin)
self._cmdline_args = args
self._is_local_mode = args.local
conf = self._get_spark_config(args)
self._batch_size = self._get_batch_size(args, conf)
self._worker_count = self._get_node_count(args, conf, 'worker')
self._server_count = self._get_node_count(args, conf, 'server')
self._worker_cpu = self._get_node_cpu(args, conf, 'worker')
self._server_cpu = self._get_node_cpu(args, conf, 'server')
self._worker_memory = self._get_node_memory(args, conf, 'worker')
self._server_memory = self._get_node_memory(args, conf, 'server')
if not args.local:
self._python_env = self._get_node_python_env(args, conf)
self._python_ver = self._get_node_python_ver(args, conf)
def _get_spark_config(self, args):
if args.local:
conf = self._job_config.get('local')
if conf is None:
conf = dict()
else:
conf = self._job_config.get('cluster')
if conf is None:
conf = self._job_config.get('distributed')
if conf is None:
conf = dict()
return conf
def _get_batch_size(self, args, conf):
key = 'batch_size'
value = getattr(args, key)
if value is not None:
if value <= 0:
message = "batch size must be positive integer; "
message += "%d specified in command line is invalid" % value
raise ValueError(message)
return value
value = conf.get(key)
if value is None:
message = "batch size is not specified in command line nor config file"
raise RuntimeError(message)
if not isinstance(value, int) or value <= 0:
message = "batch size must be positive integer; "
message += "%r specified in config file is invalid" % value
raise ValueError(message)
return value
def _get_node_count(self, args, conf, role):
key = role + '_count'
value = getattr(args, key)
if value is not None:
if value <= 0:
message = "%s count must be positive; " % role
message += "%d specified in command line is invalid" % value
raise ValueError(message)
return value
value = conf.get(key)
if value is None:
alt_key = role + 's'
value = conf.get(alt_key)
if value is None:
message = "%s count is not specified in command line nor config file" % role
raise RuntimeError(message)
if not isinstance(value, int) or value <= 0:
message = "%s count must be positive integer; " % role
message += "%r specified in config file is invalid" % value
raise ValueError(message)
return value
def _get_node_cpu(self, args, conf, role):
key = role + '_cpu'
value = getattr(args, key)
if value is not None:
if value <= 0:
message = "%s cpu must be positive; " % role
message += "%d specified in command line is invalid" % value
raise ValueError(message)
return value
value = conf.get(key)
if value is None:
if self._is_local_mode:
# This is not used in local mode, return a dummy value.
return 1
message = "%s cpu is not specified in command line nor config file" % role
raise RuntimeError(message)
if not isinstance(value, int) or value <= 0:
message = "%s cpu must be positive integer; " % role
message += "%r specified in config file is invalid" % value
raise ValueError(message)
return value
def _get_node_memory(self, args, conf, role):
key = role + '_memory'
value = getattr(args, key)
if value is not None:
return value
value = conf.get(key)
if value is None:
if self._is_local_mode:
# This is not used in local mode, return a dummy value.
return '1G'
message = "%s memory is not specified in command line nor config file" % role
raise RuntimeError(message)
return value
def _get_node_python_env(self, args, conf):
key = 'python_env'
value = getattr(args, key)
if value is not None:
return value
value = conf.get(key)
if value is None:
key2 = 'python-env'
value = conf.get(key2)
if value is None:
message = "python-env is not specified in command line nor config file"
raise RuntimeError(message)
return value
def _get_node_python_ver(self, args, conf):
key = 'python_ver'
value = getattr(args, key)
if value is not None:
return value
value = conf.get(key)
if value is not None:
return value
v = sys.version_info
value = '%s.%s.%s' % (v.major, v.minor, v.micro)
return value
def _check_python_env(self):
if os.path.isfile(self._python_env):
pass
elif os.path.isdir(self._python_env):
py_ver = '.'.join(self._python_ver.split('.')[:-1])
ma_dir = 'lib/python%s/site-packages/mindalpha' % py_ver
ma_path = os.path.join(self._python_env, ma_dir)
if not os.path.isdir(ma_path):
message = "%r is not a valid python-env, " % self._python_env
message += "because MindAlpha is not found in it"
raise RuntimeError(message)
tgz_path = self._python_env + '.tgz'
tgz_mtime = 0.0
if os.path.isfile(tgz_path):
tgz_mtime = os.path.getmtime(tgz_path)
dir_mtime = os.path.getmtime(self._python_env)
if dir_mtime > tgz_mtime:
args = ['tar', '-czf', tgz_path, '-C', self._python_env] + os.listdir(self._python_env)
subprocess.check_call(args)
self._python_env = tgz_path
else:
message = "python-env %r not found" % self._python_env
raise RuntimeError(message)
def _normalize_option_value(self, value):
if isinstance(value, str):
return value
if value is None:
return 'null'
if value is True:
return 'true'
if value is False:
return 'false'
return str(value)
def _get_driver_memory(self):
return '5G'
def _get_executor_memory(self):
from mindalpha import job_utils
mem = job_utils.merge_storage_size(self._worker_memory, self._server_memory)
return mem
def _get_executor_count(self):
num = self._worker_count + self._server_count
return str(num)
def _get_executor_cores(self):
return str(self._worker_cpu)
def _get_launcher_local_path(self):
from mindalpha import ps_launcher
path = ps_launcher.__file__
return path
def _get_python_executable_path(self):
if self._is_local_mode:
python_path = sys.executable
else:
python_path = './python-env/bin/python'
return python_path
def _get_cluster_ld_library_path(self):
ld_library_path = './python-env/lib'
return ld_library_path
def _get_spark_submit_command(self):
python_path = self._get_python_executable_path()
args = ['env']
args += ['PYSPARK_PYTHON=%s' % python_path]
args += ['PYSPARK_DRIVER_PYTHON=%s' % python_path]
args += ['spark-submit']
return args
def _get_spark_master_config(self):
if self._is_local_mode:
args = ['--master', 'local[%s]' % self._get_executor_count()]
else:
args = ['--master', 'yarn', '--deploy-mode', 'cluster', '--name', self._get_job_name()]
return args
def _get_spark_executors_config(self):
conf = dict()
python_path = self._get_python_executable_path()
conf['spark.sql.execution.arrow.maxRecordsPerBatch'] = self._batch_size
conf['spark.yarn.appMasterEnv.PYSPARK_PYTHON'] = python_path
conf['spark.yarn.appMasterEnv.PYSPARK_DRIVER_PYTHON'] = python_path
conf['spark.executorEnv.PYSPARK_PYTHON'] = python_path
conf['spark.executorEnv.PYSPARK_DRIVER_PYTHON'] = python_path
if not self._is_local_mode:
ld_library_path = self._get_cluster_ld_library_path()
conf['spark.yarn.appMasterEnv.LD_LIBRARY_PATH'] = ld_library_path
conf['spark.executorEnv.LD_LIBRARY_PATH'] = ld_library_path
conf['spark.executorEnv.PYTHONPATH'] = ''
conf['spark.executorEnv.PYTHONNOUSERSITE'] = '1'
conf['spark.python.worker.reuse'] = 'true'
conf['spark.dynamicAllocation.enabled'] = 'false'
conf['spark.shuffle.service.enabled'] = 'false'
conf['spark.sql.execution.arrow.pyspark.enabled'] = 'true'
conf['spark.task.maxFailures'] = '1'
conf['spark.yarn.maxAppAttempts'] = '1'
conf['spark.scheduler.minRegisteredResourcesRatio'] = '1.0'
conf['spark.scheduler.maxRegisteredResourcesWaitingTime'] = '1800s'
spark_conf = self._job_config.get('spark_conf')
spark_env = self._job_config.get('spark_env')
if spark_conf is not None:
conf.update(spark_conf)
if spark_env is not None:
for name, value in spark_env.items():
if self._is_local_mode and name == 'PYTHONPATH':
continue
conf['spark.yarn.appMasterEnv.%s' % name] = value
conf['spark.executorEnv.%s' % name] = value
if self._cmdline_args.spark_conf is not None:
for item in self._cmdline_args.spark_conf:
name, sep, value = item.partition('=')
if not sep:
message = "'=' not found in --spark-conf %s" % item
raise ValueError(message)
conf[name] = value
if self._cmdline_args.spark_env is not None:
for item in self._cmdline_args.spark_env:
name, sep, value = item.partition('=')
if not sep:
message = "'=' not found in --spark-env %s" % item
raise ValueError(message)
if self._is_local_mode and name == 'PYTHONPATH':
continue
conf['spark.yarn.appMasterEnv.%s' % name] = value
conf['spark.executorEnv.%s' % name] = value
args = []
for name, value in conf.items():
value = self._normalize_option_value(value)
args += ['--conf', '%s=%s' % (name, value)]
return args
def _get_spark_resources_config(self):
args = []
if not self._is_local_mode:
args += ['--driver-memory', self._get_driver_memory()]
args += ['--num-executors', self._get_executor_count()]
args += ['--executor-memory', self._get_executor_memory()]
args += ['--executor-cores', self._get_executor_cores()]
args += ['--conf', 'spark.task.cpus=%s' % self._get_executor_cores()]
args += ['--conf', 'spark.kubernetes.executor.request.cores=%s' % self._get_executor_cores()]
args += ['--conf', 'spark.executorEnv.OMP_NUM_THREADS=%s' % self._get_executor_cores()]
return args
def _get_spark_files_config(self):
args = []
from mindalpha.url_utils import use_s3a
if not self._is_local_mode:
args += ['--archives', use_s3a(','.join(self._archives))]
args += ['--py-files', use_s3a(','.join(self._py_files))]
args += ['--files', use_s3a(','.join(self._files))]
if self._jars is not None and len(self._jars) > 0:
args += ['--jars', use_s3a(','.join(self._jars))]
return args
def _get_job_name(self):
import re
from datetime import datetime
from mindalpha import network_utils
from mindalpha import __version__
if self._job_name is not None:
return self._job_name
timestamp = datetime.now().strftime('%Y_%m_%d__%H_%M_%S__%f')
host_ip = network_utils.get_host_ip()
host_ip = re.sub(r'\W', '_', host_ip)
user_name = re.sub(r'\W', '_', self._user_name)
if self._agent_class:
class_name = re.sub(r'\W', '_', self._agent_class)
else:
class_name = 'NoAgentClass'
ps_version = re.sub(r'\W', '_', __version__)
job_name = f'ML__{timestamp}__{host_ip}__{user_name}__{class_name}__PS_{ps_version}'
return job_name
def _get_ps_launcher_config(self):
if not self._agent_class:
return self._cmdline_args.extra_args
args = [self._get_launcher_local_path()]
args += ['--agent-class', str(self._agent_class)]
args += ['--worker-count', str(self._worker_count)]
args += ['--server-count', str(self._server_count)]
args += ['--job-name', self._get_job_name()]
args += ['--spark-log-level', self._spark_log_level]
conf = dict()
agent_conf = self._job_config.get('agent')
if agent_conf is not None:
conf.update(agent_conf)
if self._cmdline_args.conf is not None:
for item in self._cmdline_args.conf:
name, sep, value = item.partition('=')
if not sep:
message = "'=' not found in --conf %s" % item
raise ValueError(message)
conf[name] = value
for name, value in conf.items():
value = self._normalize_option_value(value)
args += ['--conf', '%s=%s' % (name, value)]
return args
def find_files(self):
# jars should be included for local mode
config_jars = self._job_config.get('spark_jars')
if config_jars is not None:
self._jars.extend([config_jars] if type(config_jars) == str else config_jars)
if self._cmdline_args.spark_jars is not None:
jars = self._cmdline_args.spark_jars
self._jars.extend([jars] if type(jars) == str else jars)
# ignore archives, py_files, files for local mode
if self._is_local_mode:
return
self._check_python_env()
archives = []
py_files = []
files = []
def scan_files(dir_path):
items = os.listdir(dir_path)
for item in items:
item_path = os.path.join(dir_path, item)
if os.path.isdir(item_path):
if item in ('python-env', '.pyenv', '.git') or item.startswith('spark-'):
continue
scan_files(item_path)
elif os.path.isfile(item_path):
if item.endswith('.py'):
py_files.append(item_path + '#' + item_path)
elif item.endswith('.yaml'):
files.append(item_path + '#' + item_path)
elif item.endswith('.txt'):
if item.startswith('column_name') or item.startswith('combine_schema'):
files.append(item_path + '#' + item_path)
archives.append(self._python_env + '#python-env')
scan_files('.')
spark_archives = self._job_config.get('spark_archives')
spark_py_files = self._job_config.get('spark_py_files')
spark_files = self._job_config.get('spark_files')
if spark_archives is not None:
for name, path in spark_archives.items():
archives.append('%s#%s' % (path, name))
if spark_py_files is not None:
for name, path in spark_py_files.items():
py_files.append('%s#%s' % (path, name))
if spark_files is not None:
for name, path in spark_files.items():
files.append('%s#%s' % (path, name))
if self._cmdline_args.spark_archives is not None:
for item in self._cmdline_args.spark_archives:
archives.append(item)
if self._cmdline_args.spark_py_files is not None:
for item in self._cmdline_args.spark_py_files:
py_files.append(item)
if self._cmdline_args.spark_files is not None:
for item in self._cmdline_args.spark_files:
files.append(item)
self._archives = tuple(archives)
self._py_files = tuple(py_files)
self._files = tuple(files)
def spark_submit(self):
args = self._get_spark_submit_command()
args += self._get_spark_master_config()
args += self._get_spark_executors_config()
args += self._get_spark_resources_config()
args += self._get_spark_files_config()
args += self._get_ps_launcher_config()
if self._debug_mode:
from mindalpha import shell_utils
shell_utils.log_command(args)
try:
subprocess.check_call(args)
except subprocess.CalledProcessError as e:
from mindalpha import shell_utils
message = "spark-submit command failed with exit code %d" % e.returncode
shell_utils.log_command(args)
shell_utils.log_error(message)
raise RuntimeError(message) from e
    def run(self):
        # Full job lifecycle: parse the command line, resolve the files
        # that must ship with the job, then hand off to spark-submit.
        self.parse_args()
        self.find_files()
        self.spark_submit()
def main():
    """Script entry point: construct a job runner and execute it."""
    # NOTE(review): 'JobRunnder' looks like a typo for 'JobRunner', but it
    # must match the class name defined elsewhere in this file — confirm.
    JobRunnder().run()
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
import os
from scipy.spatial import KDTree
import ttide as tt
import datetime
import numpy as np
import scipy.io as sio
from .log_progress import log_progress
from .haversine import haversine
def read_atg(atg_data,site_id,constit_list):
    """Extract one tide-gauge station record from the loaded ATG .mat struct.

    Args:
        atg_data: the 'atg' field of a scipy.io.loadmat() result
            (MATLAB struct rendered as nested object arrays).
        site_id: 1-based station index.
        constit_list: tidal constituent names (e.g. ['M2', 'O1']).

    Returns:
        dict of station metadata plus, for each requested constituent,
        an array [amplitude_m, Greenwich_phase].
    """
    site_data = {}
    # [0,0] unwraps the MATLAB struct; [site_id-1] selects the station
    # (ids are 1-based, arrays 0-based).
    for key in ['site_id','name','lat','lon','amp','Gphase','reclen','delta_t','meas_type','ref']:
        site_data[key] =np.squeeze(atg_data[key][0,0][site_id-1])
    site_data['constit']=np.squeeze(atg_data['constit'][0,0][:])
    site_data['name'] = site_data['name'].strip()
    # Amplitudes are converted from centimetres to metres here.
    cm2m = 1.0/100.0
    for const in constit_list:
        # Position of this constituent within the station's constituent list.
        atg_con_ind = list(site_data['constit']).index(const)
        site_data[const]=np.array([site_data['amp'][atg_con_ind]*cm2m, site_data['Gphase'][atg_con_ind]])
    return site_data
def station_ttide(zeta_da,grid,lat_t,lon_t,stime,constit_list):
    """Harmonic (t_tide) analysis of ROMS sea level nearest a target point.

    Args:
        zeta_da: xarray DataArray of sea level on the (eta_rho, xi_rho) grid.
        grid: xarray Dataset with lat_rho / lon_rho / mask_rho fields.
        lat_t, lon_t: target (tide gauge) coordinates.
        stime: analysis start time passed through to t_tide.
        constit_list: constituent names to extract from the fit.

    Returns:
        (eta_rho, xi_rho, distance_km, result_dict) where result_dict maps
        each constituent to its t_tide 'tidecon' row (NaNs if the fit fails).
    """
    # Flatten the 2-D grid so wet (mask==True) points index as a 1-D list.
    zeta_flat = zeta_da.stack(etaxi = ('eta_rho','xi_rho'))
    grid_flat = grid.stack(etaxi = ('eta_rho','xi_rho'))
    lat_s = grid_flat.lat_rho.values[grid_flat.mask_rho.values==True]
    lon_s = grid_flat.lon_rho.values[grid_flat.mask_rho.values==True]
    zeta_s = zeta_flat.values[:,grid_flat.mask_rho.values==True]
    etaxi_s = zeta_flat.etaxi.values[grid_flat.mask_rho.values==True]
    # Nearest wet grid point to the target, in (lat, lon) space.
    # NOTE(review): KDTree distance is in degrees, not km; it is only kept
    # under 'dist_to ATG' (key contains a space) and superseded below.
    points = np.column_stack((lat_s,lon_s))
    tree = KDTree(points)
    target = np.column_stack((lat_t,lon_t))
    dist, ind = tree.query(target)
    #dist=dist*10.0
    tmp={}
    tmp['roms_signal'] = zeta_s[:,ind].squeeze()
    tmp['roms_ind'],tmp['dist_to ATG'] = ind,dist
    lat_r = lat_s[ind]
    lon_r = lon_s[ind]
    # Recover the (eta, xi) pair from the stacked-coordinate tuple string.
    # NOTE(review): np.fromstring on text is deprecated — np.array of the
    # parsed tuple would be the modern equivalent; verify before changing.
    eta_rho,xi_rho = np.fromstring(str(etaxi_s[ind])[2:-2], sep=', ',dtype=int)
    #print('atg lat(lon): %.2f,%.2f'%(lat_t,lon_t))
    #print('roms lat(lon): %.2f,%.2f'%(lat_r,lon_r))
    # Great-circle distance (km) between gauge and chosen grid point.
    dist = haversine(lon_t,lat_t,lon_r,lat_r)
    try:
        tmp['t_tide']=tt.t_tide(tmp['roms_signal'],dt=1,stime=stime,lat=lat_r,out_style=None)
    except TypeError:
        # t_tide failed (e.g. degenerate signal): report NaNs for every
        # requested constituent instead of raising.
        for const in constit_list:
            tmp[const]=[np.nan,np.nan,np.nan,np.nan]
        return eta_rho,xi_rho,dist,tmp
    for const in constit_list:
        # t_tide stores constituent names as padded byte strings.
        tide_con_ind = list(tmp['t_tide']['nameu']).index(str.encode(const+' '))
        tmp[const]=tmp['t_tide']['tidecon'][tide_con_ind]
    #print(eta_rho,xi_rho)
    return eta_rho,xi_rho,dist,tmp
def rmse(predictions, targets):
    """Root-mean-square error of *predictions* against *targets*, NaN-aware."""
    squared_error = (predictions - targets) ** 2
    return np.sqrt(np.nanmean(squared_error))
def complex_rmse(predictions, targets):
    """RMSE for complex amplitudes: sqrt of half the mean squared residual magnitude."""
    residual = predictions - targets
    return np.sqrt(0.5 * np.nanmean((residual * np.conjugate(residual)).real))
def calc_rmse(station_dict,constit_list):
    """Per-constituent RMSE between model ('tt') and observed ('atg') harmonics.

    For each constituent, gathers amplitude and phase across all stations and
    returns {'amp': ..., 'phase': ..., 'complex_amp': ...} per constituent.

    NOTE(review): phases look like degrees (t_tide Gphase) but are fed to
    np.exp(1j*phase), which expects radians — confirm upstream units.
    """
    const_rmse = {}
    for constit in constit_list:
        model_amp, obs_amp = [], []
        model_phi, obs_phi = [], []
        model_z, obs_z = [], []
        for data in station_dict.values():
            a_model = data['tt'][constit][0]
            a_obs = data['atg'][constit][0]
            p_model = data['tt'][constit][2]
            p_obs = data['atg'][constit][1]
            model_amp.append(a_model)
            obs_amp.append(a_obs)
            model_phi.append(p_model)
            obs_phi.append(p_obs)
            model_z.append(a_model * np.exp(1j * p_model))
            obs_z.append(a_obs * np.exp(1j * p_obs))
        const_rmse[constit] = {
            'amp': rmse(np.asarray(model_amp), np.asarray(obs_amp)),
            'phase': rmse(np.asarray(model_phi), np.asarray(obs_phi)),
            'complex_amp': complex_rmse(np.asarray(obs_z), np.asarray(model_z)),
        }
    return const_rmse
def print_station_dict(station_dict,constit_list):
    """Print a per-station comparison table, one section per constituent.

    Each row shows gauge (atg) vs model (tt) amplitude and phase with their
    fit errors, then the station name, record length and distance (km) to
    the nearest model point.
    """
    print("Station ID || Amp(amp_err)[m]: atg roms || phase(phase_err)[deg]: atg roms || Station Name; RecLen [days]; Nearest Neibour [km]")
    for constit in constit_list:
        print(constit)
        for station_id,data in station_dict.items():
            # tt rows are [amp, amp_err, phase, phase_err]; atg rows are [amp, phase].
            print(station_id,"|| %0.2f"%data['atg'][constit][0]," %0.2f(%0.2f) "%(data['tt'][constit][0],data['tt'][constit][1]),\
                  "|| %0.2f"%data['atg'][constit][1]," %0.2f(%0.2f) "%(data['tt'][constit][2],data['tt'][constit][3]),\
                  "|| ",data['atg']['name']," ",data['atg']['reclen'],' %0.2f' %data['dist'][0])
def print_rmse(rmse_dict, constit_list):
    """Print one RMSD summary line per constituent from a calc_rmse() result."""
    template = ' RMSD: amp = %.2f m phase = %.2f deg complex amp = %.2f m'
    for constit in constit_list:
        stats = rmse_dict[constit]
        print(constit + template % (stats['amp'], stats['phase'], stats['complex_amp']))
def compare_atg(roms_zeta_da,grid,atg_mat_path=None,stime=datetime.datetime(2007,1,1),constit_list = ['M2','O1'],station_list=np.arange(1,109),print_flag=True):
    """Compare ROMS tidal harmonics against the ATG tide-gauge dataset.

    Args:
        roms_zeta_da: xarray DataArray of model sea level.
        grid: xarray Dataset describing the model grid.
        atg_mat_path: path to the ATG .mat file.  Defaults to
            $projdir/data/analysis/external/atg/ATG_ocean_height_2010_0908.mat,
            resolved lazily — the old default evaluated
            os.path.join(os.environ.get('projdir'), ...) at import time and
            raised TypeError whenever 'projdir' was unset, even for callers
            that always pass an explicit path.
        stime: analysis start time for t_tide.
        constit_list: constituents to compare (never mutated, so the shared
            default list is safe).
        station_list: 1-based ATG station ids to process.
        print_flag: when True, print the comparison table and RMSD summary.

    Returns:
        (station_dict, rmse_dict)
    """
    if atg_mat_path is None:
        # KeyError with the variable name is clearer than the old
        # os.path.join(None, ...) TypeError.
        atg_mat_path = os.path.join(os.environ['projdir'],'data','analysis','external','atg','ATG_ocean_height_2010_0908.mat')
    print('stime = ',stime,' constits = ',constit_list,'stations = ',station_list)
    mat_content = sio.loadmat(atg_mat_path)
    atg_data = mat_content['atg']
    station_dict = {}
    for station in log_progress(station_list,name='stations'):
        #print('processing station ',station)
        station_dict[station] = {}
        atg_dict = read_atg(atg_data,station,constit_list)
        lat = atg_dict['lat']
        lon = atg_dict['lon']
        eta_rho,xi_rho,dist,tt_dict = station_ttide(roms_zeta_da,grid,lat,lon,stime,constit_list)
        #print_comparison(tt_dict,atg_dict,constit_list)
        station_dict[station]['atg'] = atg_dict
        station_dict[station]['tt'] = tt_dict
        station_dict[station]['dist'] = dist
        station_dict[station]['eta_rho'] = eta_rho
        station_dict[station]['xi_rho'] = xi_rho
    rmse_dict = calc_rmse(station_dict,constit_list)
    if print_flag == True:
        print_station_dict(station_dict,constit_list)
        print_rmse(rmse_dict,constit_list)
    return station_dict,rmse_dict
|
import locale
import os
from shutil import copyfile
import numpy as np
from scipy.io import wavfile
from timecode import Timecode
from edit_point import EditPoint
from parameters import InputParameter
from shell_utils import do_shell
OS_ENCODING = locale.getpreferredencoding()
class BaseOutput(object):
    """Common interface for the concrete edit-list / video writers.

    Stores the parsed input parameters and derives the directory, file
    name and extension-less file name of the input video for reuse.
    """

    def __init__(self, parameter: InputParameter):
        self.parameter = parameter
        input_file = parameter.input_file
        self.input_file_dir = os.path.dirname(input_file)
        self.input_file_name = os.path.basename(input_file)
        # Everything before the last dot (the extension separator).
        dot_index = self.input_file_name.rfind('.')
        self.input_file_name_without_extension = self.input_file_name[:dot_index]

    def apply_edit_point(self, edit_point: EditPoint, audio_data, start_output_frame, end_output_frame):
        """Consume one edit point; implemented by concrete outputs."""
        pass

    def close(self):
        """Finalize the output; implemented by concrete outputs."""
        pass
class EdlOutput(BaseOutput):
    """Writes a CMX3600-style EDL (edit decision list) instead of rendering video."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.parameter.output_file:
            # Default: "<input name>.edl" next to the input file.
            self.parameter.output_file = os.path.join(
                self.input_file_dir,
                f'{self.input_file_name_without_extension}.edl'
            )
        self.edl_file = open(self.parameter.output_file, "w", encoding=OS_ENCODING)
        self.edl_file.write(f'TITLE: {self.input_file_name_without_extension}\n\n')
        # EDL events are numbered from 001.
        self.index = 1
    def apply_edit_point(self, edit_point: EditPoint, audio_data, start_output_frame, end_output_frame):
        # Timecode frames are 1-based, hence the +1 on every frame index.
        edit_point_output_start = Timecode(self.parameter.frame_rate, frames=start_output_frame + 1)
        edit_point_output_end = Timecode(self.parameter.frame_rate, frames=end_output_frame + 1)
        # provide one frame buffer for motion events. if the output length is less than 2 frames, cut it off.
        if edit_point_output_end.frames - edit_point_output_start.frames > 1:
            edit_point_start = Timecode(self.parameter.frame_rate, frames=edit_point.start_frame + 1)
            edit_point_end = Timecode(self.parameter.frame_rate, frames=edit_point.end_frame + 1)
            # Standard EDL event line: source in/out followed by record in/out.
            self.edl_file.write(
                f'{self.index:03d} AX AA/V C '
                f'{edit_point_start} {edit_point_end} {edit_point_output_start} {edit_point_output_end}\n')
            self.edl_file.write(f'* FROM CLIP NAME: {self.input_file_name}\n')
            if not edit_point.should_keep:
                # M2 AX 086.7 00:00:16:16
                output_length = edit_point_output_end - edit_point_output_start
                original_length = edit_point_end - edit_point_start
                if output_length != original_length:
                    # adobe premiere may complain about the motion events with such an 'accurate' new_frame_rate.
                    # so we leave one frame as a buffer to hold the whole input video frames after speed changes.
                    # it's safe to subtract 1 from output_length as we have already guaranteed 2 frames at least.
                    new_frame_rate = original_length.frames / (output_length.frames - 1) * self.parameter.frame_rate
                    # M2 motion-effect line: play the source at new_frame_rate.
                    self.edl_file.write(
                        f'M2 AX '
                        f'{new_frame_rate:05.1f}'
                        f' '
                        f'{edit_point_start}\n'
                    )
            self.edl_file.write('\n')
            self.index += 1
    def close(self):
        # Flush and release the .edl file handle.
        self.edl_file.close()
class DirectVideoOutput(BaseOutput):
    """Cuts the video directly with an ffmpeg select/aselect filter script.

    Unlike EdlOutput this writer can only REMOVE segments, not change their
    speed: edit points whose output collapses below two frames are recorded
    and later excluded via select='not(...)'.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.parameter.output_file:
            # Default: "<input name>_edited.<original extension>".
            self.parameter.output_file = os.path.join(
                self.input_file_dir,
                f'{self.input_file_name_without_extension}_edited{self.input_file_name[self.input_file_name.rfind("."):]}'
            )
        # Accumulated ffmpeg between(...) expressions for segments to drop.
        self.audio_edit_config = []
        self.video_edit_config = []
    def apply_edit_point(self, edit_point: EditPoint, audio_data, start_output_frame, end_output_frame):
        edit_point_output_start = Timecode(self.parameter.frame_rate, frames=start_output_frame + 1)
        edit_point_output_end = Timecode(self.parameter.frame_rate, frames=end_output_frame + 1)
        # provide one frame buffer for motion events. if the output length is less than 2 frames, cut it off.
        # NOTE(review): condition is intentionally inverted relative to
        # EdlOutput — only the segments being cut are recorded here.
        if edit_point_output_end.frames - edit_point_output_start.frames <= 1:
            edit_point_start = Timecode(self.parameter.frame_rate, frames=edit_point.start_frame + 1)
            edit_point_end = Timecode(self.parameter.frame_rate, frames=edit_point.end_frame + 1)
            # Video select works on frame numbers, audio select on seconds.
            self.video_edit_config.append(f"between(n, {edit_point_start.frames}, {edit_point_end.frames - 1})")
            self.audio_edit_config.append(f"between(t, {edit_point_start.float}, {edit_point_end.float})")
    def close(self):
        # Emit the filter script: keep every frame/sample NOT inside a
        # recorded cut range, then re-stamp timestamps to stay contiguous.
        with open(f"{self.parameter.temp_folder}/filter_script.txt", "w", encoding=OS_ENCODING) as config_file:
            config_file.write("select='not(\n")
            config_file.write("+".join(self.video_edit_config))
            config_file.write(")',setpts=N/FR/TB; \n")
            config_file.write("aselect='not(\n")
            config_file.write("+".join(self.audio_edit_config))
            config_file.write(")', asetpts=N/SR/TB\n")
        # Use ffmpeg filter to cut videos directly.
        if self.parameter.use_hardware_acc:
            do_shell(
                f'ffmpeg -hwaccel cuda -thread_queue_size 1024 '
                f'-y -filter_complex_script {self.parameter.temp_folder}/filter_script.txt '
                f'-i {self.parameter.input_file} -c:v h264_nvenc "{self.parameter.output_file}"'
            )
        else:
            do_shell(
                f'ffmpeg -thread_queue_size 1024 '
                f'-y -filter_complex_script {self.parameter.temp_folder}/filter_script.txt '
                f'-i {self.parameter.input_file} "{self.parameter.output_file}"'
            )
# Deprecated. Will be removed soon.
class LegacyVideoOutput(BaseOutput):
    """Frame-by-frame writer: explodes the input to JPEGs, copies/duplicates
    frames per edit point, rebuilds audio, then re-encodes with ffmpeg."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.parameter.output_file:
            # Default: "<input name>_edited.<original extension>".
            self.parameter.output_file = os.path.join(
                self.input_file_dir,
                f'{self.input_file_name_without_extension}_edited{self.input_file_name[self.input_file_name.rfind("."):]}'
            )
        # Dump every input frame to temp_folder as frame%06d.jpg.
        do_shell(f'ffmpeg -i "{self.parameter.input_file}" -qscale:v {str(self.parameter.frame_quality)} {self.parameter.temp_folder}/frame%06d.jpg -hide_banner')
        self.last_existing_frame = None
        # Accumulates the (normalized) output audio track.
        self.output_audio_data = np.zeros((0, self.parameter.audio_data.shape[1]))
    def apply_edit_point(self, edit_point: EditPoint, audio_data, start_output_frame, end_output_frame):
        self.output_audio_data = np.concatenate(
            (self.output_audio_data, audio_data / self.parameter.max_audio_volume))
        for outputFrame in range(start_output_frame, end_output_frame):
            # Map the output frame back to an input frame at the speed
            # selected for kept/dropped segments.
            input_frame = int(
                edit_point.start_frame + self.parameter.new_speed[int(edit_point.should_keep)] * (outputFrame - start_output_frame))
            did_it_work = self.copy_frame(input_frame, outputFrame)
            if did_it_work:
                self.last_existing_frame = input_frame
            else:
                # Source frame missing on disk: repeat the last good frame.
                self.copy_frame(self.last_existing_frame, outputFrame)
    def copy_frame(self, input_frame, output_frame):
        # Returns False when the source JPEG does not exist.
        src = f"{self.parameter.temp_folder}/frame{input_frame + 1:06d}.jpg"
        dst = f"{self.parameter.temp_folder}/newFrame{output_frame + 1:06d}.jpg"
        if not os.path.isfile(src):
            return False
        copyfile(src, dst)
        if output_frame % 20 == 19:
            print(str(output_frame + 1) + " time-altered frames saved.")
        return True
    def close(self):
        # Write the rebuilt audio, then mux frames + audio into the output.
        wavfile.write(f'{self.parameter.temp_folder}/audioNew.wav', self.parameter.sample_rate, self.output_audio_data)
        do_shell(
            f'ffmpeg -thread_queue_size 1024 -framerate {str(self.parameter.frame_rate)} '
            f'-i {self.parameter.temp_folder}/newFrame%06d.jpg -i {self.parameter.temp_folder}/audioNew.wav -strict -2 "{self.parameter.output_file}"'
        )
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List, Sized, Tuple
import torch as torch
import torch.nn as nn
from pytext.utils.cuda_utils import xaviervar
# token/non-terminal/sub-tree element on a stack.
# need this value for computing valid actions
class Element:
    """One stack entry (token, non-terminal, or sub-tree node).

    A thin wrapper so that stack contents print readably and valid-action
    computation can inspect the wrapped node.
    """

    def __init__(self, node) -> None:
        self.node = node

    def __repr__(self):
        return str(self.node)

    def __str__(self):
        return str(self.node)
class StackLSTM(Sized):
    """A stack whose contents are additionally encoded by an LSTM.

    Each entry of self.list is a tuple (rnn_state, (output_vector, element)),
    so a push runs the RNN one step from the previous state, and a pop
    restores the previous encoding in O(1).  Index 0 holds a "Root" sentinel,
    hence __len__ subtracts one.
    """
    def __init__(self, rnn, initial_state, p_empty_embedding):
        # initial_state may be falsy (see copy()), in which case the caller
        # is expected to populate self.list afterwards.
        self.rnn = rnn
        self.list = (
            [(initial_state, (self._rnn_get_output(initial_state), "Root"))]
            if initial_state
            else None
        )
        # Embedding reported while the stack is logically empty.
        self.empty = p_empty_embedding
    def _rnn_get_output(self, state):
        # state is an (h, c)-style tuple; take the last layer of h.
        return state[0][-1]
    def push(self, expr, ele: Element) -> None:
        # assuming expr is always one element at a time; not a sequence.
        # making it a sequence since LSTM takes a sequence
        expr = expr.unsqueeze(1)
        output, new_embedding = self.rnn(expr, self.list[-1][0])
        self.list.append((new_embedding, (self._rnn_get_output(new_embedding), ele)))
    def pop(self) -> Tuple[Any, Element]:
        # returning tuple of out embedding and the name of the element
        return self.list.pop()[1]
    def top(self) -> Tuple[Any, Element]:
        # Peek at the (output_vector, element) pair without popping.
        return self.list[-1][1]
    def embedding(self):
        # Current stack encoding, or the empty-stack embedding when only
        # the Root sentinel remains.
        return (
            self._rnn_get_output(self.list[-1][0]) if len(self.list) > 1 else self.empty
        )
    def first_ele_match(self, funct):
        # Topmost element (searching top-down) whose raw entry satisfies funct.
        for st in self.list[::-1]:
            if funct(st):
                return st[1][1]
        return None
    def ele_from_top(self, index: int) -> Element:
        # Element at depth `index` below the top (0 == top).
        return self.list[len(self.list) - index - 1][1][1]
    def __len__(self):
        # Exclude the Root sentinel entry.
        return len(self.list) - 1
    def __str__(self):
        return "->".join([str(x[1][1]) for x in self.list])
    def copy(self):
        # Shallow copy: shares rnn and state tuples, clones the list spine.
        other = StackLSTM(self.rnn, None, self.empty)
        other.list = list(self.list)
        return other
class CompositionFunction(nn.Module):
    """Base class for modules that reduce a list of sub-tree embeddings."""

    def __init__(self):
        super().__init__()
class CompositionalNN(CompositionFunction):
    """Bidirectional-LSTM composition of a sub-tree's embeddings."""
    def __init__(self, lstm_dim):
        super().__init__()
        self.lstm_dim = lstm_dim
        # Separate single-layer LSTMs for the forward and reversed orderings.
        self.lstm_fwd = nn.LSTM(lstm_dim, lstm_dim, 1)
        self.lstm_rev = nn.LSTM(lstm_dim, lstm_dim, 1)
        # Projects the concatenated fwd+rev outputs back to lstm_dim.
        self.linear_seq = nn.Sequential(nn.Linear(2 * lstm_dim, lstm_dim), nn.Tanh())
    def forward(self, x):
        """
        Embed the sequence. If the input corresponds to [IN:GL where am I at]:
        - x will contain the embeddings of [at I am where IN:GL] in that order.
        - Forward LSTM will embed the sequence [IN:GL where am I at].
        - Backward LSTM will embed the sequence [IN:GL at I am where].
        The final hidden states are concatenated and then projected.
        Args:
            x: Embeddings of the input tokens in *reversed* order
        """
        # reset hidden every time
        # xaviervar comes from pytext.utils.cuda_utils (project helper).
        lstm_hidden_fwd = (
            xaviervar(1, 1, self.lstm_dim),
            xaviervar(1, 1, self.lstm_dim),
        )
        lstm_hidden_rev = (
            xaviervar(1, 1, self.lstm_dim),
            xaviervar(1, 1, self.lstm_dim),
        )
        # x is reversed, so the non-terminal is the last entry.
        nt_element = x[-1]
        rev_rest = x[:-1]
        # Always put nt_element at the front
        fwd_input = [nt_element] + rev_rest[::-1]
        rev_input = [nt_element] + rev_rest
        # [0] selects the LSTM output sequence; the second [0] takes the
        # output at position 0, i.e. at the non-terminal.
        stacked_fwd = self.lstm_fwd(torch.stack(fwd_input), lstm_hidden_fwd)[0][0]
        stacked_rev = self.lstm_rev(torch.stack(rev_input), lstm_hidden_rev)[0][0]
        combined = torch.cat([stacked_fwd, stacked_rev], dim=1)
        subtree_embedding = self.linear_seq(combined)
        return subtree_embedding
class CompositionalSummationNN(CompositionFunction):
    """Compose sub-tree embeddings by summation plus a tanh projection."""

    def __init__(self, lstm_dim):
        super().__init__()
        self.lstm_dim = lstm_dim
        self.linear_seq = nn.Sequential(nn.Linear(lstm_dim, lstm_dim), nn.Tanh())

    def forward(self, x):
        # Sum the stacked embeddings along the sequence axis (keeping a
        # leading dim of 1), then project back to lstm_dim.
        pooled = torch.sum(torch.cat(x, dim=0), dim=0, keepdim=True)
        return self.linear_seq(pooled)
class ParserState:
    """Complete shift-reduce parser state: stacks, action history, score.

    ParserState() with no argument builds an empty shell that copy()
    populates; passing a parser initializes every stack from it.
    """

    def __init__(self, parser=None):
        if not parser:
            # Empty shell for copy(); no attributes are set here.
            return
        self.buffer_stackrnn = StackLSTM(
            parser.buff_rnn, parser.init_lstm(), parser.pempty_buffer_emb
        )
        self.stack_stackrnn = StackLSTM(
            parser.stack_rnn, parser.init_lstm(), parser.empty_stack_emb
        )
        self.action_stackrnn = StackLSTM(
            parser.action_rnn, parser.init_lstm(), parser.empty_action_emb
        )
        self.predicted_actions_idx = []
        self.action_scores = []
        self.num_open_NT = 0
        self.is_open_NT: List[bool] = []
        self.found_unsupported = False
        # Negative cumulative log-prob, so ascending sort puts the most
        # probable state first.
        self.neg_prob = 0

    def finished(self):
        """True once the buffer is exhausted and only the root remains."""
        return len(self.stack_stackrnn) == 1 and len(self.buffer_stackrnn) == 0

    def copy(self):
        """Clone the state: stacks/lists are copied, scalars are shared."""
        clone = ParserState()
        clone.buffer_stackrnn = self.buffer_stackrnn.copy()
        clone.stack_stackrnn = self.stack_stackrnn.copy()
        clone.action_stackrnn = self.action_stackrnn.copy()
        clone.predicted_actions_idx = self.predicted_actions_idx.copy()
        clone.action_scores = self.action_scores.copy()
        clone.num_open_NT = self.num_open_NT
        clone.is_open_NT = self.is_open_NT.copy()
        clone.neg_prob = self.neg_prob
        clone.found_unsupported = self.found_unsupported
        return clone

    def __gt__(self, other):
        return self.neg_prob > other.neg_prob

    def __eq__(self, other):
        return self.neg_prob == other.neg_prob
|
from datetime import datetime, timedelta
from NodeDefender.db.sql import SQL, HeatModel, NodeModel, iCPEModel, GroupModel
from sqlalchemy import func
from sqlalchemy.sql import label
from itertools import groupby
def current(group):
    """Heat readings from the last 30 minutes for every node in a group.

    Returns a list of {'name', 'heat'} dicts, one per node with an iCPE,
    followed by one group-total entry, or False when the group is unknown.
    """
    group = SQL.session.query(GroupModel).filter(GroupModel.name ==
                                                group).first()
    if group is None:
        return False
    ret_data = []
    group_data = {}
    group_data['name'] = group.name
    # Sum of the per-node averages (not an average of averages).
    group_data['heat'] = 0.0
    for node in group.nodes:
        # Nodes without an attached iCPE have no heat source to query.
        if not node.icpe:
            continue
        node_data = {}
        node_data['name'] = node.name
        min_ago = (datetime.now() - timedelta(hours=0.5))
        # Aggregate sum/count of HeatModel.average within the window so the
        # mean can be computed without loading every row.
        latest_heat = SQL.session.query(HeatModel,\
                                    label('sum', func.sum(HeatModel.average)),
                                    label('count', func.count(HeatModel.average))).\
                join(HeatModel.icpe).\
                filter(iCPEModel.mac_address == node.icpe.mac_address).\
                filter(HeatModel.date > min_ago).first()
        if latest_heat.count:
            node_data['heat'] = latest_heat.sum / latest_heat.count
            group_data['heat'] += node_data['heat']
        else:
            # No samples in the window: report zero for this node.
            node_data['heat'] = 0.0
        ret_data.append(node_data)
    # Group summary is appended last, after all node entries.
    ret_data.append(group_data)
    return ret_data
def _group_heat_average(icpes, since):
    """Mean of HeatModel.average across the given iCPE MACs since *since*.

    Returns 0.0 when no samples exist in the window.
    """
    heat = SQL.session.query(HeatModel,\
                             label('sum', func.sum(HeatModel.average)),
                             label('count', func.count(HeatModel.average))).\
            join(HeatModel.icpe).\
            filter(iCPEModel.mac_address.in_(icpes)).\
            filter(HeatModel.date > since).first()
    if heat.count:
        return heat.sum / heat.count
    return 0.0

def average(group):
    """Current / daily / weekly / monthly heat averages for a group.

    Returns {'name', 'current', 'daily', 'weekly', 'monthly'} or False when
    the group does not exist.  The four aggregate queries previously
    appeared as four near-identical copy-pasted blocks; they now share
    _group_heat_average().
    """
    group = SQL.session.query(GroupModel).filter(GroupModel.name ==
                                                group).first()
    if group is None:
        return False
    now = datetime.now()
    # Only nodes with an attached iCPE contribute heat samples.
    icpes = [node.icpe.mac_address for node in group.nodes if node.icpe]
    group_data = {}
    group_data['name'] = group.name
    group_data['current'] = _group_heat_average(icpes, now - timedelta(hours=0.5))
    group_data['daily'] = _group_heat_average(icpes, now - timedelta(days=1))
    group_data['weekly'] = _group_heat_average(icpes, now - timedelta(days=7))
    group_data['monthly'] = _group_heat_average(icpes, now - timedelta(days=30))
    return group_data
def chart(group):
    """Per-node heat history for the last 30 days, one point per sample date.

    Returns a list of {'name': ..., 'heat': [{'date': ..., 'value': ...}]}
    entries, or False when the group does not exist.
    """
    from_date = (datetime.now() - timedelta(days=30))
    to_date = datetime.now()
    group = SQL.session.query(GroupModel).filter(GroupModel.name ==
                                                group).first()
    if group is None:
        return False
    ret_data = []
    for node in group.nodes:
        if not node.icpe:
            continue
        heat_data = SQL.session.query(HeatModel).\
                join(HeatModel.icpe).\
                filter(iCPEModel.mac_address == node.icpe.mac_address).\
                filter(HeatModel.date > from_date).\
                filter(HeatModel.date < to_date).all()
        if not heat_data:
            continue
        node_data = {}
        node_data['name'] = node.name
        node_data['heat'] = []
        # Group consecutive samples sharing the same date so each date
        # produces a single chart point.
        grouped_data = [list(v) for k, v in groupby(heat_data, lambda p:
                                                    p.date)]
        for data in grouped_data:
            entry = {'date' : str(data[0].date)}
            for heat in data:
                # Fold each sample into a running average.  The original
                # code read entry['heat'] — a key that was never written —
                # so the averaging branch was dead and every sample simply
                # overwrote entry['value'].
                if 'value' in entry:
                    entry['value'] = (heat.average + entry['value']) / 2
                else:
                    entry['value'] = heat.average
            node_data['heat'].append(entry)
        ret_data.append(node_data)
    return ret_data
|
<reponame>cuttlefishh/papers
#!/usr/bin/env python
import urllib
import sys
import os
##### totalannotation.py by <NAME>
##### This script takes an input fasta file of sequence names and sequences, and blast results files of blasts against
##### nr (parsed .txt with 1 hit per line) and swissprot and tremble (in -outfmt 7) uniprot databases
##### and downloads the corresponding uniprot flat files from the www.uniprot.org web server,
##### extracts particular annotation information from the nr blast and each uniprot flat file and combines it into a meta-annotation table.
##### you will need to create a 2-line .txt file that has the names of the particular columns you would like to extract from the
##### nr parsed blast file separated by tabs (these files can be large so I suggest extracting the header using head or less in terminal
##### the second line consists of the "bad words" you want to skip over in you nr results separated by tabs.
##### I usually use "predicted PREDICTED hypothetical unknown" or some combination thereof.
# usage is totalannotation.py YOUR_contigs.fasta BLASTx2nr.txt nrcolumnheadersandbadwords.txt BLASTx2Sprot.txt BLASTx2TrEMBL.txt evaluethreshold directoryforflatfiles(no slashes) outtablename.txt
#this is for setting how the script sorts your contigs into order
#change the word to 'text' for a text-based sorting or 'coral' for a
#palumbi-lab coral-specific numerical sorting
textorcoralsort = 'text'
#innames, inseqs read_fasta_lists(sys.argv[1])
#sys.argv[2] = BLASTx2nr.txt
#sys.argv[3] = thingsfornr.txt
#uniprotIDs read_uniprot(sys.argv[4], sys.argv[5])
# Global e-value threshold applied to every blast hit parsed below.
evalue=float(sys.argv[6])
# Directory used to cache downloaded uniprot flat files.
directory=sys.argv[7] #name only, no /'s
#o=open(str(sys.argv[8]), 'w') # New data table file name
#####This reads in a fasta file and extracts the sequence names into a dictionary as the keys
#####This reads in a fasta file and maps each sequence name to its length
def read_fasta_dict(file):
    """Return {first whitespace-token of each fasta header: str(sequence length)}.

    Rewritten without the 'EOF' sentinel hack: the original appended the
    literal string 'EOF' to the file lines (concatenating it onto the last
    sequence) and raised NameError on an empty input file because 'cols'
    and 'seq' were referenced before assignment.  Output is unchanged for
    well-formed fasta input; an empty file now yields {}.
    """
    names = {}
    name = None
    seq = ''
    fin = open(file, 'r')
    for line in fin:
        line = line.strip()
        if line and line[0] == '>':
            # Record the previous sequence before starting a new one.
            if name is not None:
                names[name] = '%i' % len(seq)
            name = line[1:].split(' ')[0]
            seq = ''
        elif name is not None:
            # Sequence line (blank lines add nothing after strip()).
            seq += line
    if name is not None:
        names[name] = '%i' % len(seq)
    fin.close()
    return names
# Build {contig_name: length} for every sequence in the input fasta.
innames=read_fasta_dict(sys.argv[1])
print 'Read in fasta of %i sequences: ...' %(len(innames.keys()))
####This function reads in a parsed (every hit on one line) nr blast file and extracts certain columns and returns a dictionary
def nr_dict(file, colstoextract):
fin = open(file, 'r') # open input file
cols2extract = open(colstoextract, 'r')
d={}
headers=[]
contig=''
linenum=0
goodnrhits=0
for line in fin:
linenum+=1
line=line.rstrip()
cols=line.split('\t')
if linenum == 1:
headers=line #Used to copy header to new files
# this loop is for extracting the column indexes for the column names specified on the first line of the stufffornr.txt file
extractlinecount=0
for aline in cols2extract:
extractlinecount+=1
if extractlinecount==1:
aline=aline.rstrip()
words=aline.split('\t')
hitdescription=cols.index(words[0])
nrEval=cols.index(words[1])
if linenum >1:
cols[0]=cols[0].split(' ')[0]
if cols[0] == contig:
# print line
d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
else:
if float(cols[nrEval]) <= evalue:
goodnrhits+=1
contig = cols[0]
numhit = 1
d[cols[0]]=d.get(cols[0],[])
d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
fin.close()
cols2extract.close()
return headers, d, goodnrhits
# Parse the nr blast results and report match statistics.
headers, d, goodnrhits=nr_dict(sys.argv[2], sys.argv[3])
print "Read in nr blast..."
print '%s%i' %('Number of good nr matches: ',goodnrhits)
print '%s%i' %('Number not matched in nr: ',len(innames.keys())-goodnrhits)
print "Searching for badwords..."
######This function parses the nr dictionary for hits that do not contain badwords (e.g. 'Predicted', 'hypothetical', etc.)
def parse_badwords(value, badwords):
onlybad=0
madegood=0
badhits=[]
goodhits=[]
tophit=value[0]
for each in value:
numbadhits=0
for item in badwords:
if item in each:
numbadhits+=1
if numbadhits >=1:
badhits.append(each)
if numbadhits == 0:
goodhits.append(each)
if len(goodhits)==0:
onlybad +=1
if len(goodhits)>=1:
madegood +=1
goodhits+=badhits
return tophit, goodhits, onlybad, madegood
badwordlist=[]
#reading in a list of badwords from stufffornr.txt
# The second line of the file holds the tab-separated bad words.
badwordfile=open(sys.argv[3],'r')
badwordline=0
for line in badwordfile:
    badwordline+=1
    if badwordline==2:
        line=line.rstrip()
        badwordlist=line.split('\t')
onlybadnrs=0
madegoodnrs=0
####this step loops through the entries in your contig dictionary
####and calls the badword parser for each entry that has a match in the nr dictionary and returns the top hit and the top non-badword hit (if there is one)
for key,value in innames.items():
    if d.has_key(key):
        tophit, goodhits, onlybad, madegood= parse_badwords(d[key], badwordlist)
        # Append "top hit" and "best non-badword hit" columns to the row.
        innames[key]='%s\t%s\t%s' %(innames[key],tophit, goodhits[0])
        onlybadnrs+=onlybad
        madegoodnrs+=madegood
    else:
        # No nr hit: pad the same four columns with placeholders.
        innames[key]+='\t%s\t%s\t%s\t%s' %('No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit')
print '%s%i' %('Number of nr hits with only a bad word hit: ', onlybadnrs)
print '%s%i' %('Number of nr hits with a good word hit: ', madegoodnrs)
#######This function reads in the swissprot and trembl outputs and returns
#######a dictionary that contains the top uniprot ID from swissprot (if available) or trembl (if no swissprot match was found)
def read_uniprot(sprotfile,tremblfile):
    """Read swissprot then trembl blast tables (-outfmt 7, '#' comments).

    For each query the first hit below the module-level 'evalue' threshold
    is kept; swissprot takes precedence and trembl only adds queries that
    swissprot missed.  Appends 'ID\\tidentity\\tevalue' to the matching rows
    of the global 'innames' table as a side effect.

    Returns (uniprotIDs, unique_ids) where uniprotIDs maps query -> list of
    uniprot accessions and unique_ids is the set (as dict keys) of top
    accessions whose flat files must be downloaded.
    """
    queryname=''
    uniprotIDs={}
    uniqueprotIDs={}
    sprotmatch=0
    tremblpeats=0
    tremblmatch=0
    sprot = open(sprotfile,'r')
    trembl = open(tremblfile,'r')
    for line in sprot:
        line=line.rstrip()
        if line[0] == '#':
            continue
        else:
            cols=line.split('\t')
            # Only the first (top) hit per query is considered.
            if cols[0] == queryname:
                continue
            else:
#                if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value and excluding hits to Nematostella
                if float(cols[10]) <= evalue: #for parsing based on threshold value only
                    # Subject looks like db|ACCESSION|NAME: keep the accession.
                    ID=cols[1].split('|')
                    uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[])
                    uniprotIDs[cols[0]].append(ID[1])
                    if innames.has_key(cols[0]):
                        sprotmatch+=1
                        innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10])
                    queryname=cols[0]
                    if uniqueprotIDs.has_key(ID[1]):
                        continue
                    else:
                        uniqueprotIDs[uniprotIDs[cols[0]][0]]=''
    print 'Read in swissprot blast ...'
    print '%s%i' %('Number of good swissprot matches: ', sprotmatch)
    for line in trembl:
        line=line.rstrip()
        if line[0] == '#':
            continue
        else:
            cols=line.split('\t')
            if cols[0] == queryname:
                continue
            else:
#                if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value
                if float(cols[10]) <= evalue: #for parsing based on threshold value
                    ID=cols[1].split('|')
                    if uniprotIDs.has_key(cols[0]):
                        # Query already matched in swissprot: record the
                        # trembl accession but keep the swissprot columns.
                        uniprotIDs[cols[0]].append(ID[1])
                        queryname=cols[0]
                        tremblpeats+=1
                    else:
                        uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[])
                        uniprotIDs[cols[0]].append(ID[1])
                        if innames.has_key(cols[0]):
                            innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10])
                            queryname=cols[0]
                            tremblmatch+=1
                        if uniqueprotIDs.has_key(uniprotIDs[cols[0]][0]):
                            continue
                        else:
                            uniqueprotIDs[uniprotIDs[cols[0]][0]]=''
    print 'Read in TrEMBL blast ...'
    print '%s%i'%('Number of repeat matches from TrEMBL: ', tremblpeats)
    print '%s%i'%('Number of additional good matches from TrEMBL: ', tremblmatch)
    print '%s%i' %('flatfilesneeded: ',len(uniqueprotIDs.keys()))
    return uniprotIDs, uniqueprotIDs
#this line calls the uniprot reading function
uniprotIDs, uniquesforflats=read_uniprot(sys.argv[4], sys.argv[5])
print 'downloading flat files ...'
#this loop downloads all the uniprot flat files for the list of unique uniprotIDs that was parsed from the blast results
# Already-downloaded files are reused as a simple on-disk cache.
for key, value in uniquesforflats.items():
    if os.path.exists('./'+directory+'/'+key+'.txt'): #thanks JTL for this addition!
        continue
    else:
        urllib.urlretrieve('http://www.uniprot.org/uniprot/'+key+'.txt', './'+directory+'/'+key+'.txt')
print 'extracting relevant info from flat files ...'
print 'don\'t worry this takes awhile ...'
########this function extracts the relevant information from each individual flat file
def extractGO(contigname):
if uniprotIDs.has_key(contigname):
flatfile = open('./'+directory+'/'+uniprotIDs[contigname][0]+'.txt','r')
ID='No_ID'
DE='No_description'
description=0
KEGG='No_KEGG'
KEGGKO='No_KEGGKO'
flatfiledict={}
GOcodes=''
GOBP=''
GOMF=''
GOCC=''
keywords=''
for line in flatfile:
line=line.rstrip()
if line[0:2] == 'ID':
line=line.split(' ')
ID=line[3]
if line[0:2] == 'DE' and description == 0:
line=line.split('=')
DE=line[1][:-1]
description +=1
if line[0:2] == 'DR':
if line[5:9] == 'KEGG':
line=line.split(';')
KEGG=line[1].strip()
if line[5:7] == 'KO':
line=line.split(';')
KEGGKO=line[1].strip()
if line[5:7] == 'GO':
line=line.split(';')
if GOcodes == '':
GOcodes+='%s' %(line[1].strip())
else:
GOcodes+=' // %s' %(line[1].strip())
if line[2].strip().split(':')[0] == 'C':
GOCC+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[2].strip().split(':')[0] == 'P':
GOBP+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[2].strip().split(':')[0] == 'F':
GOMF+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[0:2] == 'KW':
line=line[2:].split(';')
for item in line:
if item == '':
continue
else:
keywords+='%s;' %(item.strip())
if GOcodes=='':
GOcodes='No_GOcodes'
if GOBP=='':
GOBP='No_GOBP'
if GOMF=='':
GOMF='No_GOMF'
if GOCC=='':
GOCC='No_GOCC'
if keywords=='':
keywords='No_keywords'
outstring='\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' %(ID, DE, KEGG, KEGGKO, GOcodes, GOBP, GOMF, GOCC, keywords)
nomatch=0
else:
nomatch=1
outstring='\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' %('No_Uniprotmatch','No_%_identity','No_evalue','No_ID','No_Description','No_KEGG', 'No_KEGGKO','No_GO','No_GOCC','No_GOBP','No_GOMF','No_keywords')
return outstring, nomatch
notmatched=0
extractingcounter=0
#####This loop calls the extraction function for each contig that has a uniprot match
for key, value in innames.items():
    extractingcounter+=1
    outstring, nomatch = extractGO(key)
    innames[key]+=outstring
    notmatched+=nomatch
o=open(str(sys.argv[8]), 'w') # New data table file name
o.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %('ContigName', 'ContigLength', 'topnrMatch','topnrEvalue', 'nobadwordnrMatch', 'nobadwordnrEvalue','Uniprotmatch','%_identity','evalue','ID','Description','KEGG', 'KEGGKO','GO','GOCC','GOBP','GOMF','Keywords')) #used if you want a specific header and filename for each file
print '%s%i' %('Hits not matched in sprot: ', notmatched)
print 'compiling extracted information ...'
############this if for sorting your contigs based on text order#############
if textorcoralsort == 'text':
    l=[]
    for key,value in innames.items():
        l.append((key,value))
    l.sort()
    for item in l:
        o.write('%s\t%s\n' % (item[0], item[1])) #writes each line of the tuple as separate tab delimited text
    o.close()
#############this is for sorting your contigs based on our coral specific contig names##############
if textorcoralsort == 'coral':
    l=[]
    # Joined contigs get synthetic ids above this counter's start value.
    joinedcontigcounter=600247
    for key,value in innames.items():
        name=key.split(' ')
        if name[0][0:6]=='contig':
            newname=name[0].split('_')
            if len(newname)==1:
                num=int(newname[0][6:])
            if len(newname)>1:
                joinedcontigcounter+=1
                num=joinedcontigcounter
        # c_sym_/d_sym_ (symbiont) contigs are offset into distinct ranges.
        if name[0][0:6]=='c_sym_':
            newname=name[0].split('_')
            num=700000+int(newname[2])
        if name[0][0:6]=='d_sym_':
            newname=name[0].split('_')
            num=900000+int(newname[2])
        l.append((num,key,value))
    l.sort()
    for item in l:
#        print item
        o.write('%s\t%s\n' % (item[1], item[2])) #writes each line of the tuple as separate tab delimited text
    o.close()
|
__author__ = 'adrian.suciu'
import urllib.request
import sys
import os
import json
import random
import time
import urllib.parse as urlparse
from common import log
import common
import db
import settings
# Imgur OAuth application credentials; populated by init() from the
# "imgurtoken" file or the imgur_id / imgur_secret environment variables.
client_id = 0
client_secret = 0
# Most recently obtained OAuth token payload (access/refresh tokens,
# acquisition timestamp, lifetime, account name); empty when logged out.
current_token = {}
def get_token_from_imgur(param):
    """POST the given OAuth parameters to imgur's token endpoint.

    On success the module-level `current_token` is replaced with the new
    access/refresh token pair (plus acquisition timestamp and lifetime) and
    True is returned; on an HTTP-level failure, False is returned.
    """
    global current_token
    payload = json.dumps(param).encode('utf8')
    try:
        print(payload)
        response = urllib.request.urlopen("https://api.imgur.com/oauth2/token", data=payload)
        data = json.loads(response.read().decode('utf-8'))
        print(str(data))
        current_token = {
            'access_token': data['access_token'],
            'refresh_token': data['refresh_token'],
            'timestamp': time.time(),
            'expires_in': data['expires_in'],
            'account_username': data['account_username'],
        }
        return True
    except (urllib.request.URLError, urllib.request.HTTPError):
        print("Token cannot be refreshed due to HTTP Exception: " + (str(sys.exc_info())))
        return False
def get_token_from_pin(pin):
    """Trade a user-supplied OAuth PIN for an access token and persist it."""
    retval = get_token_from_imgur({
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "pin",
        "pin": pin,
    })
    write_token_to_file()
    return retval
def get_token_from_link(link):
    """Parse an OAuth redirect link and store the token embedded in its fragment.

    The imgur token-response flow places access_token / refresh_token /
    expires_in / account_username in the URL fragment.  Returns True and
    persists the token on success; returns False when the link does not carry
    the expected fields (previously this raised an uncaught KeyError).
    """
    global current_token
    parsed = urlparse.urlparse(link)
    print(urlparse.parse_qs(parsed.query))
    json_data = urlparse.parse_qs(parsed.fragment)
    try:
        current_token = {'access_token': json_data['access_token'][0], 'refresh_token': json_data['refresh_token'][0],
                         'timestamp': time.time(), "expires_in": json_data['expires_in'][0],
                         "account_username": json_data['account_username'][0]}
    except (KeyError, IndexError):
        # Malformed link: required OAuth fields missing from the fragment.
        # Report failure instead of crashing, and keep the old token on disk.
        print("Login link did not contain the expected token fields: " + str(sys.exc_info()))
        return False
    write_token_to_file()
    return True
def refresh_token():
    """Obtain a fresh access token via the stored refresh token, then persist it."""
    succeeded = get_token_from_imgur({
        "refresh_token": current_token['refresh_token'],
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": 'refresh_token',
    })
    write_token_to_file()
    return succeeded
def write_token_to_file():
    """Persist `current_token` as JSON into the token database.

    Replaces the old plain-file storage, which is why the name still says
    "file" — kept for compatibility with existing callers.
    """
    db.update(settings.imgurtoken_db, "token", json.dumps(current_token))
def read_token_from_file(filename):
    """Load the stored token from the token database into `current_token`.

    `filename` is retained for backward compatibility with the old file-based
    storage and is ignored; the data now comes from the database.
    """
    global current_token
    stored = db.select(settings.imgurtoken_db, "token")["token"]
    current_token = json.loads(stored)
def get_token():
    """Return True when a usable access token is held, refreshing it if stale.

    A token is treated as expired once 3/4 of its advertised lifetime has
    elapsed.  Returns False when no token is held or the refresh fails.
    """
    if not current_token:
        return False
    # Expire at 3/4 of the token lifetime to refresh comfortably early.
    token_expiration_timestamp = (int(current_token['timestamp'])) + ((int(current_token['expires_in'])) * 3 / 4)
    if time.time() > token_expiration_timestamp:
        # refresh_token() already persists the new token on success, so the
        # previous extra write_token_to_file() call here was redundant.
        if not refresh_token():
            return False
    return True
def build_header():
    """Build the Authorization header for imgur API calls.

    Uses the Bearer token when logged in, the anonymous Client-ID otherwise.
    """
    if get_token():
        # logged in
        return {"Authorization": "Bearer " + current_token['access_token']}
    # not logged in
    return {"Authorization": "Client-ID " + str(client_id)}
def logged_in():
    """Report whether a valid (or refreshable) token is currently held."""
    return get_token()
def logout():
    """Drop the stored token, both in memory and in the token database."""
    global current_token
    try:
        # Overwrite the persisted token with an empty object (was os.remove
        # when tokens lived in a plain file).
        db.update(settings.imgurtoken_db, "token", "{}")
        current_token = {}
    except OSError:
        # Kept from the file-based storage: a missing token is fine to "remove".
        pass
def get_bot_username():
    """Return the logged-in account name, or a placeholder when logged out."""
    if current_token:
        return current_token['account_username']
    return "not logged in"
def get_bot_imgur_profile():
    """Return the bot's imgur gallery URL, or a placeholder when logged out."""
    if not current_token:
        return "not logged in"
    return current_token['account_username'] + ".imgur.com/all"
def init():
    """Load application credentials and any previously stored token.

    Credentials come from the imgur_id / imgur_secret environment variables,
    falling back to the first two lines of the local "imgurtoken" file.
    """
    global client_id, client_secret
    with open("imgurtoken", "r") as f:
        lines = f.read().splitlines()
    try:
        client_id = os.environ.get('imgur_id', lines[0])
        client_secret = os.environ.get('imgur_secret', lines[1])
    except IndexError:
        # File too short: rely solely on the environment (empty default).
        client_id = os.environ.get('imgur_id', "")
        client_secret = os.environ.get('imgur_secret', "")
    try:
        read_token_from_file("token")
    except FileNotFoundError:
        print("Refresh token not available. Login via bot")
def imgur_pic(request):
    """Return a random top-of-week image link for a subreddit gallery.

    Expects `request` as [command, subreddit].  A "v" is appended to .gif
    links so imgur serves the gifv variant.  Returns a user-facing error
    string when the request is malformed, the subreddit has no images, or
    the API call fails.
    """
    if type(request) == list and len(request) == 2:
        req = urllib.request.Request("https://api.imgur.com/3/gallery/r/" + request[1] + "/top/week",
                                     headers=build_header())
        log("logged in as %s" % get_bot_username())
        response = common.send_http_query(req)  # urllib.request.urlopen(req).read()
        if response:
            json_data = json.loads(response.decode('utf-8'))
            if json_data['data']:  # data is available
                link_id = random.randint(0, len(json_data['data']) - 1)
                retval = (str(json_data['data'][link_id]['title']) + " - "
                          + str(json_data['data'][link_id]['link'] +
                                ("v" if (str(json_data['data'][link_id]['link']).endswith(".gif")) else "")))
                return retval
            else:
                # BUG FIX: the endpoint queries the top posts of the *week*,
                # but the message previously claimed "the past day".
                return "images in subreddit '%s' not found in the past week" % request[1]
        else:
            return "internal error. please try again."
    else:
        return "Wrong number of parameters. Usage /getpic [subreddit]"
def login_imgur(request):
    """Handle /login_imgur: show the auth URL, or consume a pasted redirect link."""
    if len(request) == 1:
        return "Go to the following website: \n" \
               "https://api.imgur.com/oauth2/authorize?client_id=%s&response_type=token\n" \
               "use command /login_imgur link" % client_id
    if len(request) == 2:
        # The PIN-based flow (get_token_from_pin) was replaced by the
        # link-based flow below.
        if get_token_from_link(request[1]):
            return "Logged in as: " + get_bot_username()
        return "Login failed. Imgur API might be down, or wrong pin code provided. Please try again"
    return "Wrong number of parameters. Usage /login_imgur"
# noinspection PyUnusedLocal
def login_status_imgur(request):
    """Handle /login_status_imgur; `request` is accepted but unused."""
    if not get_token():
        return "Not logged in"
    return ("Logged in as: " + get_bot_username() + "\n" +
            "Full gallery can be viewed at: " + get_bot_imgur_profile())
# noinspection PyUnusedLocal
def logout_imgur(request):
    """Handle /logout_imgur; `request` is accepted but unused."""
    logout()
    return "The bot has successfully logged out of Imgur"
|
import math
from functools import reduce, update_wrapper, wraps
from inspect import signature
from itertools import accumulate, chain, repeat
from typing import Callable
from ..utils.cells import combine_cells, fix_cells, mark_graphemes, split_graphemes
def spinner_player(spinner):
    """Create an infinite generator that plays all cycles of a spinner indefinitely."""
    def endless():
        while True:
            for frame in spinner():  # a fresh cycle is instantiated on every pass.
                yield frame

    return endless()  # hand back an already started generator.
def bordered(borders, default):
    """Decorator to include controllable borders in the outputs of a function."""
    left_cells, right_cells = extract_fill_graphemes(borders, default)

    def wrapper(fn):
        @wraps(fn)
        def inner_bordered(*args, **kwargs):
            # The wrapped function may supply its own right border; fall back
            # to the configured one when it does not.
            content, right = fn(*args, **kwargs)
            return combine_cells(left_cells, content, right or right_cells)

        return inner_bordered

    return wrapper
def extract_fill_graphemes(text, default):
    """Extract the exact same number of graphemes as default, filling missing ones.

    Each position of `text` (which may be None/empty, or shorter than
    `default`) is split into grapheme cells; positions that are missing or
    empty fall back to the corresponding `default` grapheme.  Yields one
    marked grapheme group per entry in `default`.
    """
    # An empty/None `text` degenerates to `default`, so every position has a value.
    text, default = (tuple(split_graphemes(c or '') for c in p) for p in (text or default, default))
    # repeat('') pads `text` out to the length of `default`; falsy entries
    # (missing or empty) take the default grapheme instead.
    return (mark_graphemes(t or d) for t, d in zip(chain(text, repeat('')), default))
def static_sliding_window(sep, gap, contents, length, right, initial):
    """Implement a sliding window over some content interspersed with a separator.
    It is very efficient, storing data in only one string.
    Note that the implementation is "static" in the sense that the content is pre-
    calculated and maintained static, but actually when the window slides both the
    separator and content seem to be moved.
    Also keep in mind that `right` is for the content, not the window.
    """
    def sliding_window():
        # Infinite generator: yields a `length`-cell slice, then moves the
        # cursor one step (direction given by `right`), wrapping around the
        # original (un-padded) content length.
        pos = initial
        while True:
            if pos < 0:
                pos += original
            elif pos >= original:
                pos -= original
            yield content[pos:pos + length]
            pos += step
    # Repeat/trim the separator so it spans exactly `gap` cells (if any gap).
    adjusted_sep = fix_cells((sep * math.ceil(gap / len(sep)))[:gap]) if gap else ''
    # Interleave one separator before each content piece, then flatten twice:
    # once over the (sep, piece) pairs, once over their cells.
    content = tuple(chain.from_iterable(chain.from_iterable(zip(repeat(adjusted_sep), contents))))
    original, step = len(content), -1 if right else 1
    assert length <= original, f'window slides inside content, {length} must be <= {original}'
    # Pad with a copy of the head so slices near the end never come up short.
    content += content[:length]
    return sliding_window()
def overlay_sliding_window(background, gap, contents, length, right, initial):
    """Implement a sliding window over some content on top of a background.

    It uses internally a static sliding window, but dynamically swaps the
    separator characters for the background ones, thus making it appear
    immobile, with the contents sliding over it.
    """
    # Tile the background to exactly `length` cells.
    padded_bg = (background * math.ceil(length / len(background)))[:length]
    # '\0' marks separator cells, which get replaced by background below.
    window = static_sliding_window('\0', gap, contents, length, right, initial)

    def compose():
        for cells in window:  # pragma: no cover
            yield tuple(bg if cell == '\0' else cell for cell, bg in zip(cells, padded_bg))

    return compose()
def combinations(nums):
    """Calculate the number of total combinations a few spinners should have together,
    can be used for example with cycles or with frames played at the same time."""
    def lcm(a, b):
        """Lowest common multiple of two numbers."""
        return a // math.gcd(a, b) * b

    return reduce(lcm, nums)
def split_options(options):
    """Split options that apply to dual elements, either duplicating or splitting."""
    if isinstance(options, tuple):
        return options
    return options, options
def spread_weighted(actual_length, naturals):
    """Calculate the weighted spreading of the available space for all natural lengths."""
    total = sum(naturals)
    # Cumulative rounded positions; same float-summation order as before,
    # so rounding behavior is unchanged.
    shares = (actual_length / total * n for n in naturals)
    marks = [round(s) for s in accumulate(shares)]
    # Consecutive differences of the cumulative marks give each final length.
    result = tuple(curr - prev for prev, curr in zip([0] + marks, marks))
    assert sum(result) == actual_length
    return result
def fix_signature(func: Callable, source: Callable, skip_n_params: int):
    """Override signature to hide first n parameters.

    Copies `source`'s metadata onto `func` (via update_wrapper), concatenates
    both docstrings when `func` had one, and publishes a signature with the
    first `skip_n_params` parameters removed.  Returns `func`.
    """
    doc_before = func.__doc__
    update_wrapper(func, source)
    if doc_before:
        func.__doc__ = f'{doc_before}\n{func.__doc__}'
    # signature() follows __wrapped__ (set by update_wrapper) to `source`.
    trimmed = tuple(signature(func).parameters.values())[skip_n_params:]
    func.__signature__ = signature(func).replace(parameters=trimmed)
    return func
def round_even(n):
    """Round a number to the nearest even integer."""
    # Truncate, then bump an odd result up to the next even number
    # (2*(q + r) == int(n) + r, matching the original bit-test form).
    half, parity = divmod(int(n), 2)
    return 2 * (half + parity)
|
from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
import os
import sys
import time
import datetime
import argparse
from PIL import Image
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
def run(opt, image_folder):
    """Run YOLO detection over `image_folder`/images and save per-frame features.

    For each image, the detection with the highest IoU against that frame's
    ground-truth box (restricted to the class ids in class.txt) is normalized
    to [cx, cy, w, h, conf] and appended to the flattened feature map, then
    saved as <image base name>.npy inside `image_folder`/yot_out.

    Parameters:
        opt: parsed argparse options (model_def, weights_path, img_size, ...).
        image_folder: sequence folder containing images/, class.txt and
            groundtruth_rect.txt.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    os.makedirs("output", exist_ok=True)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path))
    model.eval()  # Set in evaluation mode

    img_folder = os.path.join(image_folder, 'images')
    dataloader = DataLoader(
        ImageFolder(img_folder, img_size=opt.img_size),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )
    classes = load_classes(opt.class_path)  # Extracts class labels from file
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    ## TODO : make class id assigned autometically from rolo images
    # Class ids to track, one integer per line in class.txt.
    target_cls = []
    with open(os.path.join(image_folder, 'class.txt'), 'r') as file:
        try:
            for n in file.readlines():
                target_cls.append(int(n))
        except ValueError:
            print(target_cls)

    # Ground-truth boxes, one per frame: x, y, w, h — tab- or comma-separated.
    gt_list = []
    with open(os.path.join(image_folder, 'groundtruth_rect.txt'), 'r') as file:
        labels = file.readlines()
        for label in labels:
            l = label.split('\t')  # for gt type 2
            if len(l) < 4:
                l = label.split(',')  # for gt type 1
            gt_list.append(l)

    # Saving folder
    foldername = os.path.join(image_folder, 'yot_out')
    if not os.path.exists(foldername):
        os.makedirs(foldername)

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            (detections, colimg) = model(input_imgs)
            detection_list = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

        for detections in detection_list:
            img = np.array(Image.open(img_paths[0]))
            # Default location: image-centered empty box with zero confidence,
            # used when no acceptable detection is found.
            location = np.array([0.5, 0.5, 0, 0, 0], dtype=float)
            if detections is not None:
                # Rescale boxes to original image
                detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                max_iou = 0  # opt.tracking_thres
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    if int(cls_pred) not in target_cls:
                        print("\t\tclass id :", cls_pred)
                        continue
                    print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
                    box_w = x2 - x1
                    box_h = y2 - y1
                    cx = x1 + box_w / 2
                    cy = y1 + box_h / 2
                    b1 = torch.tensor([[cx, cy, box_w, box_h]])
                    # Ground truth is [left, top, w, h]; convert to center form.
                    b2 = np.array(gt_list[batch_i], dtype=float)
                    b2[0] += b2[2] / 2.
                    b2[1] += b2[3] / 2.
                    b2 = torch.tensor([b2], dtype=torch.float32)
                    iou = bbox_iou(b1, b2, False)
                    if iou >= max_iou:
                        max_iou = iou
                        # Normalize the coordinates with image width and height and class id
                        # [cx, cy, width, heigt, confidence, class id]
                        location[0] = cx / img.shape[1]
                        location[1] = cy / img.shape[0]
                        location[2] = box_w / img.shape[1]
                        location[3] = box_h / img.shape[0]
                        location[4] = conf

            # save a location and a feature image
            filename = img_paths[0].split("/")[-1].split(".")[0]
            save = np.concatenate((colimg.reshape(-1).numpy(), location))
            # BUG FIX: the output was previously saved under a literal
            # placeholder path, so every frame overwrote the same file; use
            # the image's base name (the `filename` computed above, which was
            # otherwise unused) so each frame gets its own .npy file.
            np.save(f"{foldername}/{filename}.npy", save)
            print("\t Saving the location : ", save[-5:])
if __name__ == "__main__":
    import multiprocessing
    # Use the 'spawn' start method (required for CUDA with DataLoader workers).
    multiprocessing.set_start_method('spawn', True)

    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset")
    parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
    parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
    parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
    parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
    parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
    parser.add_argument("--n_cpu", type=int, default=1, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
    parser.add_argument("--tracking_thres", type=float, default=0.2, help="path to checkpoint model")
    opt = parser.parse_args()
    print(opt)

    f = opt.image_folder
    # A trailing '*' means "run over every sequence folder under the parent";
    # note ('*') is just the string '*', not a tuple.
    if f.lower().endswith(('*')):
        root = os.path.dirname(f)
        for l in os.listdir(root):
            l = os.path.join(root, l)
            run(opt, l)
            print(l)
    else:
        # Single sequence folder.
        run(opt, f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.