signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Follow Figure 1 (left) for connections."""
    # First residual sublayer: self-attention, where query, key and value
    # are all the input x.
    attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
    # Second residual sublayer: position-wise feed-forward network.
    return self.sublayer[1](attended, self.feed_forward)
|
def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None):
    """Return a scalar value for the asset's field at ``dt`` with
    adjustments applied.

    Parameters
    ----------
    asset : Asset
        The asset whose data is desired.
    field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    perspective_dt : pd.Timestamp
        The timestamp from which the data is being viewed back from.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars.

    Returns
    -------
    value : float, int, or pd.Timestamp
        The value of ``field`` for ``asset`` at ``dt`` with any adjustments
        known by ``perspective_dt`` applied. The return type depends on the
        field: float for price-like fields, int for 'volume', Timestamp for
        'last_traded'.
    """
    if spot_value is None:
        # Fetcher fields only exist at daily (midnight) granularity, so
        # they must be queried as of perspective_dt rather than dt.
        is_fetcher = self._is_extra_source(asset, field, self._augmented_sources_map)
        query_dt = perspective_dt if is_fetcher else dt
        spot_value = self.get_spot_value(asset, field, query_dt, data_frequency)
    if isinstance(asset, Equity):
        # Scale by the cumulative adjustment ratio known at perspective_dt.
        ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
        spot_value *= ratio
    return spot_value
|
def update(self):
    """Update the current GPS location."""
    self._controller.update(self._id, wake_if_asleep=False)
    params = self._controller.get_drive_params(self._id)
    if not params:
        return
    self.__longitude = params['longitude']
    self.__latitude = params['latitude']
    self.__heading = params['heading']
    # Only publish a location once all three components are present.
    if self.__longitude and self.__latitude and self.__heading:
        self.__location = {
            'longitude': self.__longitude,
            'latitude': self.__latitude,
            'heading': self.__heading,
        }
|
def restart(name, timeout=90, with_deps=False, with_parents=False):
    '''Restart the named service by issuing a stop followed by a start.

    Args:
        name: The name of the service to restart.

            .. note::
                If the name passed is ``salt-minion`` a scheduled task is
                created and executed to restart the salt-minion service.

        timeout (int):
            Seconds to wait for the service to stop and then to start.
            Default is 90. Note the timeout is cumulative: it applies to
            the stop and then to the start, so a value of 90 can take up
            to 180 seconds in total.

            .. versionadded:: 2017.7.9,2018.3.4

        with_deps (bool):
            If enabled, also restart the services this service depends on.

        with_parents (bool):
            If enabled, also restart other running services that depend on
            this one; if disabled, the restart fails when such dependents
            exist.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    # Restarting the minion from within itself would kill this process,
    # so delegate the restart to a Windows scheduled task instead.
    if 'salt-minion' in name:
        create_win_salt_restart_task()
        return execute_salt_restart_task()
    outcomes = set()
    outcomes.add(stop(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))
    outcomes.add(start(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))
    # Succeed only when neither the stop nor the start reported failure.
    return False not in outcomes
|
def cast_value(self, value, constraints=True):
    """https://github.com/frictionlessdata/tableschema-py#field"""
    # Values configured as "missing" are treated as null.
    if value in self.__missing_values:
        value = None
    # Cast: only non-null values go through the type's cast function.
    result = value
    if value is not None:
        result = self.__cast_function(value)
        if result == config.ERROR:
            message = (
                'Field "{field.name}" can\'t cast value "{value}" '
                'for type "{field.type}" with format "{field.format}"'
            ).format(field=self, value=value)
            raise exceptions.CastError(message)
    # Constraint checks; passing a list restricts which constraints run.
    if constraints:
        for name, check in self.__check_functions.items():
            if isinstance(constraints, list) and name not in constraints:
                continue
            if not check(result):
                message = (
                    'Field "{field.name}" has constraint "{name}" '
                    'which is not satisfied for value "{value}"'
                ).format(field=self, name=name, value=value)
                raise exceptions.CastError(message)
    return result
|
def reset_password(self, user, password):
    """Service method to reset a user's password. Works like
    :meth:`change_password` except that a different notification email
    is sent.

    Sends signal `password_reset`.

    :param user: the user whose password is being reset
    :param password: the new password
    :return:
    """
    user.password = password
    self.user_manager.save(user)
    # Optionally notify the user that their password was reset.
    if app.config.SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL:
        self.send_mail(
            _('flask_unchained.bundles.security:email_subject.password_reset_notice'),
            to=user.email,
            template='security/email/password_reset_notice.html',
            user=user,
        )
    password_reset.send(app._get_current_object(), user=user)
|
def _preprocess_successor(self, state, add_guard=True):  # pylint:disable=unused-argument
    """Preprocesses the successor state.

    Simplifies the state/guard/target if the corresponding options are set,
    unwraps SimActionObjects, applies the guard constraint and new program
    counter, maintains the callstack, and strips bookkeeping options.

    :param state: the successor state
    :param add_guard: when True, add the branch guard as a constraint
    """
    # Next, simplify what needs to be simplified
    if o.SIMPLIFY_EXIT_STATE in state.options:
        state.solver.simplify()
    if o.SIMPLIFY_EXIT_GUARD in state.options:
        state.scratch.guard = state.solver.simplify(state.scratch.guard)
    if o.SIMPLIFY_EXIT_TARGET in state.options:
        state.scratch.target = state.solver.simplify(state.scratch.target)
    # unwrap stuff from SimActionObjects
    state.scratch.target = _raw_ast(state.scratch.target)
    state.scratch.guard = _raw_ast(state.scratch.guard)
    # apply the guard constraint and new program counter to the state
    if add_guard:
        state.add_constraints(state.scratch.guard)
    # trigger inspect breakpoints here since this statement technically shows
    # up in the IRSB as the "next"
    state.regs.ip = state.scratch.target
    # For architectures with no stack pointer, we can't manage a callstack.
    # This has the side effect of breaking SimProcedures that call out to
    # binary code self.call.
    if self.initial_state.arch.sp_offset is not None and not isinstance(state.arch, ArchSoot):
        self._manage_callstack(state)
    if len(self.successors) != 0:
        # This is a fork!
        state._inspect('fork', BP_AFTER)
    # clean up the state
    state.options.discard(o.AST_DEPS)
    state.options.discard(o.AUTO_REFS)
|
def funcSmthSpt(aryFuncChnk, varSdSmthSpt):
    """Apply spatial smoothing to the input data.

    Parameters
    ----------
    aryFuncChnk : np.array
        Data chunk, either 4D (x, y, z, time) or 5D with an additional
        motion-direction axis before the time axis.
    varSdSmthSpt : float
        Extent of smoothing (Gaussian standard deviation).

    Returns
    -------
    aryFuncChnk : np.array
        Smoothed data (smoothed in place and returned).
    """
    # Number of time points in this chunk (always the last axis):
    varNumVol = aryFuncChnk.shape[-1]
    if aryFuncChnk.ndim == 4:
        # Smooth every volume independently over the three spatial axes.
        for idxVol in range(varNumVol):
            aryFuncChnk[:, :, :, idxVol] = gaussian_filter(
                aryFuncChnk[:, :, :, idxVol], varSdSmthSpt,
                order=0, mode='nearest', truncate=4.0)
    elif aryFuncChnk.ndim == 5:
        # 5D input carries motion directions on axis 3; smooth each
        # (direction, volume) slice separately.
        varNumMtnDrctns = aryFuncChnk.shape[3]
        for idxVol in range(varNumVol):
            for idxMtn in range(varNumMtnDrctns):
                aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter(
                    aryFuncChnk[:, :, :, idxMtn, idxVol], varSdSmthSpt,
                    order=0, mode='nearest', truncate=4.0)
    return aryFuncChnk
|
def RemoveSession(self, session_path):
    '''OBEX method to remove an existing transfer session.

    This takes the path of the transfer Session object and removes it,
    together with all of the session's transfer objects.
    '''
    manager = mockobject.objects['/']
    # Remove the session's transfers first, in index order.
    transfer_id = 0
    while True:
        transfer_path = session_path + '/transfer' + str(transfer_id)
        if transfer_path not in mockobject.objects:
            break
        transfer_id += 1
        self.RemoveObject(transfer_path)
        manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved', 'oas',
                           [dbus.ObjectPath(transfer_path), [TRANSFER_IFACE]])
    # Now remove the session itself.
    self.RemoveObject(session_path)
    manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved', 'oas',
                       [dbus.ObjectPath(session_path),
                        [SESSION_IFACE, PHONEBOOK_ACCESS_IFACE]])
|
def create_host_template(self, name):
    """Create a host template.

    @param name: Name of the host template to create.
    @return: An ApiHostTemplate object.
    """
    resource_root = self._get_resource_root()
    return host_templates.create_host_template(resource_root, name, self.name)
|
def _to_iplot(self, colors=None, colorscale=None, kind='scatter', mode='lines',
              interpolation='linear', symbol='dot', size='12', fill=False,
              width=3, dash='solid', sortbars=False, keys=False, bestfit=False,
              bestfit_colors=None, opacity=0.6, mean=False, mean_colors=None,
              asDates=False, asTimestamp=False, text=None, **kwargs):
    """Generates a plotly Data object.

    Parameters
    ----------
    colors : list or dict
        {key: color} to specify the color for each column, or
        [colors] to use the colors in the defined order.
    colorscale : str
        Color scale name; only valid if 'colors' is null.
        See cufflinks.colors.scales() for available scales.
    kind : string
        Kind of chart: scatter or bar.
    mode : string
        Plotting mode for scatter traces: any combination of
        lines / markers / text joined with '+'.
    interpolation : string
        Positioning of the connecting lines:
        linear, spline, vhv, hvh, vh, hv.
    symbol : string
        The marker symbol; valid only when mode includes markers
        (see plotly.validators.scatter.marker.SymbolValidator.values).
    size : string or int
        Size of marker; valid only if marker in mode.
    fill : bool
        Filled traces.
    width : int
        Line width.
    dash : string
        Drawing style of lines: solid, dash, dashdot, dot.
    sortbars : bool
        Sort bars in descending order (only valid when kind='bar').
    keys : list of columns
        List of columns to chart; can also be used for custom sorting.
    bestfit : boolean or list
        If True, a best-fit line is generated for all columns;
        if a list, for each key in the list.
    bestfit_colors : list or dict
        Same semantics as ``colors``, applied to best-fit lines.
    mean : boolean or list
        If truthy, horizontal mean lines are added analogously to bestfit.
    mean_colors : list or dict
        Same semantics as ``colors``, applied to mean lines.
    asDates : bool
        If true, truncates times from a DatetimeIndex.
    asTimestamp : bool
        If true, uses the raw index values as x.
    text : list
        Optional per-point text attached to every trace.
    """
    df = self.copy()
    # Build the x-axis values from the index.
    if asTimestamp:
        x = [_ for _ in df.index]
    elif df.index.__class__.__name__ in ('PeriodIndex', 'DatetimeIndex'):
        if asDates:
            df.index = df.index.date
        x = df.index.format()
    elif isinstance(df.index, pd.MultiIndex):
        # Render multi-index tuples as "(a,b,...)" strings.
        x = ['({0})'.format(','.join([str(__) for __ in _])) for _ in df.index.values]
    else:
        x = df.index.values
    lines = {}
    # Promote a Series to a single-column DataFrame.
    if type(df) == pd.core.series.Series:
        df = pd.DataFrame({df.name: df})
    if not keys:
        if 'bar' in kind:
            if sortbars:
                keys = list(df.sum().sort_values(ascending=False).keys())
            else:
                keys = list(df.keys())
        else:
            keys = list(df.keys())
    # Normalize per-key styling parameters into {key: value} mappings.
    colors = get_colors(colors, colorscale, keys)
    dash = get_items_as_list(dash, keys, 'dash')
    symbol = get_items_as_list(symbol, keys, 'symbol')
    mode = get_items_as_list(mode, keys, 'mode')
    interpolation = get_items_as_list(interpolation, keys, 'interpolation')
    width = get_items_as_list(width, keys, 'width')
    # Assemble one trace dict per column.
    for key in keys:
        lines[key] = {}
        lines[key]["x"] = x
        lines[key]["y"] = df[key].fillna('').values
        lines[key]["name"] = str(key)
        if text is not None:
            lines[key]["text"] = text
        if 'bar' in kind:
            lines[key]["marker"] = {'color': to_rgba(colors[key], opacity), 'line': {'color': colors[key], 'width': 1}}
        else:
            lines[key]["line"] = {'color': colors[key], 'width': width[key], 'dash': dash[key], 'shape': interpolation[key]}
            lines[key]["mode"] = mode[key]
            if 'marker' in mode[key]:
                lines[key]["marker"] = dict(symbol=symbol[key], size=size)
            if fill:
                lines[key]["fill"] = 'tonexty' if kind == 'area' else 'tozeroy'
                lines[key]["fillcolor"] = to_rgba(colors[key], kwargs['opacity'] if 'opacity' in kwargs else .3)
    if 'bar' in kind:
        lines_plotly = [Bar(lines[key]).to_plotly_json() for key in keys]
    else:
        lines_plotly = [Scatter(lines[key]).to_plotly_json() for key in keys]
    # Timestamps don't serialize as trace names; stringify them.
    for trace in lines_plotly:
        if isinstance(trace['name'], pd.Timestamp):
            trace.update(name=str(trace['name']))
    if bestfit:
        if type(bestfit) == list:
            keys = bestfit
        d = {}
        for key in keys:
            bestfit = df[key].bestfit()
            d[bestfit.formula] = bestfit
        # Recurse (with bestfit=False) to style the fitted lines.
        bestfit_lines = pd.DataFrame(d).to_iplot(bestfit=False, colors=bestfit_colors, kind='scatter', asTimestamp=asTimestamp)
        for line in bestfit_lines:
            line['line']['dash'] = 'dash'
            if not bestfit_colors:
                line['line']['color'] = to_rgba(line['line']['color'], .6)
        data = lines_plotly
        data.extend(bestfit_lines)
        return data
    if mean:
        if type(mean) == list:
            keys = mean
        d = {}
        for key in keys:
            mean = df[key].mean()
            d['MEAN({key})'.format(key=key)] = pd.Series([mean] * len(df[key]), index=df[key].index)
        mean_lines = pd.DataFrame(d).to_iplot(mean=False, colors=mean_colors, kind='scatter', asTimestamp=asTimestamp)
        for line in mean_lines:
            line['line']['dash'] = 'dash'
            if not mean_colors:
                line['line']['color'] = to_rgba(line['line']['color'], .6)
        # NOTE(review): unlike the bestfit branch this wraps lines_plotly in
        # an extra list before extending -- possibly unintended; verify
        # against callers before changing.
        data = [lines_plotly]
        data.extend(mean_lines)
        return data
    return lines_plotly
|
def radec2sky(ra, dec):
    """Convert [ra], [dec] to [(ra[0], dec[0]), ...], and scalar ra, dec
    to [(ra, dec)] when the inputs are not iterable.

    Parameters
    ----------
    ra, dec : float or iterable
        Sky coordinates.

    Returns
    -------
    sky : numpy.array
        Array of (ra, dec) coordinates.
    """
    try:
        pairs = list(zip(ra, dec))
    except TypeError:
        # Scalar inputs are not iterable: wrap them as a single pair.
        pairs = [(ra, dec)]
    return np.array(pairs)
|
def get_real_field(model, field_name):
    '''Get the real field from a model given its name.

    Handles nested models recursively (aka. ``__`` lookups) by following
    foreign keys one segment at a time.

    :raises Exception: when a ``__`` lookup crosses a non-ForeignKey field.
    '''
    parts = field_name.split('__')
    field = model._meta.get_field(parts[0])
    if len(parts) == 1:
        # Fix: reuse the field already fetched above instead of issuing a
        # second, identical get_field(field_name) lookup.
        return field
    elif isinstance(field, models.ForeignKey):
        # Recurse into the related model with the remaining lookup path.
        return get_real_field(field.rel.to, '__'.join(parts[1:]))
    else:
        raise Exception('Unhandled field: %s' % field_name)
|
def update_infos(self, forced=False, test=False):
    """Update satellite info each self.polling_interval seconds so we
    smooth arbiter actions to just the useful ones, and raise a satellite
    update status Brok.

    If forced is True, ignore the ping period (used when the configuration
    has not yet been dispatched to the Arbiter satellites). If test is
    True, do not really ping the daemon (unit tests only).

    :param forced: ignore the ping smoothing
    :type forced: bool
    :param test:
    :type test: bool
    :return:
        None if the last request is too recent,
        False if a timeout was raised during the request,
        else the managed configurations dictionary
    """
    logger.debug("Update informations, forced: %s", forced)
    # Respect the smoothing period unless a forced refresh was requested.
    now = time.time()
    if not forced and self.last_check and self.last_check + self.polling_interval > now:
        logger.debug("Too early to ping %s, ping period is %ds!, last check: %d, now: %d",
                     self.name, self.polling_interval, self.last_check, now)
        return None
    self.get_conf(test=test)
    # Record when this daemon was last checked.
    self.last_check = time.time()
    # Publish the element's new state.
    self.broks.append(self.get_update_status_brok())
    return self.cfg_managed
|
def module(self):
    """The module specified by the ``library`` attribute.

    Lazily imports and caches the backend implementation. For a dotted
    path, the final segment is resolved as an attribute of the imported
    module; otherwise the whole module is returned.

    :raises ValueError: if ``library`` is unset or cannot be loaded.
    """
    if self._module is None:
        if self.library is None:
            raise ValueError("Backend '%s' doesn't specify a library attribute" % self.__class__)
        try:
            if '.' in self.library:
                mod_path, cls_name = self.library.rsplit('.', 1)
                mod = import_module(mod_path)
                self._module = getattr(mod, cls_name)
            else:
                self._module = import_module(self.library)
        except (AttributeError, ImportError):
            # Bug fix: the old message interpolated cls_name, which is
            # unbound when the library path has no dot -- that raised
            # NameError instead of ValueError. Report the full library.
            raise ValueError("Couldn't load %s backend library" % self.library)
    return self._module
|
def sanitize_args(args):
    """args may need a bunch of logic to set proper defaults that argparse
    is not well suited for.

    Returns the same namespace, possibly with ``release`` (and
    ``default_release``) adjusted.
    """
    if args.release is None:
        # No explicit release requested: fall back to the default one.
        args.release = 'nautilus'
        args.default_release = True
    # XXX This whole dance is because --stable is getting deprecated
    if args.stable is not None:
        LOG.warning('the --stable flag is deprecated, use --release instead')
        args.release = args.stable
    # XXX Tango ends here.
    return args
|
def set_id_in_fkeys(cls, payload):
    """Replace record names with primary IDs in foreign-key fields.

    Looks for any keys in the payload that end with either _id or _ids,
    signaling a foreign key field. For each such field, if the value uses
    the name of the record rather than its primary ID (which may include
    the model abbreviation, i.e. B-1), the name is replaced with the
    record's primary ID.

    Args:
        payload: `dict`. The payload to POST or PATCH.

    Returns:
        `dict`. The payload.
    """
    for key, val in payload.items():
        # Skip empty/absent values.
        if not val:
            continue
        if key.endswith("_id"):
            # Single foreign key: swap a record name for its primary ID.
            model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
            payload[key] = model.replace_name_with_id(name=val)
        elif key.endswith("_ids"):
            # List of foreign keys: convert each name in turn.
            model = getattr(THIS_MODULE, cls.FKEY_MAP[key])
            payload[key] = [model.replace_name_with_id(name=v) for v in val]
    return payload
|
def compare_password(expected, actual):
    """Compare two 64-byte encoded passwords.

    The final 8 bytes carry the asterisk_mbox version string; everything
    before them is the password digest. Returns ``(True, "OK")`` on an
    exact match, otherwise ``(False, <joined mismatch messages>)``.
    """
    if expected == actual:
        return True, "OK"
    problems = []
    if expected[:-8] != actual[:-8]:
        problems.append("Password mismatch")
    # Trailing padding is not significant for the version comparison.
    ver_exp = expected[-8:].rstrip()
    ver_act = actual[-8:].rstrip()
    if ver_exp != ver_act:
        problems.append("asterisk_mbox version mismatch. Client: '" + ver_act + "', Server: '" + ver_exp + "'")
    return False, ". ".join(problems)
|
def rprojs(self):
    """Return the projected (in xy/uv plane) radius of each element (either
    vertices or centers depending on the setting in the mesh) with respect
    to the center of the star.

    NOTE: unscaled

    (ComputedColumn)
    """
    # TODO: should this be moved to Mesh? Even though it is surely possible
    # to compute without being placed in orbit, projecting onto x, y doesn't
    # make much sense without a line-of-sight orientation.
    xy = self.coords_for_computations[:, :2]
    return ComputedColumn(self, np.linalg.norm(xy, axis=1))
|
def get_unit_process_ids(self, unit_processes, expect_success=True, pgrep_full=False):
    """Construct a dict containing unit sentries, process names, and
    process IDs.

    :param unit_processes: A dictionary of Amulet sentry instance
        to list of process names.
    :param expect_success: if False expect the processes to not be
        running, raise if they are.
    :param pgrep_full: passed through to the process lookup.
    :returns: Dictionary of Amulet sentry instance to dictionary
        of process names to PIDs.
    """
    pid_dict = {}
    # Plain dict iteration works identically on py2 and py3; the previous
    # six.iteritems() indirection was unnecessary.
    for sentry_unit, process_list in unit_processes.items():
        pid_dict[sentry_unit] = {}
        for process in process_list:
            pids = self.get_process_id_list(
                sentry_unit, process,
                expect_success=expect_success, pgrep_full=pgrep_full)
            pid_dict[sentry_unit][process] = pids
    return pid_dict
|
def _ord_to_str ( ordinal , weights ) :
"""Reverse function of _ str _ to _ ord ."""
|
chars = [ ]
for weight in weights :
if ordinal == 0 :
return "" . join ( chars )
ordinal -= 1
index , ordinal = divmod ( ordinal , weight )
chars . append ( _ALPHABET [ index ] )
return "" . join ( chars )
|
def add_ui(self, klass, *args, **kwargs):
    '''Add an UI element for the current scene. The approach is the same
    as renderers.

    .. warning:: The UI api is not yet finalized
    '''
    # Instantiate the UI element bound to our widget and register it.
    element = klass(self.widget, *args, **kwargs)
    self.widget.uis.append(element)
    return element
|
def _position_phonemes ( self , phonemes ) :
'''Mark syllable boundaries , and , in future , other positional / suprasegmental features ?'''
|
for i in range ( len ( phonemes ) ) :
phonemes [ i ] = PositionedPhoneme ( phonemes [ i ] )
phonemes [ i ] . syllable_initial = self . is_syllable_initial ( phonemes , i )
phonemes [ i ] . syllable_final = self . is_syllable_final ( phonemes , i )
return phonemes
|
def get_ea_indexed(self):
    """Calculate the effective address for all indexed addressing modes.

    Reads the postbyte following the opcode, decodes the base register and
    addressing sub-mode, updates the cycle count per mode, and resolves one
    level of indirection when postbyte bit 4 is set.
    """
    addr, postbyte = self.read_pc_byte()
    # Bits 5-6 of the postbyte select the base register (X, Y, U or S).
    rr = (postbyte >> 5) & 3
    try:
        register_str = self.INDEX_POSTBYTE2STR[rr]
    except KeyError:
        raise RuntimeError("Register $%x doesn't exists! (postbyte: $%x)" % (rr, postbyte))
    register_obj = self.register_str2object[register_str]
    register_value = register_obj.value
    if not is_bit_set(postbyte, bit=7):
        # bit 7 == 0: EA = n,R -- 5-bit signed offset embedded in postbyte
        offset = signed5(postbyte & 0x1f)
        ea = register_value + offset
        return ea
    # bit 7 == 1: low nibble selects the addressing sub-mode.
    addr_mode = postbyte & 0x0f
    self.cycles += 1
    offset = None
    # TODO: Optimized this, maybe use a dict mapping...
    if addr_mode == 0x0:
        # 0b0000 | ,R+  | post-increment by 1
        ea = register_value
        register_obj.increment(1)
    elif addr_mode == 0x1:
        # 0b0001 | ,R++ | post-increment by 2
        ea = register_value
        register_obj.increment(2)
        self.cycles += 1
    elif addr_mode == 0x2:
        # 0b0010 | ,-R  | pre-decrement by 1
        register_obj.decrement(1)
        ea = register_obj.value
    elif addr_mode == 0x3:
        # 0b0011 | ,--R | pre-decrement by 2
        register_obj.decrement(2)
        ea = register_obj.value
        self.cycles += 1
    elif addr_mode == 0x4:
        # 0b0100 | ,R   | no offset
        ea = register_value
    elif addr_mode == 0x5:
        # 0b0101 | B,R  | accumulator B offset
        offset = signed8(self.accu_b.value)
    elif addr_mode == 0x6:
        # 0b0110 | A,R  | accumulator A offset
        offset = signed8(self.accu_a.value)
    elif addr_mode == 0x8:
        # 0b1000 | n,R  | 8-bit offset
        offset = signed8(self.read_pc_byte()[1])
    elif addr_mode == 0x9:
        # 0b1001 | n,R  | 16-bit offset
        offset = signed16(self.read_pc_word()[1])
        self.cycles += 1
    elif addr_mode == 0xa:
        # 0b1010 | illegal mode, set ea = 0
        ea = 0
    elif addr_mode == 0xb:
        # 0b1011 | D,R  | D (A:B concatenated) register offset
        offset = signed16(self.accu_d.value)
        # FIXME: signed16() ok?
        self.cycles += 1
    elif addr_mode == 0xc:
        # 0b1100 | n,PCR | 8-bit offset from program counter
        __, value = self.read_pc_byte()
        value_signed = signed8(value)
        ea = self.program_counter.value + value_signed
    elif addr_mode == 0xd:
        # 0b1101 | n,PCR | 16-bit offset from program counter
        __, value = self.read_pc_word()
        value_signed = signed16(value)
        ea = self.program_counter.value + value_signed
        self.cycles += 1
    elif addr_mode == 0xe:
        # 0b1110 | illegal address mode, use 0xffff
        ea = 0xffff
    elif addr_mode == 0xf:
        # 0b1111 | [n] | 16-bit address - extended indirect
        __, ea = self.read_pc_word()
    else:
        raise RuntimeError("Illegal indexed addressing mode: $%x" % addr_mode)
    # Offset-based modes compute the EA from the (unmodified) base register.
    if offset is not None:
        ea = register_value + offset
    # Wrap to the 16-bit address space.
    ea = ea & 0xffff
    if is_bit_set(postbyte, bit=4):
        # bit 4 == 1 -> indirect: fetch the final EA from memory.
        ea = self.memory.read_word(ea)
    return ea
|
def parse_directive_locations(lexer: Lexer) -> List[NameNode]:
    """DirectiveLocations"""
    # An optional leading pipe may precede the first location.
    expect_optional_token(lexer, TokenKind.PIPE)
    locations: List[NameNode] = []
    while True:
        locations.append(parse_directive_location(lexer))
        # Locations continue for as long as pipes separate them.
        if not expect_optional_token(lexer, TokenKind.PIPE):
            break
    return locations
|
def download(source, sink=None):
    """Download a file.

    Parameters
    ----------
    source : str
        Where the file comes from. Some URL.
    sink : str or None (default: same filename in current directory)
        Where the file gets stored. Some filepath in the local file system.
    """
    try:
        # Python 3
        from urllib.request import urlretrieve
    except ImportError:
        # Python 2
        from urllib import urlretrieve
    if sink is None:
        # Default to the source's basename in the current directory.
        sink = os.path.abspath(os.path.split(source)[1])
    urlretrieve(source, sink)
    return sink
|
def cameraUrls(self, camera=None, home=None, cid=None):
    """Return the vpn_url and the local_url (if available) of a given camera
    in order to access its live feed.

    Can't use the is_local property, which is mostly false in case of
    operator dynamic IP change after the presence start sequence.
    """
    local_url = None
    vpn_url = None
    # Resolve the camera either by ID or by (name, home).
    if cid:
        camera_data = self.cameraById(cid)
    else:
        camera_data = self.cameraByName(camera=camera, home=home)
    if camera_data:
        vpn_url = camera_data['vpn_url']
        resp = postRequest(vpn_url + '/command/ping')
        temp_local_url = resp['local_url']
        try:
            # Confirm the advertised local URL actually answers locally.
            resp = postRequest(temp_local_url + '/command/ping', timeout=1)
            if resp and temp_local_url == resp['local_url']:
                local_url = temp_local_url
        except Exception:
            # Fix: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed. On this request
            # an error is expected to be a timeout: no reachable local URL.
            local_url = None
    return vpn_url, local_url
|
def is_correctness_available(self, question_id):
    """Is a measure of correctness available for the question?"""
    response = self.get_response(question_id)
    # Unanswered questions can never report correctness.
    if not response.is_answered():
        return False
    item = self._get_item(response.get_item_id())
    return item.is_correctness_available_for_response(response)
|
def asyncImap(asyncCallable, *iterables):
    """itertools.imap for deferred callables."""
    # Map lazily, then gather every resulting deferred into one.
    pending = imap(asyncCallable, *iterables)
    return gatherResults(pending, consumeErrors=True)
|
def set_api_server_info(host=None, port=None, protocol=None):
    '''Override the current settings for which API server to communicate
    with. Any parameters that are not explicitly specified are not
    overridden.

    :param host: API server hostname
    :type host: string
    :param port: API server port. If not specified, *port* is guessed based on *protocol*.
    :type port: string
    :param protocol: Either "http" or "https"
    :type protocol: string
    '''
    global APISERVER_PROTOCOL, APISERVER_HOST, APISERVER_PORT, APISERVER
    if host is not None:
        APISERVER_HOST = host
    if port is not None:
        APISERVER_PORT = port
    if protocol is not None:
        APISERVER_PROTOCOL = protocol
    # Rebuild the full API server URL, omitting the port part when the
    # caller did not supply one.
    base = APISERVER_PROTOCOL + "://" + APISERVER_HOST
    if port is None or port == '':
        APISERVER = base
    else:
        APISERVER = base + ":" + str(APISERVER_PORT)
|
def full(self):
    """Return True if there are maxsize items in the queue.

    Note: if the Queue was initialized with maxsize=0 (the default),
    then full() is never True.
    """
    maxsize = self._parent._maxsize
    # maxsize <= 0 means "unbounded": the queue is never full.
    return maxsize > 0 and self.qsize() >= maxsize
|
def add_leaves(self, values_array, do_hash=False):
    """Add leaves to the tree.

    Similar to the chainpoint merkle tree library, this accepts hash
    values as an array of Buffers or hex strings.

    :param values_array: array of values to add
    :param do_hash: whether to hash the values before inserting
    """
    # Any structural change invalidates the previously computed tree.
    self.tree['is_ready'] = False
    # Fix: use a plain loop instead of a list comprehension executed
    # purely for its side effects (which also built a throwaway list).
    for value in values_array:
        self._add_leaf(value, do_hash)
|
def getCompoundIdForFeatureId(self, featureId):
    """Return a server-style compound ID for an internal featureId.

    :param long featureId: id of feature in database
    :return: string representing the ID for the specified GA4GH protocol
        Feature object in this FeatureSet.
    """
    # Missing feature id maps to an empty compound ID string.
    if featureId is None or featureId == "":
        return str("")
    compoundId = datamodel.FeatureCompoundId(self.getCompoundId(), str(featureId))
    return str(compoundId)
|
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
    """Formats a dictionary of production rules into the string format
    expected by the Parsimonious Grammar class.
    """
    rules = []
    for nonterminal, right_hand_side in grammar_dictionary.items():
        rules.append(f"{nonterminal} = {' / '.join(right_hand_side)}")
    # Parsimonious requires literal backslashes to be escaped.
    return "\n".join(rules).replace("\\", "\\\\")
|
def getSegmentInfo(self, collectActiveData=False):
    """Return statistics on segments, synapses and permanence values.

    If requested, also returns information regarding the number of
    currently active segments and synapses.

    Returns the tuple::

        nSegments,        # total number of segments
        nSynapses,        # total number of synapses
        nActiveSegs,      # total no. of active segments
        nActiveSynapses,  # total no. of active synapses
        distSegSizes,     # dict where d[n] = number of segments with n synapses
        distNSegsPerCell, # dict where d[n] = number of cells with n segments
        distPermValues,   # dict where d[p] = number of synapses with perm = p/10
        distAges,         # list of tuples (ageRange, numSegments)

    nActiveSegs and nActiveSynapses are 0 if collectActiveData is False.
    """
    # NOTE: ``dict.has_key`` and ``xrange`` are Python-2-only and were
    # removed in Python 3; use ``dict.get`` and ``range`` instead.
    nSegments, nSynapses = 0, 0
    nActiveSegs, nActiveSynapses = 0, 0
    distSegSizes, distNSegsPerCell = {}, {}
    distPermValues = {}  # num synapses with a given permanence value
    numAgeBuckets = 20
    distAges = []
    ageBucketSize = int((self.lrnIterationIdx + 20) / 20)
    for i in range(numAgeBuckets):
        distAges.append(['%d-%d' % (i * ageBucketSize, (i + 1) * ageBucketSize - 1), 0])
    for c in range(self.numberOfCols):
        for i in range(self.cellsPerColumn):
            if len(self.cells[c][i]) > 0:
                nSegmentsThisCell = len(self.cells[c][i])
                nSegments += nSegmentsThisCell
                distNSegsPerCell[nSegmentsThisCell] = (
                    distNSegsPerCell.get(nSegmentsThisCell, 0) + 1)
                for seg in self.cells[c][i]:
                    nSynapsesThisSeg = seg.getNumSynapses()
                    nSynapses += nSynapsesThisSeg
                    distSegSizes[nSynapsesThisSeg] = (
                        distSegSizes.get(nSynapsesThisSeg, 0) + 1)
                    # Accumulate permanence value histogram (bucketed by
                    # tenths: p = int(perm * 10)).
                    for syn in seg.syns:
                        p = int(syn[2] * 10)
                        distPermValues[p] = distPermValues.get(p, 0) + 1
                    # Accumulate segment age histogram
                    age = self.lrnIterationIdx - seg.lastActiveIteration
                    ageBucket = int(age / ageBucketSize)
                    distAges[ageBucket][1] += 1
                    # Get active synapse statistics if requested
                    if collectActiveData:
                        if self.isSegmentActive(seg, self.infActiveState['t']):
                            nActiveSegs += 1
                        for syn in seg.syns:
                            if self.activeState['t'][syn[0]][syn[1]] == 1:
                                nActiveSynapses += 1
    return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
            distSegSizes, distNSegsPerCell, distPermValues, distAges)
|
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """Helper to locate anomalous indices in a sequence of smoothed errors.

    The threshold ``epsilon`` is the mean of the smoothed errors plus ``z``
    standard deviations; sequences exceeding it are grouped and then pruned.
    """
    epsilon = np.mean(smoothed_errors) + z * np.std(smoothed_errors)
    # Group consecutive above-threshold errors into candidate sequences.
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies)
    # Prune weak candidates only when there is something to prune.
    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq, smoothed_errors, max_error_below_e, anomaly_indices)
    return anomaly_indices
|
def computeScaledProbabilities(listOfScales=(1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0),
                               listofkValues=(64, 128, 256),
                               kw=32, n=1000, numWorkers=10, nTrials=1000):
    """Compute the impact of input scaling S on match probabilities for fixed n.

    :param listOfScales: input scaling factors to test (immutable default;
        the previous mutable list defaults were an anti-pattern)
    :param listofkValues: values of k to test
    :param kw: sparsity parameter used to derive theta
    :param n: input size
    :param numWorkers: number of parallel workers
    :param nTrials: trials per configuration
    """
    # Create arguments for the possibilities we want to test
    args = []
    theta, _ = getTheta(kw)
    for ki, k in enumerate(listofkValues):
        for si, s in enumerate(listOfScales):
            args.append({
                "k": k,
                "kw": kw,
                "n": n,
                "theta": theta,
                "nTrials": nTrials,
                "inputScaling": s,
                # (ki, si) locates this run in the result matrix below.
                "errorIndex": [ki, si],
            })
    result = computeMatchProbabilityParallel(args, numWorkers)
    errors = np.zeros((len(listofkValues), len(listOfScales)))
    for r in result:
        errors[r["errorIndex"][0], r["errorIndex"][1]] = r["pctMatches"]
    print("Errors using scaled inputs, for kw=", kw)
    print(repr(errors))
    plotScaledMatches(listofkValues, listOfScales, errors,
                      "images/scalar_effect_of_scale_kw" + str(kw) + ".pdf")
|
def parallel_newton(func, x0, fprime=None, par_args=(), simple_args=(),
                    tol=1.48e-8, maxiter=50, parallel=True, **kwargs):
    """A parallelized version of :func:`scipy.optimize.newton`.

    Arguments:
      func
        The function to search for zeros, called as
        ``f(x, [*par_args...], [*simple_args...])``.
      x0
        The initial point for the zero search.
      fprime
        (Optional) The first derivative of *func*, called the same way.
      par_args
        Tuple of additional parallelized arguments.
      simple_args
        Tuple of additional arguments passed identically to every invocation.
      tol
        The allowable error of the zero value.
      maxiter
        Maximum number of iterations.
      parallel
        Controls parallelization; default uses all available cores. See
        :func:`pwkit.parallel.make_parallel_helper`.
      kwargs
        Passed to :func:`scipy.optimize.newton`.

    Returns: an array of locations of zeros (a scalar for scalar input).

    The values *x0*, *tol*, *maxiter*, and the items of *par_args* should
    all be numeric, and may be N-dimensional Numpy arrays. They are all
    broadcast to a common shape, and one zero-finding run is performed for
    each element in the resulting array. The *simple_args* are passed to
    each function identically and need not be Pickle-able.
    """
    from scipy.optimize import newton
    from .parallel import make_parallel_helper
    phelp = make_parallel_helper(parallel)
    if not isinstance(par_args, tuple):
        raise ValueError('par_args must be a tuple')
    if not isinstance(simple_args, tuple):
        raise ValueError('simple_args must be a tuple')
    # Broadcast all per-run parameters to a common shape; one newton run is
    # performed per element of that shape.
    bc_raw = np.broadcast_arrays(x0, tol, maxiter, *par_args)
    bc_1d = tuple(np.atleast_1d(a) for a in bc_raw)

    def gen_var_args():
        # Yield one flat tuple of per-run parameters per element.
        for i in range(bc_1d[0].size):
            yield tuple(x.flat[i] for x in bc_1d)

    def helper(i, _, var_args):
        x0, tol, maxiter = var_args[:3]
        args = var_args[3:] + simple_args
        return newton(func, x0, fprime=fprime, args=args, tol=tol,
                      maxiter=maxiter, **kwargs)

    with phelp.get_ppmap() as ppmap:
        result = np.asarray(ppmap(helper, None, gen_var_args()))
    if bc_raw[0].ndim == 0:
        # Scalar input -> scalar output. ``np.asscalar`` was deprecated and
        # removed (NumPy 1.23); ``ndarray.item()`` is the supported form.
        return result.item()
    return result
|
def get_page_by_id(self, page_id, expand=None):
    """Get page by ID.

    :param page_id: Content ID
    :param expand: OPTIONAL: expand e.g. history
    :return: the API response for the content item
    """
    url = 'rest/api/content/{page_id}'.format(page_id=page_id)
    # Only append the query parameter when it was actually supplied;
    # previously ``expand=None`` was interpolated literally into the URL,
    # asking the server to expand a field named "None".
    if expand:
        url += '?expand={expand}'.format(expand=expand)
    return self.get(url)
|
def create(self):
    """Build the widgets for this FormBaseNew form."""
    # Ctrl-T / Ctrl-Q both quit the form.
    self.add_handlers({'^T': self.quit, '^Q': self.quit})
    # Placeholder shown only while no services are running.
    self.services_tft = self.add(npyscreen.TitleFixedText,
                                 name='No services running.', value='')
    services = Services(self.core, external=self.external)
    if services:
        # At least one service exists: hide the placeholder and list them.
        self.services_tft.hidden = True
        for service in services:
            self.add(npyscreen.TitleFixedText, name=service[0],
                     value=', '.join(service[1]))
|
def get_result(self, commit, default=None):
    """Return an individual result from the history if it exists.

    :param commit: key identifying the result to look up
    :param default: value returned when ``commit`` is absent; when left as
        None a fresh ``MemoteResult()`` is returned. (The previous
        ``default=MemoteResult()`` signature evaluated the default once at
        definition time, so every caller shared — and could mutate — the
        same instance.)
    """
    assert self._results is not None, "Please call the method `load_history` first."
    if default is None:
        default = MemoteResult()
    return self._results.get(commit, default)
|
def get_value_from_view(context, field):
    """Derive the displayed value for the passed-in ``field``.

    This first checks for a particular method on the ListView, then looks
    for a method on the object, then finally treats it as an attribute
    (all delegated to ``view.lookup_field_value``). Datetime values are
    formatted for display.
    """
    view = context['view']
    obj = None
    if 'object' in context:
        obj = context['object']
    value = view.lookup_field_value(context, obj, field)
    # Use isinstance rather than an exact type comparison so datetime
    # subclasses are formatted too.
    if isinstance(value, datetime):
        return format_datetime(value)
    return value
|
def download(self, **kwargs):
    """Download the omega-scan images for Gravity Spy triggers in this table.

    Parameters
    ----------
    nproc : `int`, optional, default: 1
        number of CPUs to use for parallel file reading
    download_path : `str`, optional, default: 'download'
        where the images end up
    download_durs : `list`, optional, default: [0.5, 1.0, 2.0, 4.0]
        exactly which durations to download (default: all available)
    training_set, labelled_samples : optional flags that download images
        into ``./<ml_label>/<sample_type>/<image>`` subfolders

    Returns
    -------
    A folder containing omega scans (sorted by label when requested).
    """
    from six.moves.urllib.request import urlopen
    import os
    # back to pandas
    try:
        images_db = self.to_pandas()
    except ImportError as exc:
        exc.args = ('pandas is required to download triggers',)
        raise
    # Remove any broken links
    images_db = images_db.loc[images_db.url1 != '']
    training_set = kwargs.pop('training_set', 0)
    labelled_samples = kwargs.pop('labelled_samples', 0)
    download_location = kwargs.pop('download_path', os.path.join('download'))
    duration_values = np.array([0.5, 1.0, 2.0, 4.0])
    download_durs = kwargs.pop('download_durs', duration_values)
    # Map requested durations onto indices of the known duration set.
    duration_idx = []
    for idur in download_durs:
        duration_idx.append(np.argwhere(duration_values == idur)[0][0])
    duration_values = duration_values[duration_idx]
    duration_values = np.array([duration_values]).astype(str)
    # labelled_samples are only available for trainingset tables.
    if labelled_samples:
        if 'sample_type' not in images_db.columns:
            raise ValueError('You have requested ml_labelled Samples '
                             'for a Table which does not have '
                             'this column. Did you fetch a '
                             'trainingset* table?')
    # Requesting labelled samples implies the training set even if the
    # caller forgot to say so.
    if labelled_samples and not training_set:
        training_set = 1
    # url1..url4 correspond to the four known durations.
    cols_for_download = ['url1', 'url2', 'url3', 'url4']
    cols_for_download = [cols_for_download[idx] for idx in duration_idx]
    cols_for_download_ext = ['ml_label', 'sample_type', 'ifo', 'gravityspy_id']
    if not training_set:
        images_db['ml_label'] = ''
    if not labelled_samples:
        images_db['sample_type'] = ''
    if not os.path.isdir(download_location):
        os.makedirs(download_location)
    # Pre-create one folder per label (and per sample type if requested).
    if training_set:
        for ilabel in images_db.ml_label.unique():
            if labelled_samples:
                for itype in images_db.sample_type.unique():
                    if not os.path.isdir(os.path.join(download_location, ilabel, itype)):
                        os.makedirs(os.path.join(download_location, ilabel, itype))
            else:
                if not os.path.isdir(os.path.join(download_location, ilabel)):
                    os.makedirs(os.path.join(download_location, ilabel))
    images_for_download = images_db[cols_for_download]
    # ``DataFrame.as_matrix`` was removed in pandas 1.0; ``to_numpy`` is the
    # supported replacement.
    images = images_for_download.to_numpy().flatten()
    images_for_download_ext = images_db[cols_for_download_ext]
    duration = np.atleast_2d(
        duration_values.repeat(len(images_for_download_ext), 0).flatten()).T
    images_for_download_ext = images_for_download_ext.to_numpy().repeat(
        len(cols_for_download), 0)
    download_paths = np.array([[download_location]]).repeat(
        len(images_for_download_ext), 0)
    # Rows: [url, ml_label, sample_type, ifo, gravityspy_id, duration, path]
    images = np.hstack((np.atleast_2d(images).T, images_for_download_ext,
                        duration, download_paths))

    def get_image(url):
        name = url[3] + '_' + url[4] + '_spectrogram_' + url[5] + '.png'
        outfile = os.path.join(url[6], url[1], url[2], name)
        with open(outfile, 'wb') as fout:
            fout.write(urlopen(url[0]).read())

    # calculate maximum number of processes
    nproc = min(kwargs.pop('nproc', 1), len(images))

    # define multiprocessing method
    def _download_single_image(url):
        try:
            return url, get_image(url)
        except Exception as exc:  # pylint: disable=broad-except
            if nproc == 1:
                raise
            else:
                return url, exc

    # read files
    output = mp_utils.multiprocess_with_queues(nproc, _download_single_image, images)
    # raise exceptions (from multiprocessing, single process raises inline)
    for f, x in output:
        if isinstance(x, Exception):
            x.args = ('Failed to read %s: %s' % (f, str(x)),)
            raise x
|
def from_request(cls, http_method, http_url, headers=None, parameters=None,
                 query_string=None):
    """Combine multiple OAuth parameter sources into one request object.

    Parameters are collected from the ``Authorization`` header, the raw
    query string, and the URL's own query component, merged on top of any
    explicitly passed ``parameters``.

    Returns an instance of ``cls`` when any parameters were found,
    otherwise None. Raises ``Error`` if the Authorization header cannot
    be parsed.
    """
    if parameters is None:
        parameters = {}
    # Headers
    if headers:
        auth_header = None
        for k, v in headers.items():
            # Accept both plain ('Authorization') and WSGI-style
            # ('HTTP_AUTHORIZATION') header names.
            if k.lower() == 'authorization' or k.upper() == 'HTTP_AUTHORIZATION':
                auth_header = v
        # Check that the authorization header is OAuth.
        if auth_header and auth_header[:6] == 'OAuth ':
            auth_header = auth_header[6:]
            try:
                # Get the parameters from the header.
                header_params = cls._split_header(auth_header)
                parameters.update(header_params)
            except Exception:
                # Narrowed from a bare ``except:`` so system-exiting
                # exceptions (KeyboardInterrupt, SystemExit) propagate.
                raise Error('Unable to parse OAuth parameters from '
                            'Authorization header.')
    # GET or POST query string.
    if query_string:
        query_params = cls._split_url_string(query_string)
        parameters.update(query_params)
    # URL parameters (the query component of the URL).
    param_str = urlparse(http_url)[4]
    url_params = cls._split_url_string(param_str)
    parameters.update(url_params)
    if parameters:
        return cls(http_method, http_url, parameters)
    return None
|
def _tls_auth_encrypt(self, s):
    """Return the TLSCiphertext.encrypted_record for AEAD ciphers."""
    wcs = self.tls_session.wcs
    # Pack the 8-byte big-endian record sequence number, then advance it
    # for the next record.
    seq = struct.pack("!Q", wcs.seq_num)
    wcs.seq_num += 1
    return wcs.cipher.auth_encrypt(s, b"", seq)
|
def reset_subscription_since(self, account_id, datetime_str):
    """Handler for the ``--reset-subscription-since`` command.

    Args:
        account_id (int): id of the account to reset.
        datetime_str (str): datetime string the next poll will retrieve
            data since.

    Returns:
        (str) json encoded response.

    NOTES:
        No validation is performed here; that responsibility is delegated
        to the backend.
    """
    payload = {
        'account_id': account_id,
        'datetime': datetime_str,
    }
    return self._perform_post_request(
        self.reset_subscription_since_endpoint, payload, self.token_header)
|
def train(hparams, output_dir, env_problem_name, report_fn=None):
    """Run model-free RL training for the given env problem.

    Args:
        hparams: HParams providing ``base_algo``, ``base_algo_params``,
            ``frame_stack_size`` and evaluation settings.
        output_dir: directory used for both model checkpoints and the
            ``eval_metrics`` summaries.
        env_problem_name: name of the environment problem to train on.
        report_fn: optional callable ``report_fn(metric_value, step)``
            invoked after each evaluation.
    """
    env_fn = initialize_env_specs(hparams, env_problem_name)
    tf.logging.vlog(1, "HParams in trainer_model_free.train : %s",
                    misc_utils.pprint_hparams(hparams))
    tf.logging.vlog(1, "Using hparams.base_algo: %s", hparams.base_algo)
    learner = rl_utils.LEARNERS[hparams.base_algo](
        hparams.frame_stack_size, output_dir, output_dir, total_num_epochs=1)
    policy_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
    # Overlay any "<base_algo>_"-prefixed values from hparams onto the
    # policy hparams.
    rl_utils.update_hparams_from_hparams(policy_hparams, hparams,
                                         hparams.base_algo + "_")
    tf.logging.vlog(1, "Policy HParams : %s",
                    misc_utils.pprint_hparams(policy_hparams))
    # TODO(konradczechowski): remove base_algo dependance, when evaluation method
    # will be decided
    if hparams.base_algo == "ppo":
        total_steps = policy_hparams.epochs_num
        tf.logging.vlog(2, "total_steps: %d", total_steps)
        eval_every_epochs = policy_hparams.eval_every_epochs
        tf.logging.vlog(2, "eval_every_epochs: %d", eval_every_epochs)
        if eval_every_epochs == 0:
            eval_every_epochs = total_steps
        # Evaluation is driven from here via model_save_fn, so the learner's
        # own periodic eval is disabled.
        policy_hparams.eval_every_epochs = 0
        metric_name = rl_utils.get_metric_name(
            sampling_temp=hparams.eval_sampling_temps[0],
            max_num_noops=hparams.eval_max_num_noops, clipped=False)
        tf.logging.vlog(1, "metric_name: %s", metric_name)
        eval_metrics_dir = os.path.join(output_dir, "eval_metrics")
        eval_metrics_dir = os.path.expanduser(eval_metrics_dir)
        tf.gfile.MakeDirs(eval_metrics_dir)
        eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)

        def evaluate_on_new_model(model_dir_path):
            # NOTE(review): relies on a module-level ``step`` variable being
            # defined elsewhere in this module -- confirm before reuse.
            global step
            eval_metrics = rl_utils.evaluate_all_configs(hparams, model_dir_path)
            tf.logging.info("Agent eval metrics:\n{}".format(
                pprint.pformat(eval_metrics)))
            rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, step)
            if report_fn:
                report_fn(eval_metrics[metric_name], step)
            step += 1

        policy_hparams.epochs_num = total_steps
        # Saving a model triggers evaluate_on_new_model via model_save_fn.
        policy_hparams.save_models_every_epochs = eval_every_epochs
    else:
        def evaluate_on_new_model(model_dir_path):
            del model_dir_path
            raise NotImplementedError(
                "This function is currently implemented only for ppo")

    learner.train(env_fn, policy_hparams, simulated=False,
                  save_continuously=True, epoch=0,
                  model_save_fn=evaluate_on_new_model)
|
def process(self, job_id):
    """Process job ``job_id`` through the queue (a simpy process generator).

    Draws a service time, yields a timeout for it, and notifies the
    observer of both service start and departure.

    Raises:
        GGCQServiceTimeStopIteration: the service-time generator ran out.
        GGCQServiceTimeTypeError: the drawn service time has a bad type.
        GGCQNegativeServiceTimeError: the drawn service time is negative.
    """
    self._logger.info('{:.2f}: Process job {}'.format(self._env.now, job_id))
    # log time of commencement of service
    self._observer.notify_service(time=self._env.now, job_id=job_id)
    # draw a new service time
    try:
        service_time = next(self._service_time_generator)
    except StopIteration:  # ERROR: no more service times
        error_msg = ('Service time generator exhausted')
        self._logger.error(error_msg)
        # raise a different exception, as simpy uses StopIteration to
        # signify end of process (generator)
        raise GGCQServiceTimeStopIteration(error_msg)
    # Best-effort debug log: formatting fails for non-numeric service
    # times, which are diagnosed properly below. Narrowed from a bare
    # ``except:`` so KeyboardInterrupt/SystemExit still propagate.
    try:
        self._logger.debug('Service time: {:.2f}'.format(service_time))
    except Exception:
        pass
    # wait for the service time to pass
    try:
        yield self._env.timeout(service_time)
    except TypeError:  # error: service time of wrong type
        error_msg = ("service time '{}' has wrong type '{}'".format(
            service_time, type(service_time).__name__))
        self._logger.error(error_msg)
        # trigger exception
        raise GGCQServiceTimeTypeError(error_msg)
    except ValueError as exc:
        if str(exc).startswith('Negative delay'):
            # error: negative service time
            error_msg = ("negative service time {:.2f}".format(service_time))
            self._logger.error(error_msg)
            # trigger exception
            raise GGCQNegativeServiceTimeError(error_msg)
        else:
            raise
    # job finished processing -> departing
    self._logger.info('{:.2f}: Finished processing job {}'.format(
        self._env.now, job_id))
    # log departure epoch
    self._observer.notify_departure(time=self._env.now, job_id=job_id)
|
def _converged(self):
    """Check convergence based on the maximum absolute prior/posterior gap.

    Returns
    -------
    converged : boolean
        Whether the parameter estimation converged.
    max_diff : float
        Maximum absolute difference between prior and posterior.
    """
    diff = self.local_prior - self.local_posterior_
    max_diff = np.max(np.fabs(diff))
    if self.verbose:
        # Extra diagnostics are only computed when verbose logging is on.
        _, mse = self._mse_converged()
        diff_ratio = np.sum(diff ** 2) / np.sum(self.local_posterior_ ** 2)
        logger.info('tfa prior posterior max diff %f mse %f diff_ratio %f'
                    % ((max_diff, mse, diff_ratio)))
    if max_diff <= self.threshold:
        return True, max_diff
    return False, max_diff
|
def getmatch(self, regex, group=0, flags=0):
    """Like #Scanner.match(), but return the captured *group* instead of
    the regex match object, or None if the pattern didn't match."""
    m = self.match(regex, flags)
    return m.group(group) if m else None
|
def GetOobResult(self, param, user_ip, gitkit_token=None):
    """Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
        param: dict of HTTP POST params; must contain an 'action' key plus
            the fields that action needs ('email' for password reset,
            'oldEmail'/'newEmail' for email change).
        user_ip: string, end user's IP address.
        gitkit_token: string, the gitkit token if user logged in; required
            for the ChangeEmail action.

    Returns:
        A dict of:
            email: user email who initializes the request
            new_email: the requested new email, for ChangeEmail action only
            oob_link: the generated link to be sent to user's email
            oob_code: the one-time out-of-band code
            action: OobAction
            response_body: the http body to be returned to Gitkit widget
        or, on any failure, the result of ``_FailureOobResponse``.
    """
    if 'action' in param:
        try:
            if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
                request = self._PasswordResetRequest(param, user_ip)
                oob_code, oob_link = self._BuildOobLink(request, param['action'])
                return {'action': GitkitClient.RESET_PASSWORD_ACTION,
                        'email': param['email'],
                        'oob_link': oob_link,
                        'oob_code': oob_code,
                        'response_body': simplejson.dumps({'success': True})}
            elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
                # Changing email requires an authenticated user.
                if not gitkit_token:
                    return self._FailureOobResponse('login is required')
                request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
                oob_code, oob_link = self._BuildOobLink(request, param['action'])
                return {'action': GitkitClient.CHANGE_EMAIL_ACTION,
                        'email': param['oldEmail'],
                        'new_email': param['newEmail'],
                        'oob_link': oob_link,
                        'oob_code': oob_code,
                        'response_body': simplejson.dumps({'success': True})}
        except errors.GitkitClientError as error:
            return self._FailureOobResponse(error.value)
    # Unrecognized action (or no 'action' key) falls through to here.
    return self._FailureOobResponse('unknown request type')
|
def from_items(cls, items, columns=None, orient='columns'):
    """Construct a DataFrame from a list of tuples.

    .. deprecated:: 0.23.0
        `from_items` is deprecated and will be removed in a future version.
        Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
        instead.
        :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
        may be used to preserve the key order.

    Convert (key, value) pairs to DataFrame. The keys will be the axis
    index (usually the columns, but depends on the specified
    orientation). The values should be arrays or Series.

    Parameters
    ----------
    items : sequence of (key, value) pairs
        Values should be arrays or Series.
    columns : sequence of column labels, optional
        Must be passed if orient='index'.
    orient : {'columns', 'index'}, default 'columns'
        The "orientation" of the data. If the keys of the
        input correspond to column labels, pass 'columns'
        (default). Otherwise if the keys correspond to the index,
        pass 'index'.

    Returns
    -------
    DataFrame
    """
    warnings.warn("from_items is deprecated. Please use "
                  "DataFrame.from_dict(dict(items), ...) instead. "
                  "DataFrame.from_dict(OrderedDict(items)) may be used to "
                  "preserve the key order.",
                  FutureWarning, stacklevel=2)
    keys, values = lzip(*items)
    if orient == 'columns':
        if columns is not None:
            columns = ensure_index(columns)
            # Duplicate keys collapse when items are turned into a dict;
            # in that case the passed columns must match the keys exactly.
            idict = dict(items)
            if len(idict) < len(items):
                if not columns.equals(ensure_index(keys)):
                    raise ValueError('With non-unique item names, passed '
                                     'columns must be identical')
                arrays = values
            else:
                arrays = [idict[k] for k in columns if k in idict]
        else:
            columns = ensure_index(keys)
            arrays = values
        # GH 17312
        # Provide more informative error msg when scalar values passed
        try:
            return cls._from_arrays(arrays, columns, None)
        except ValueError:
            if not is_nested_list_like(values):
                raise ValueError('The value in each (key, value) pair '
                                 'must be an array, Series, or dict')
    elif orient == 'index':
        if columns is None:
            raise TypeError("Must pass columns with orient='index'")
        keys = ensure_index(keys)
        # GH 17312
        # Provide more informative error msg when scalar values passed
        try:
            arr = np.array(values, dtype=object).T
            data = [lib.maybe_convert_objects(v) for v in arr]
            return cls._from_arrays(data, columns, keys)
        except TypeError:
            if not is_nested_list_like(values):
                raise ValueError('The value in each (key, value) pair '
                                 'must be an array, Series, or dict')
    else:  # pragma: no cover
        raise ValueError("'orient' must be either 'columns' or 'index'")
|
def list_tasks(collector):
    """List the available_tasks"""
    print("Usage: dashmat <task>")
    print("")
    print("Available tasks to choose from are:")
    print("-----------------------------------")
    print("")
    # Sort by label first, then (stably) by name length so ties keep
    # label order.
    by_label = sorted(available_actions.items(), key=lambda item: item[1].label)
    by_length = sorted(by_label, key=lambda item: len(item[0]))
    max_length = max(len(name) for name, _ in by_length)
    for key, task in by_length:
        # First line of the task's docstring serves as its description.
        desc = dedent(task.__doc__ or "").strip().split('\n')[0]
        print("\t{0}{1} :-: {2}".format(" " * (max_length - len(key)), key, desc))
    print("")
|
def tilequeue_enqueue_full_pyramid_from_toi(cfg, peripherals, args):
    """Enqueue a full pyramid of tiles derived from the z10 tiles of interest.

    Coords below ``zoom_start`` are enqueued as-is ("ungrouped"); coords at
    or above the rawr group zoom are collapsed to their containing tile at
    that zoom and expanded into pyramids spanning [zoom_start, zoom_stop).
    """
    logger = make_logger(cfg, 'enqueue_tiles_of_interest')
    logger.info('Enqueueing tiles of interest')
    logger.info('Fetching tiles of interest ...')
    tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
    n_toi = len(tiles_of_interest)
    logger.info('Fetching tiles of interest ... done')
    # The rawr config supplies the zoom at which tiles are grouped.
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml, 'Missing rawr yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom, 'Missing rawr group-zoom'
    assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
    # Default the pyramid zoom range to [group_by_zoom, max_zoom + 1).
    if args.zoom_start is None:
        zoom_start = group_by_zoom
    else:
        zoom_start = args.zoom_start
    if args.zoom_stop is None:
        zoom_stop = cfg.max_zoom + 1
        # + 1 because exclusive
    else:
        zoom_stop = args.zoom_stop
    assert zoom_start >= group_by_zoom
    assert zoom_stop > zoom_start
    ungrouped = []
    coords_at_group_zoom = set()
    for coord_int in tiles_of_interest:
        coord = coord_unmarshall_int(coord_int)
        if coord.zoom < zoom_start:
            # Low-zoom coords are enqueued directly, with no pyramid.
            ungrouped.append(coord)
        if coord.zoom >= group_by_zoom:
            # Collapse to the containing tile at the grouping zoom; the
            # set de-duplicates coords that share a parent.
            coord_at_group_zoom = coord.zoomTo(group_by_zoom).container()
            coords_at_group_zoom.add(coord_at_group_zoom)
    pyramids = coord_pyramids(coords_at_group_zoom, zoom_start, zoom_stop)
    coords_to_enqueue = chain(ungrouped, pyramids)
    queue_writer = peripherals.queue_writer
    n_queued, n_in_flight = queue_writer.enqueue_batch(coords_to_enqueue)
    logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
    logger.info('%d tiles of interest processed' % n_toi)
|
def _wrap_coro_function_with_sem(self, coro_func):
    """Wrap *coro_func* with semaphore / interval (rate-limit) control."""
    sem = self.frequency.sem
    interval = self.frequency.interval

    async def _run_then_wait(args, kwargs):
        # Execute the wrapped coroutine, then honour the minimum interval.
        result = await coro_func(*args, **kwargs)
        if interval:
            await asyncio.sleep(interval)
        return result

    @wraps(coro_func)
    async def new_coro_func(*args, **kwargs):
        if sem:
            # NOTE: the interval sleep deliberately happens while the
            # semaphore is still held, so concurrency stays throttled
            # during the cool-down.
            async with sem:
                return await _run_then_wait(args, kwargs)
        return await _run_then_wait(args, kwargs)

    return new_coro_func
|
def default(self, user_input):
    '''if no other command was invoked'''
    try:
        # Treat the input as hex-encoded machine code and disassemble it.
        code = unhexlify(self.cleanup(user_input))
        for insn in self._cs.disasm(code, self.base_address):
            print("0x%08x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
    except CsError as e:
        print("Error: %s" % e)
|
def biselect(table, *args, **kwargs):
    """Return two tables: first the rows matching the selection, then the
    remaining rows. E.g.::

        >>> import petl as etl
        >>> table2, table3 = etl.biselect(table1, lambda rec: rec.foo == 'a')

    .. versionadded:: 1.1.0
    """
    # Run the same selection twice, once normally and once complemented.
    kwargs['complement'] = False
    selected = select(table, *args, **kwargs)
    kwargs['complement'] = True
    remainder = select(table, *args, **kwargs)
    return selected, remainder
|
def nanmedian(values, axis=None, skipna=True, mask=None):
    """NaN-aware median.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, np.nan, 2, 2])
    >>> nanops.nanmedian(s)
    2.0
    """
    def get_median(x):
        # Median of the non-NA entries; NaN when skipna is off and any
        # entry is NA.
        mask = notna(x)
        if not skipna and not mask.all():
            return np.nan
        return np.nanmedian(x[mask])

    values, mask, dtype, dtype_max, _ = _get_values(values, skipna, mask=mask)
    if not is_float_dtype(values):
        # Promote to float so NA positions can be marked with NaN.
        values = values.astype('f8')
        values[mask] = np.nan
    if axis is None:
        values = values.ravel()
    notempty = values.size
    # an array from a frame
    if values.ndim > 1:
        # there's a non-empty array to apply over otherwise numpy raises
        if notempty:
            if not skipna:
                return _wrap_results(
                    np.apply_along_axis(get_median, axis, values), dtype)
            # fastpath for the skipna case
            return _wrap_results(np.nanmedian(values, axis), dtype)
        # must return the correct shape, but median is not defined for the
        # empty set so return nans of shape "everything but the passed axis"
        # since "axis" is where the reduction would occur if we had a nonempty
        # array
        shp = np.array(values.shape)
        dims = np.arange(values.ndim)
        ret = np.empty(shp[dims != axis])
        ret.fill(np.nan)
        return _wrap_results(ret, dtype)
    # otherwise return a scalar value
    return _wrap_results(get_median(values) if notempty else np.nan, dtype)
|
def attach_disk(name=None, kwargs=None, call=None):
    '''
    Attach an existing disk to an existing instance.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a attach_disk myinstance disk_name=mydisk mode=READ_WRITE
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The attach_disk action must be called with -a or --action.')
    if not name:
        log.error('Must specify an instance name.')
        return False
    if not kwargs or 'disk_name' not in kwargs:
        log.error('Must specify a disk_name to attach.')
        return False
    node_name = name
    disk_name = kwargs['disk_name']
    mode = kwargs.get('mode', 'READ_WRITE').upper()
    boot = kwargs.get('boot', False)
    auto_delete = kwargs.get('auto_delete', False)
    # ``boot`` may arrive as a bool (API callers) or a string (CLI). The
    # previous code called ``.lower()`` unconditionally on any truthy
    # value, raising AttributeError for ``boot=True``.
    if isinstance(boot, str):
        boot = boot.lower() in ['true', 'yes', 'enabled']
    else:
        boot = bool(boot)
    if mode not in ['READ_WRITE', 'READ_ONLY']:
        log.error('Mode must be either READ_ONLY or (default) READ_WRITE.')
        return False
    conn = get_conn()
    node = conn.ex_get_node(node_name)
    disk = conn.ex_get_volume(disk_name)
    __utils__['cloud.fire_event'](
        'event', 'attach disk', 'salt/cloud/disk/attaching',
        args={'name': node_name, 'disk_name': disk_name,
              'mode': mode, 'boot': boot, },
        sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot,
                                ex_auto_delete=auto_delete)
    __utils__['cloud.fire_event'](
        'event', 'attached disk', 'salt/cloud/disk/attached',
        args={'name': node_name, 'disk_name': disk_name,
              'mode': mode, 'boot': boot, },
        sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    return result
|
def add_tokens_for_single(self, ignore=False):
    """Add the tokens for the single signature"""
    signature = self.single
    # Re-align output to the signature's own indentation level, then emit it.
    self.reset_indentation(self.indent_type * signature.indent)
    self.result.extend(self.tokens.make_single(signature.python_name, signature.args))
    # Optionally mark the signature as skipped and emit the skip tokens.
    if ignore:
        signature.skipped = True
        self.result.extend(self.tokens.test_skip)
    self.groups.finish_signature()
|
def user_loggedin(sender, **kwargs):
    """collect metrics about user logins"""
    request = kwargs['request']
    user = kwargs['user']
    # One metric point per successful login, tagged with who and where.
    write('user_logins', values={
        'value': 1,
        'path': request.path,
        'user_id': str(user.pk),
        'username': user.username,
    })
|
def read_message(self):
    """Return the next text or binary message from the socket.

    Reassembles a possibly fragmented WebSocket message: the first data
    frame carries TEXT/BINARY, any following fragments carry CONTINUATION,
    and control frames (PING/PONG/CLOSE) may be interleaved at any point.

    This is an internal method as calling this will not cleanup correctly
    if an exception is called. Use `receive` instead.
    """
    opcode = None   # opcode of the message currently being assembled
    message = None  # accumulated payload (text or bytes)
    while True:
        header, payload = self.read_frame()
        f_opcode = header.opcode
        if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY):
            # a new frame
            if opcode:
                # A data frame arrived while a fragmented message was still
                # in progress — fragments must use opcode 0 (continuation).
                raise WebSocketError("The opcode in non-fin frame is expected to be zero, got {0!r}".format(f_opcode))
            # Start reading a new message, reset the validator
            self.utf8validator.reset()
            self.utf8validate_last = (True, True, 0, 0)
            opcode = f_opcode
        elif f_opcode == self.OPCODE_CONTINUATION:
            # Continuation without a preceding TEXT/BINARY frame is invalid.
            if not opcode:
                raise WebSocketError("Unexpected frame with opcode=0")
        elif f_opcode == self.OPCODE_PING:
            # Control frames are handled inline and do not end the message.
            self.handle_ping(header, payload)
            continue
        elif f_opcode == self.OPCODE_PONG:
            self.handle_pong(header, payload)
            continue
        elif f_opcode == self.OPCODE_CLOSE:
            # Peer initiated close; no message to return.
            self.handle_close(header, payload)
            return
        else:
            raise WebSocketError("Unexpected opcode={0!r}".format(f_opcode))
        if opcode == self.OPCODE_TEXT:
            # Validate each text fragment incrementally as it arrives.
            self.validate_utf8(payload)
            if six.PY3:
                payload = payload.decode()
        if message is None:
            # Lazily pick the accumulator type to match the message kind.
            message = six.text_type() if opcode == self.OPCODE_TEXT else six.binary_type()
        message += payload
        if header.fin:
            break
    if opcode == self.OPCODE_TEXT:
        # Final whole-message UTF-8 validation (bytes on PY2, str on PY3).
        if six.PY2:
            self.validate_utf8(message)
        else:
            self.validate_utf8(message.encode())
        return message
    else:
        return bytearray(message)
|
def subscribe(
    self,
    plan,
    charge_immediately=True,
    application_fee_percent=None,
    coupon=None,
    quantity=None,
    metadata=None,
    tax_percent=None,
    billing_cycle_anchor=None,
    trial_end=None,
    trial_from_plan=None,
    trial_period_days=None,
):
    """Subscribes this customer to a plan.

    :param plan: The plan to which to subscribe the customer.
    :type plan: Plan or string (plan ID)
    :param charge_immediately: Whether or not to charge for the subscription upon creation.
        If False, an invoice will be created at the end of this period.
    :type charge_immediately: boolean
    :param application_fee_percent: This represents the percentage of the subscription invoice
        subtotal that will be transferred to the application owner's Stripe account.
        The request must be made with an OAuth key in order to set an application fee
        percentage.
    :type application_fee_percent: Decimal. Precision is 2; anything more will be ignored.
        A positive decimal between 1 and 100.
    :param coupon: The code of the coupon to apply to this subscription. A coupon applied to
        a subscription will only affect invoices created for that particular subscription.
    :type coupon: string
    :param quantity: The quantity applied to this subscription. Default is 1.
    :type quantity: integer
    :param metadata: A set of key/value pairs useful for storing additional information.
    :type metadata: dict
    :param tax_percent: This represents the percentage of the subscription invoice subtotal
        that will be calculated and added as tax to the final amount each billing period.
    :type tax_percent: Decimal. Precision is 2; anything more will be ignored. A positive
        decimal between 1 and 100.
    :param billing_cycle_anchor: A future timestamp to anchor the subscription's billing
        cycle. This is used to determine the date of the first full invoice, and, for plans
        with month or year intervals, the day of the month for subsequent invoices.
    :type billing_cycle_anchor: datetime
    :param trial_end: The end datetime of the trial period the customer will get before
        being charged for the first time. If set, this will override the default trial
        period of the plan the customer is being subscribed to. The special value ``now``
        can be provided to end the customer's trial immediately.
    :type trial_end: datetime
    :param trial_from_plan: Indicates if a plan's trial_period_days should be applied to the
        subscription. Setting trial_end per subscription is preferred, and this defaults to
        false. Setting this flag to true together with trial_end is not allowed.
    :type trial_from_plan: boolean
    :param trial_period_days: Integer representing the number of trial period days before
        the customer is charged for the first time. This will always overwrite any trials
        that might apply via a subscribed plan.
    :type trial_period_days: integer

    .. Notes:
    .. ``charge_immediately`` is only available on ``Customer.subscribe()``
    .. NOTE(review): the original note here compared ``Customer.subscribe()`` with itself;
       presumably it meant the low-level ``Subscription._api_create()`` path, where ``plan``
       can only be a string — confirm against upstream docs.
    """
    from .billing import Subscription

    # Accept either a Plan model instance or a raw plan ID string.
    if isinstance(plan, StripeModel):
        plan = plan.id
    stripe_subscription = Subscription._api_create(
        plan=plan,
        customer=self.id,
        application_fee_percent=application_fee_percent,
        coupon=coupon,
        quantity=quantity,
        metadata=metadata,
        billing_cycle_anchor=billing_cycle_anchor,
        tax_percent=tax_percent,
        trial_end=trial_end,
        trial_from_plan=trial_from_plan,
        trial_period_days=trial_period_days,
    )
    # Optionally invoice right away instead of waiting for the period end.
    if charge_immediately:
        self.send_invoice()
    return Subscription.sync_from_stripe_data(stripe_subscription)
|
def add_picture(self, p):
    """needs to look like this (under draw:page)

    <draw:frame draw:style-name="gr2" draw:text-style-name="P2" draw:layer="layout" svg:width="19.589cm" svg:height="13.402cm" svg:x="3.906cm" svg:y="4.378cm">
      <draw:image xlink:href="Pictures/10000201000002F800000208188B22AE.png" xlink:type="simple" xlink:show="embed" xlink:actuate="onLoad">
        <text:p text:style-name="P1"/>
      </draw:image>
    </draw:frame>
    """
    # Pictures hang off a draw:frame element containing a draw:image node.
    frame = PictureFrame(self, p)
    image_attrs = {
        "xlink:href": "Pictures/" + p.internal_name,
        "xlink:type": "simple",
        "xlink:show": "embed",
        "xlink:actuate": "onLoad",
    }
    frame.add_node("draw:image", attrib=image_attrs)
    self.pic_frame = frame
    # Register the picture with the presentation so it gets embedded.
    self._preso._pictures.append(p)
    self._page.append(frame.get_node())
|
def get_name():
    '''Get desktop environment or OS.

    Get the OS name or desktop environment.

    **List of Possible Values**

    | Windows                  | windows     |
    | Mac OS X                 | mac         |
    | GNOME 3+                 | gnome       |
    | GNOME 2                  | gnome2      |
    | XFCE                     | xfce4       |
    | KDE                      | kde         |
    | Unity                    | unity       |
    | LXDE                     | lxde        |
    | i3wm                     | i3          |
    | \\*box                   | \\*box      |
    | Trinity (KDE 3 fork)     | trinity     |
    | MATE                     | mate        |
    | IceWM                    | icewm       |
    | Pantheon (elementaryOS)  | pantheon    |
    | LXQt                     | lxqt        |
    | Awesome WM               | awesome     |
    | Enlightenment            | enlightenment |
    | AfterStep                | afterstep   |
    | WindowMaker              | windowmaker |
    | [Other]                  | unknown     |

    Returns:
        str: The name of the desktop environment or OS.
    '''
    if sys.platform in ['win32', 'cygwin']:
        return 'windows'
    if sys.platform == 'darwin':
        return 'mac'

    desktop_session = os.environ.get('XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
    if desktop_session is not None:
        desktop_session = desktop_session.lower()
        # Fix for X-Cinnamon etc
        if desktop_session.startswith('x-'):
            desktop_session = desktop_session.replace('x-', '')
        if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate', 'xfce4',
                               'lxde', 'fluxbox', 'blackbox', 'openbox',
                               'icewm', 'jwm', 'afterstep', 'trinity', 'kde',
                               'pantheon', 'i3', 'lxqt', 'awesome',
                               'enlightenment']:
            return desktop_session
        # -- Special cases -- #
        # Canonical sets the environment var to Lubuntu rather than LXDE if
        # using LXDE. There is no guarantee that they will not do the same
        # with the other desktop environments.
        elif 'xfce' in desktop_session:
            return 'xfce4'
        elif desktop_session.startswith('ubuntu'):
            return 'unity'
        elif desktop_session.startswith('xubuntu'):
            return 'xfce4'
        elif desktop_session.startswith('lubuntu'):
            return 'lxde'
        elif desktop_session.startswith('kubuntu'):
            return 'kde'
        elif desktop_session.startswith('razor'):
            return 'razor-qt'
        elif desktop_session.startswith('wmaker'):
            return 'windowmaker'
    # Fall back to legacy session markers and running-process heuristics.
    if os.environ.get('KDE_FULL_SESSION') == 'true':
        return 'kde'
    elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
        # Idiom fix: use "'x' not in y" rather than "not 'x' in y".
        if 'deprecated' not in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            return 'gnome2'
    elif is_running('xfce-mcs-manage'):
        return 'xfce4'
    elif is_running('ksmserver'):
        return 'kde'
    return 'unknown'
|
def load_scoring_function(scoring_func):
    """Convert ``mymodule.myfunc`` into the ``myfunc`` object itself
    so tpot receives a scoring function.

    :param scoring_func: dotted path string, plain scorer name, or falsy value
    :return: the imported callable for dotted paths; otherwise the input unchanged
    :raises ValueError: if the custom scoring function cannot be imported
    """
    if scoring_func and ("." in scoring_func):
        module_name, func_name = scoring_func.rsplit('.', 1)
        # Allow resolving modules relative to the current working directory.
        module_path = os.getcwd()
        sys.path.insert(0, module_path)
        try:
            scoring_func = getattr(import_module(module_name), func_name)
            print('manual scoring function: {}'.format(scoring_func))
            print('taken from module: {}'.format(module_name))
        except Exception as e:
            print('failed importing custom scoring function, error: {}'.format(str(e)))
            raise ValueError(e)
        finally:
            # BUG FIX: previously sys.path was only restored on success,
            # leaving the cwd permanently prepended after a failed import.
            sys.path.pop(0)
    return scoring_func
|
def prepend_items(self, items, **kwargs):
    """Method to prepend data to multiple :class:`~.Item` objects.

    .. seealso:: :meth:`append_items`
    """
    # Issue the batched prepend against the backend first.
    rv = self.prepend_multi(items, **kwargs)
    # NOTE(review): the dict keys here appear to be Item objects (they carry
    # .success and .value) and the values carry the prepended 'fragment' —
    # confirm against the Item/prepend_multi definitions.
    for k, v in items.dict.items():
        if k.success:
            # Mirror the server-side prepend on the local Item value.
            k.value = v['fragment'] + k.value
    return rv
|
def get_connection_state(self, connection: str) -> Dict[str, Any]:
    """For an already established connection return its state."""
    # Known connection: hand back its state directly.
    if connection in self.connections:
        return self.connections[connection].state
    raise ConnectionNotOpen(connection)
|
def get_optional_attrs(optional_attrs):
    """Prepare to store data from user-desired optional fields.

    Not loading these optional fields by default saves in space and speed.
    But allow the possibility for saving these fields, if the user desires,
    Including:
      comment consider def is_class_level is_metadata_tag is_transitive
      relationship replaced_by subset synonym transitive_over xref
    """
    recognized = {'def', 'defn', 'synonym', 'relationship', 'xref', 'subset', 'comment'}

    def canonical(field):
        # 'def' is an obo field name, but 'defn' is the legal Python attribute name.
        return "def" if field == "defn" else field

    if isinstance(optional_attrs, str) and optional_attrs in recognized:
        chosen = {canonical(optional_attrs)}
    else:
        chosen = {canonical(field) for field in optional_attrs if field in recognized}
    # Implicitly return None when nothing recognized was requested.
    if chosen:
        return chosen
|
def calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,
                              chunksize=defaults.CHUNKSIZE,
                              test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
                              test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
                              test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
                              test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
                              fdr_level=defaults.FDR_LEVEL,
                              hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):
    """Calculate the relevance table for the features contained in feature matrix `X`
    with respect to target vector `y`.

    For each feature an univariate significance test is conducted; the resulting
    p-values are then evaluated by the Benjamini-Hochberg procedure to decide which
    features to keep (we test H0 "feature is irrelevant / independent of the target"
    against H1 "feature is relevant").

    :param X: Feature matrix; may contain binary and real-valued features at once.
    :type X: pandas.DataFrame
    :param y: Target vector, binary or real-valued.
    :type y: pandas.Series or numpy.ndarray
    :param ml_task: `'classification'`, `'regression'` or `'auto'` (infer from `y`).
    :type ml_task: str
    :param test_for_binary_target_binary_feature: Test for binary target, binary feature (currently unused).
    :type test_for_binary_target_binary_feature: str
    :param test_for_binary_target_real_feature: Test for binary target, real feature.
    :type test_for_binary_target_real_feature: str
    :param test_for_real_target_binary_feature: Test for real target, binary feature (currently unused).
    :type test_for_real_target_binary_feature: str
    :param test_for_real_target_real_feature: Test for real target, real feature (currently unused).
    :type test_for_real_target_real_feature: str
    :param fdr_level: Theoretical expected percentage of irrelevant features among
        all created features.
    :type fdr_level: float
    :param hypotheses_independent: Can the significance of the features be assumed
        independent? Normally False (features like mean/median never are).
    :type hypotheses_independent: bool
    :param n_jobs: Number of processes for the p-value calculation (0 = serial).
    :type n_jobs: int
    :param chunksize: Size of one chunk submitted to a worker process; None lets
        the distributor pick a heuristic.
    :type chunksize: None or int
    :return: DataFrame indexed by feature with columns "feature", "type"
        (binary/real/constant), "p_value" and "relevant" (Benjamini-Hochberg verdict).
    :rtype: pandas.DataFrame
    """
    if ml_task not in ['auto', 'classification', 'regression']:
        raise ValueError('ml_task must be one of: \'auto\', \'classification\', \'regression\'')
    elif ml_task == 'auto':
        ml_task = infer_ml_task(y)
    # n_jobs == 0 means run serially with the builtin map; otherwise fan out
    # p-value computations to a process pool.
    if n_jobs == 0:
        map_function = map
    else:
        pool = Pool(n_jobs)
        map_function = partial(pool.map, chunksize=chunksize)
    relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))
    relevance_table['feature'] = relevance_table.index
    # Classify every column as binary / real / constant up front.
    relevance_table['type'] = pd.Series(
        map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),
        index=relevance_table.index
    )
    table_real = relevance_table[relevance_table.type == 'real'].copy()
    table_binary = relevance_table[relevance_table.type == 'binary'].copy()
    table_const = relevance_table[relevance_table.type == 'constant'].copy()
    # Constant features can never be relevant.
    table_const['p_value'] = np.NaN
    table_const['relevant'] = False
    # Short-circuit: every feature is constant, nothing to test.
    if len(table_const) == len(relevance_table):
        return table_const
    if ml_task == 'classification':
        # One-vs-rest: run the binary tests per class label and combine.
        tables = []
        for label in y.unique():
            _test_real_feature = partial(target_binary_feature_real_test, y=(y == label),
                                         test=test_for_binary_target_real_feature)
            _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))
            tmp = _calculate_relevance_table_for_implicit_target(
                table_real, table_binary, X, _test_real_feature, _test_binary_feature,
                hypotheses_independent, fdr_level, map_function)
            tables.append(tmp)
        relevance_table = combine_relevance_tables(tables)
    elif ml_task == 'regression':
        _test_real_feature = partial(target_real_feature_real_test, y=y)
        _test_binary_feature = partial(target_real_feature_binary_test, y=y)
        relevance_table = _calculate_relevance_table_for_implicit_target(
            table_real, table_binary, X, _test_real_feature, _test_binary_feature,
            hypotheses_independent, fdr_level, map_function)
    # Re-attach the (always irrelevant) constant features.
    relevance_table = pd.concat([relevance_table, table_const], axis=0)
    if n_jobs != 0:
        # NOTE(review): terminate() directly after close() is unusual — close()
        # already stops new work; confirm terminate() is intended here.
        pool.close()
        pool.terminate()
        pool.join()
    if sum(relevance_table['relevant']) == 0:
        _logger.warning("No feature was found relevant for {} for fdr level = {}. "
                        "Consider using a lower fdr level or other features."
                        .format(ml_task, fdr_level))
    return relevance_table
|
def call_with_retry(func: Callable, exceptions, max_retries: int, logger: Logger, *args, **kwargs):
    """Call a function and retry it on failure.

    Retries up to ``max_retries`` total attempts, sleeping with exponential
    backoff between attempts; the final failure propagates to the caller.
    """
    # All but the last attempt: swallow the error, back off, try again.
    for attempt in range(1, max_retries):
        try:
            return func(*args, **kwargs)
        except exceptions as exc:
            delay = exponential_backoff(attempt, cap=60)
            logger.warning('%s: retrying in %s', exc, delay)
            time.sleep(delay.total_seconds())
    # Last attempt: let any exception escape.
    return func(*args, **kwargs)
|
def is_decorated_with_property_setter(node):
    """Check if the function is decorated as a property setter.

    :param node: The node to check.
    :type node: astroid.nodes.FunctionDef
    :returns: True if the function is a property setter, False otherwise.
    :rtype: bool
    """
    decorators = node.decorators
    if not decorators:
        return False
    # A setter looks like "@<prop>.setter" — an Attribute node named "setter".
    return any(
        isinstance(dec, astroid.nodes.Attribute) and dec.attrname == "setter"
        for dec in decorators.nodes
    )
|
def read_line_stderr(self, time_limit=None):
    """Read a line from the process.

    On Windows, the time_limit has no effect, it always blocks.
    """
    # No running process means nothing to read.
    if self.proc is None:
        return None
    raw = self.proc.stderr.readline()
    return raw.decode()
|
def read(self, address, length_bytes, x, y, p=0):
    """Read a bytestring from an address in memory.

    Parameters
    ----------
    address : int
        The address at which to start reading the data.
    length_bytes : int
        The number of bytes to read from memory. Large reads are
        transparently broken into multiple SCP read commands.

    Returns
    -------
    :py:class:`bytes`
        The data read back from memory as a bytestring.
    """
    # Delegate the actual transfer to the SCPConnection for this chip.
    conn = self._get_connection(x, y)
    return conn.read(self.scp_data_length, self.scp_window_size,
                     x, y, p, address, length_bytes)
|
def poll_values():
    """Shows how to poll values from the subscription."""
    parameter = '/YSS/SIMULATOR/BatteryVoltage1'
    subscription = processor.create_parameter_subscription([parameter])
    # Poll twice, five seconds apart, printing the latest cached value.
    for _ in range(2):
        sleep(5)
        print('Latest value:')
        print(subscription.get_value(parameter))
|
def _on_new_location ( self , form ) :
"""Set a new desired location entered in the pop - up form ."""
|
self . _desired_longitude = float ( form . data [ "long" ] )
self . _desired_latitude = float ( form . data [ "lat" ] )
self . _desired_zoom = 13
self . _screen . force_update ( )
|
def repertoire(self, direction, mechanism, purview):
    """Return the cause or effect repertoire based on a direction.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism for which to calculate the
            repertoire.
        purview (tuple[int]): The purview over which to calculate the
            repertoire.

    Returns:
        np.ndarray: The cause or effect repertoire of the mechanism over
        the purview.

    Raises:
        ValueError: If ``direction`` is invalid.
    """
    handlers = {
        Direction.CAUSE: self.cause_repertoire,
        Direction.EFFECT: self.effect_repertoire,
    }
    handler = handlers.get(direction)
    if handler is not None:
        return handler(mechanism, purview)
    # Unknown direction: validate.direction raises for invalid values.
    return validate.direction(direction)
|
def encode_multipart_formdata(fields, files):
    """fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files

    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    parts = []
    # Plain form fields: one boundary-delimited section each.
    for (key, value) in fields:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value,
        ])
    # File uploads additionally carry a filename and a guessed content type.
    for (key, filename, value) in files:
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),
            'Content-Type: %s' % guess_content_type(filename),
            '',
            value,
        ])
    # Closing boundary plus trailing CRLF.
    parts.extend(['--' + BOUNDARY + '--', ''])
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
|
def slugify(value, allow_unicode=False):
    """Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.
    """
    # BUG FIX: the original had a bare no-op expression statement ``value``
    # here; it must coerce the input to str so non-string values (ints,
    # lazy translation proxies, ...) don't crash the regex calls below.
    value = str(value)
    if allow_unicode:
        # Keep unicode letters, just normalize the composition form.
        value = unicodedata.normalize('NFKC', value)
    else:
        # Transliterate to ASCII, dropping anything that can't be encoded.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    # Drop everything except word chars, whitespace and hyphens, then
    # collapse whitespace/hyphen runs into single hyphens.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)
|
def main(source, reference, downsample, steps, plot):
    """Given a source image and a reference image,
    Find the rio color formula which results in an
    output with similar histogram to the reference image.

    Uses simulated annealing to determine optimal settings.

    Increase the --downsample option to speed things up.
    Increase the --steps to get better results (longer runtime).
    """
    global fig, txt, imgs
    # --- Load and downsample the source image (bands 1-3 as RGB) ---
    click.echo("Reading source data...", err=True)
    with rasterio.open(source) as src:
        if downsample is None:
            ratio = calc_downsample(src.width, src.height)
        else:
            ratio = downsample
        w = int(src.width // ratio)
        h = int(src.height // ratio)
        rgb = src.read((1, 2, 3), out_shape=(3, h, w))
        orig_rgb = to_math_type(rgb)
    # --- Load and downsample the reference image the same way ---
    click.echo("Reading reference data...", err=True)
    with rasterio.open(reference) as ref:
        if downsample is None:
            ratio = calc_downsample(ref.width, ref.height)
        else:
            ratio = downsample
        # NOTE(review): the source block uses floor division (//) here while
        # this one uses true division (/) — confirm whether that's intended.
        w = int(ref.width / ratio)
        h = int(ref.height / ratio)
        rgb = ref.read((1, 2, 3), out_shape=(3, h, w))
        ref_rgb = to_math_type(rgb)
    # --- Run simulated annealing to match the histograms ---
    click.echo("Annealing...", err=True)
    est = ColorEstimator(orig_rgb, ref_rgb)
    if plot:
        # Optional live plot: source / current / best / reference panels,
        # updated by the estimator via the module-level fig/txt/imgs globals.
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(20, 10))
        fig.suptitle("Color Formula Optimization", fontsize=18, fontweight="bold")
        txt = fig.text(0.02, 0.05, "foo", family="monospace", fontsize=16)
        type(txt)
        axs = (
            fig.add_subplot(1, 4, 1),
            fig.add_subplot(1, 4, 2),
            fig.add_subplot(1, 4, 3),
            fig.add_subplot(1, 4, 4),
        )
        fig.tight_layout()
        axs[0].set_title("Source")
        axs[1].set_title("Current Formula")
        axs[2].set_title("Best Formula")
        axs[3].set_title("Reference")
        imgs.append(axs[0].imshow(reshape_as_image(est.src)))
        imgs.append(axs[1].imshow(reshape_as_image(est.src)))
        imgs.append(axs[2].imshow(reshape_as_image(est.src)))
        imgs.append(axs[3].imshow(reshape_as_image(est.ref)))
        fig.show()
    schedule = dict(
        tmax=25.0,          # Max (starting) temperature
        tmin=1e-4,          # Min (ending) temperature
        steps=steps,        # Number of iterations
        updates=steps / 20, # Number of updates
    )
    est.set_schedule(schedule)
    est.save_state_on_exit = False
    optimal, score = est.anneal()
    optimal["energy"] = score
    # Print the resulting rio color command line for the user to reuse.
    ops = est.cmd(optimal)
    click.echo("rio color -j4 {} {} {}".format(source, "/tmp/output.tif", ops))
|
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379, key='unique_counter', cycle_time=5, start_time=None, window=SECONDS_1_HOUR, roll=True, keep_max=12):
    '''Generate a new UniqueCounter.
    Useful for exactly counting unique objects

    @param redis_conn: A premade redis connection (overrides host and port)
    @param host: the redis host
    @param port: the redis port
    @param key: the key for your stats collection
    @param cycle_time: how often to check for expiring counts
    @param start_time: the time to start valid collection
    @param window: how long to collect data for in seconds (if rolling)
    @param roll: Roll the window after it expires, to continue collecting
                 on a new date based key.
    @keep_max: If rolling the static window, the max number of prior
               windows to keep
    '''
    # Collect the counter configuration, then build and wire it to redis.
    config = dict(
        key=key,
        cycle_time=cycle_time,
        start_time=start_time,
        window=window,
        roll=roll,
        keep_max=keep_max,
    )
    new_counter = UniqueCounter(**config)
    new_counter.setup(redis_conn=redis_conn, host=host, port=port)
    return new_counter
|
def register_view(self, view):
    """Called when the View was registered.

    Wires up the editor signal handlers and builds the canvas. Handler ids
    for 'focus-changed' and 'drag-motion' are kept so they can be
    blocked/disconnected later.
    """
    # Let the base controller do its registration first.
    super(GraphicalEditorController, self).register_view(view)
    self.view.connect('meta_data_changed', self._meta_data_changed)
    # Keep the handler id so the focus handler can be temporarily blocked.
    self.focus_changed_handler_id = self.view.editor.connect('focus-changed', self._move_focused_item_into_viewport)
    self.view.editor.connect("drag-data-received", self.on_drag_data_received)
    # Keep the handler id for the drag-motion handler as well.
    self.drag_motion_handler_id = self.view.editor.connect("drag-motion", self.on_drag_motion)
    self.setup_canvas()
|
def filter(self, func):
    """Returns a packet list filtered by a truth function. This truth
    function has to take a packet as the only argument and return a boolean value.
    """
    # noqa: E501
    kept = [pkt for pkt in self.res if func(pkt)]
    # Wrap the survivors in a new list of the same concrete type.
    return self.__class__(kept, name="filtered %s" % self.listname)
|
def user(self, match):
    """Return User object for a given Slack ID or name"""
    # Slack user IDs are exactly 9 characters and start with 'U';
    # anything else is treated as a username.
    looks_like_id = len(match) == 9 and match.startswith('U')
    field = 'id' if looks_like_id else 'name'
    return self._lookup(User, field, match)
|
def _handle_long_response(self, res):
    """Splits messages that are too long into multiple events.

    :param res: a slack response string or dict
    :returns: a list of responses, each within SLACK_MESSAGE_LIMIT
    """
    # NOTE(review): ``basestring`` only exists on Python 2 — this function
    # raises NameError on Python 3 unless the module defines a shim; confirm.
    is_rtm_message = isinstance(res, basestring)
    is_api_message = isinstance(res, dict)
    if is_rtm_message:
        text = res
    elif is_api_message:
        text = res['text']
    # NOTE(review): if res is neither a string nor a dict, ``text`` is
    # unbound here and len(text) raises — presumably callers guarantee one
    # of the two types.
    message_length = len(text)
    # Short enough: pass through unchanged as a single-element list.
    if message_length <= SLACK_MESSAGE_LIMIT:
        return [res]
    remaining_str = text
    responses = []
    while remaining_str:
        less_than_limit = len(remaining_str) < SLACK_MESSAGE_LIMIT
        if less_than_limit:
            # Take everything that's left (slicing with [:None] keeps it all).
            last_line_break = None
        else:
            # Split on the last newline that still fits within the limit.
            # NOTE(review): rfind returns -1 when no newline exists in the
            # window; the slices below then take [:-1] / [-1:], which drops
            # only one character per iteration — confirm intended behavior.
            last_line_break = remaining_str[:SLACK_MESSAGE_LIMIT].rfind('\n')
        if is_rtm_message:
            responses.append(remaining_str[:last_line_break])
        elif is_api_message:
            # Preserve all other keys of the API payload, swap in the chunk.
            template = res.copy()
            template['text'] = remaining_str[:last_line_break]
            responses.append(template)
        if less_than_limit:
            remaining_str = None
        else:
            remaining_str = remaining_str[last_line_break:]
    self.log.debug("_handle_long_response: splitting long response %s, returns: \n %s", pprint.pformat(res), pprint.pformat(responses))
    return responses
|
def load_config(self, **kwargs):
    """Load the configuration for the user or seed it with defaults.

    Also refreshes the bundled whitelists when the stored config is brand
    new or older than two weeks, and warns about missing credentials.

    :return: Boolean if successful
    """
    # Treat a missing config directory or file as a first run.
    virgin_config = False
    if not os.path.exists(CONFIG_PATH):
        virgin_config = True
        os.makedirs(CONFIG_PATH)
    if not os.path.exists(CONFIG_FILE):
        virgin_config = True
    if not virgin_config:
        self.config = json.load(open(CONFIG_FILE))
    else:
        # First run: build the whitelists before anything else.
        # NOTE(review): on the first-run path self.config is never assigned
        # here — presumably it is pre-seeded elsewhere (e.g. in __init__);
        # otherwise .update(kwargs) below would fail. Confirm.
        self.logger.info('[!] Processing whitelists, this may take a few minutes...')
        process_whitelists()
    # Caller-supplied overrides win over whatever was loaded.
    if kwargs:
        self.config.update(kwargs)
    # Persist when we seeded defaults or applied overrides.
    if virgin_config or kwargs:
        self.write_config()
    if 'api_key' not in self.config:
        sys.stderr.write('configuration missing API key\n')
    if 'email' not in self.config:
        sys.stderr.write('configuration missing email\n')
    if not ('api_key' in self.config and 'email' in self.config):
        sys.stderr.write('Errors have been reported. Run blockade-cfg '
                         'to fix these warnings.\n')
    try:
        # Refresh whitelists if the stored snapshot is older than 14 days.
        last_update = datetime.strptime(self.config['whitelist_date'], "%Y-%m-%d")
        current = datetime.now()
        delta = (current - last_update).days
        if delta > 14:
            self.logger.info('[!] Refreshing whitelists, this may take a few minutes...')
            process_whitelists()
            self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
            self.write_config()
    except Exception as e:
        # Missing/corrupt whitelist_date (or a failed refresh): rebuild from
        # scratch and stamp today's date.
        self.logger.error(str(e))
        self.logger.info('[!] Processing whitelists, this may take a few minutes...')
        process_whitelists()
        self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
        self.write_config()
    return True
|
def prepareInsert(self, oself, store):
    """Prepare for insertion into the database by making the dbunderlying
    attribute of the item a relative pathname with respect to the store
    rather than an absolute pathname.
    """
    if self.relative:
        # Current absolute path value of this attribute on the item.
        fspath = self.__get__(oself)
        # Re-filter through infilter so the stored (db) form becomes
        # store-relative; the tuple is (descriptor, db value) as the
        # __dirty__ bookkeeping expects.
        oself.__dirty__[self.attrname] = self, self.infilter(fspath, oself, store)
|
def t_OCTAL(t):
    r'[0-7]+[oO]'
    # ply uses this docstring as the token's regex.  The pattern matches one
    # or more octal digits followed by an 'o'/'O' radix suffix (the mangled
    # form "[0-7 ] + [ oO ]" would also have matched blanks).
    # Strip the radix suffix, then normalise to a generic NUMBER token whose
    # value is the integer parsed in base 8.
    t.value = t.value[:-1]
    t.type = 'NUMBER'
    t.value = int(t.value, 8)
    return t
|
def discretize_path(entities, vertices, path, scale=1.0):
    """Turn a list of entity indices into a path of connected points.

    Parameters
    ----------
    entities : (j,) entity objects
        Objects like 'Line', 'Arc', etc.
    vertices : (n, dimension) float
        Vertex points in space.
    path : (m,) int
        Indexes of entities
    scale : float
        Overall scale of drawing used for
        numeric tolerances in certain cases

    Returns
    -------
    discrete : (p, dimension) float
        Connected points in space that lie on the
        path and can be connected with line segments.
    """
    vertices = np.asanyarray(vertices)
    count = len(path)
    if count == 0:
        raise ValueError('Cannot discretize empty path!')
    if count == 1:
        # single entity: its discretization is the whole path
        discrete = np.asanyarray(entities[path[0]].discrete(vertices, scale=scale))
    else:
        last = count - 1
        pieces = []
        for index, entity_index in enumerate(path):
            points = entities[entity_index].discrete(vertices, scale=scale)
            # drop each entity's final point except on the last entity, so
            # consecutive entities don't contribute duplicate shared points
            pieces.append(points if index >= last else points[:-1])
        # stack all curves into one (n, dimension) polyline
        discrete = np.vstack(pieces)
    # force 2D curves to be counterclockwise; reversal breaks contiguity,
    # so copy back to a C-contiguous array
    if vertices.shape[1] == 2 and not is_ccw(discrete):
        discrete = np.ascontiguousarray(discrete[::-1])
    return discrete
|
def _actionsFreqsAngles(self, *args, **kwargs):
    """NAME:
       actionsFreqsAngles (_actionsFreqsAngles)
    PURPOSE:
       evaluate the actions, frequencies, and angles
       (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    INPUT:
       Either:
          a) R,vR,vT,z,vz[,phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
    OUTPUT:
       (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    HISTORY:
       2013-09-08 - Written - Bovy (IAS)
    """
    # phi is required to compute the azimuthal angle, so the 5-argument
    # (no-phi) call signature is rejected outright.
    if len(args) == 5:  # R,vR.vT,z,vz pragma: no cover
        raise IOError("You need to provide phi when calculating angles")
    elif len(args) == 6:  # R,vR.vT,z,vz,phi
        R, vR, vT, z, vz, phi = args
    else:
        # Orbit-instance input: let the parser fill in the phase-space values.
        self._parse_eval_args(*args)
        R = self._eval_R
        vR = self._eval_vR
        vT = self._eval_vT
        z = self._eval_z
        vz = self._eval_vz
        phi = self._eval_phi
    # Promote scalar input to length-1 arrays so the boolean-index
    # manipulations below work uniformly.
    if isinstance(R, float):
        R = nu.array([R])
        vR = nu.array([vR])
        vT = nu.array([vT])
        z = nu.array([z])
        vz = nu.array([vz])
        phi = nu.array([phi])
    if self._c:  # pragma: no cover
        pass
    else:
        # Angular-momentum components and total energy.
        Lz = R * vT
        Lx = -z * vT
        Ly = z * vR - R * vz
        L2 = Lx * Lx + Ly * Ly + Lz * Lz
        E = self._ip(R, z) + vR ** 2. / 2. + vT ** 2. / 2. + vz ** 2. / 2.
        L = nu.sqrt(L2)
        # Actions
        Jphi = Lz
        Jz = L - nu.fabs(Lz)
        Jr = self.amp / nu.sqrt(-2. * E) - 0.5 * (L + nu.sqrt((L2 + 4. * self.amp * self.b)))
        # Frequencies
        Omegar = (-2. * E) ** 1.5 / self.amp
        Omegaz = 0.5 * (1. + L / nu.sqrt(L2 + 4. * self.amp * self.b)) * Omegar
        Omegaphi = copy.copy(Omegaz)
        # Retrograde orbits (Lz < 0) circulate with opposite azimuthal frequency.
        indx = Lz < 0.
        Omegaphi[indx] *= -1.
        # Angles
        c = -self.amp / 2. / E - self.b
        e2 = 1. - L2 / self.amp / c * (1. + self.b / c)
        e = nu.sqrt(e2)
        if self.b == 0.:
            coseta = 1 / e * (1. - nu.sqrt(R ** 2. + z ** 2.) / c)
        else:
            s = 1. + nu.sqrt(1. + (R ** 2. + z ** 2.) / self.b ** 2.)
            coseta = 1 / e * (1. - self.b / c * (s - 2.))
        # Clamp tiny numerical overshoots so |cos(eta)| <= 1 before arccos.
        pindx = (coseta > 1.) * (coseta < (1. + 10. ** -7.))
        coseta[pindx] = 1.
        pindx = (coseta < -1.) * (coseta > (-1. - 10. ** -7.))
        coseta[pindx] = -1.
        eta = nu.arccos(coseta)
        costheta = z / nu.sqrt(R ** 2. + z ** 2.)
        sintheta = R / nu.sqrt(R ** 2. + z ** 2.)
        # Inbound radial motion maps eta into (pi, 2pi).
        vrindx = (vR * sintheta + vz * costheta) < 0.
        eta[vrindx] = 2. * nu.pi - eta[vrindx]
        angler = eta - e * c / (c + self.b) * nu.sin(eta)
        tan11 = nu.arctan(nu.sqrt((1. + e) / (1. - e)) * nu.tan(0.5 * eta))
        tan12 = nu.arctan(nu.sqrt((1. + e + 2. * self.b / c) / (1. - e + 2. * self.b / c)) * nu.tan(0.5 * eta))
        vzindx = (-vz * sintheta + vR * costheta) > 0.
        tan11[tan11 < 0.] += nu.pi
        tan12[tan12 < 0.] += nu.pi
        # Clamp |Lz/L| <= 1 before taking the inclination arccos.
        pindx = (Lz / L > 1.) * (Lz / L < (1. + 10. ** -7.))
        Lz[pindx] = L[pindx]
        pindx = (Lz / L < -1.) * (Lz / L > (-1. - 10. ** -7.))
        Lz[pindx] = -L[pindx]
        i = nu.arccos(Lz / L)
        sinpsi = costheta / nu.sin(i)
        pindx = (sinpsi > 1.) * (sinpsi < (1. + 10. ** -7.))
        sinpsi[pindx] = 1.
        pindx = (sinpsi < -1.) * (sinpsi > (-1. - 10. ** -7.))
        sinpsi[pindx] = -1.
        psi = nu.arcsin(sinpsi)
        psi[vzindx] = nu.pi - psi[vzindx]
        psi = psi % (2. * nu.pi)
        anglez = psi + Omegaz / Omegar * angler - tan11 - 1. / nu.sqrt(1. + 4 * self.amp * self.b / L2) * tan12
        sinu = z / R / nu.tan(i)
        pindx = (sinu > 1.) * (sinu < (1. + 10. ** -7.))
        sinu[pindx] = 1.
        pindx = (sinu < -1.) * (sinu > (-1. - 10. ** -7.))
        sinu[pindx] = -1.
        u = nu.arcsin(sinu)
        u[vzindx] = nu.pi - u[vzindx]
        Omega = phi - u
        anglephi = Omega
        # Prograde and retrograde orbits pick up anglez with opposite signs.
        anglephi[indx] -= anglez[indx]
        anglephi[True ^ indx] += anglez[True ^ indx]
        # Wrap all angles into [0, 2pi).
        angler = angler % (2. * nu.pi)
        anglephi = anglephi % (2. * nu.pi)
        anglez = anglez % (2. * nu.pi)
        return (Jr, Jphi, Jz, Omegar, Omegaphi, Omegaz, angler, anglephi, anglez)
|
def matches(self, properties):
    """Check whether *properties* satisfies this LDAP filter.

    :param properties: A dictionary of properties
    :return: True if the properties matches this filter, else False
    """
    # Evaluate sub-filters lazily: any()/all() short-circuit on the
    # generator, which avoids testing more criteria than necessary.
    results = (subfilter.matches(properties) for subfilter in self.subfilters)
    matched = any(results) if self.operator == OR else all(results)
    # A NOT filter aggregates like AND, then inverts the outcome.
    if self.operator == NOT:
        return not matched
    return matched
|
def get_default_account_data(self) -> AccountData:
    """Return the default account held by this WalletManager.

    :return: the AccountData entry flagged as the default account.
    :raises SDKException: if a non-AccountData entry is found in memory,
        or if no account is marked as default.
    """
    for account in self.wallet_in_mem.accounts:
        # Every in-memory entry must be a well-formed AccountData record.
        if not isinstance(account, AccountData):
            raise SDKException(ErrorCode.other_error('Invalid account data in memory.'))
        if account.is_default:
            return account
    raise SDKException(ErrorCode.get_default_account_err)
|
def getLoggerWithNullHandler(logger_name):
    """Get the logger initialized with the `logger_name`
    and a NullHandler.

    :param logger_name: dotted logger name passed to ``logging.getLogger``
    :return: the (singleton) logger instance for that name
    """
    logger = logging.getLogger(logger_name)
    if not logger.handlers:
        # Use the stdlib NullHandler (available since Python 2.7/3.1)
        # rather than relying on a hand-rolled/back-ported one.
        logger.addHandler(logging.NullHandler())
    return logger
|
def get_client_nowait(self):
    """Get a Client object (not necessarily connected) without blocking.

    Returns:
        A Client instance (not necessarily connected), or None when
        max_size has been reached.
    """
    sem = self.__sem
    if sem is not None:
        # NOTE(review): peeking at the semaphore's private _value counter is
        # inherently racy; presumably acceptable in a single-threaded
        # event-loop context — confirm.
        if sem._value == 0:
            return None
        sem.acquire()
    _, client = self._get_client_from_pool_or_make_it()
    return client
|
def com_google_fonts_check_whitespace_glyphnames(ttFont):
    """Font has **proper** whitespace glyph names?"""
    from fontbakery.utils import get_glyph_name

    def encodings_for(font, names):
        # Collect every unicode codepoint whose cmap entry maps to one of
        # the given glyph names.
        found = set()
        for subtable in font['cmap'].tables:
            if subtable.isUnicode():
                found.update(cp for cp, glyph in subtable.cmap.items()
                             if glyph in names)
        return found

    if ttFont['post'].formatType == 3.0:
        # Format-3 post tables carry no glyph names, so there is nothing
        # to check.
        yield SKIP, "Font has version 3 post table."
    else:
        passed = True
        space_enc = encodings_for(ttFont, ["uni0020", "space"])
        nbsp_enc = encodings_for(
            ttFont, ["uni00A0", "nonbreakingspace", "nbspace", "nbsp"])
        space = get_glyph_name(ttFont, 0x0020)
        if 0x0020 not in space_enc:
            passed = False
            yield FAIL, Message("bad20", ("Glyph 0x0020 is called \"{}\":" " Change to \"space\"" " or \"uni0020\"").format(space))
        nbsp = get_glyph_name(ttFont, 0x00A0)
        if 0x00A0 not in nbsp_enc:
            if 0x00A0 in space_enc:
                # This is OK: some fonts use the same glyph for both
                # space and nbsp.
                pass
            else:
                passed = False
                yield FAIL, Message("badA0", ("Glyph 0x00A0 is called \"{}\":" " Change to \"nbsp\"" " or \"uni00A0\"").format(nbsp))
        if passed:
            yield PASS, "Font has **proper** whitespace glyph names."
|
def get_stars_of_children_of(self, component):
    """same as get_children_of except if any of the children are orbits, this will recursively
    follow the tree to return a list of all children (grandchildren, etc) stars under that orbit"""
    stars = self.get_stars()
    orbits = self.get_orbits()
    collected = []
    for child in self.get_children_of(component):
        if child in stars:
            collected.append(child)
        elif child in orbits:
            # recurse into the orbit to pick up grandchildren (etc) stars
            collected.extend(self.get_stars_of_children_of(child))
        # anything else (envelope, eventually spot, ring, ...) is skipped
    return collected
|
def _retry_deliveries(self):
    """Handle [MQTT-4.4.0-1] by resending PUBLISH and PUBREL messages for pending out messages
    :return:"""
    self.logger.debug("Begin messages delivery retries")
    pending_messages = itertools.chain(
        self.session.inflight_in.values(),
        self.session.inflight_out.values())
    # Give each in-flight message flow at most 10 seconds to complete.
    tasks = [asyncio.wait_for(self._handle_message_flow(message), 10, loop=self._loop)
             for message in pending_messages]
    if tasks:
        done, pending = yield from asyncio.wait(tasks, loop=self._loop)
        self.logger.debug("%d messages redelivered" % len(done))
        self.logger.debug("%d messages not redelivered due to timeout" % len(pending))
    self.logger.debug("End messages delivery retries")
|
def parse_network_data(data_packet=None, include_filter_key=None, filter_keys=[], record_tcp=True, record_udp=True, record_arp=True, record_icmp=True):
    """build_node

    Parse one raw layer-2 frame into a flat ``node`` dictionary describing
    the Ethernet header plus (depending on the frame) the IP/TCP/UDP/ICMP
    or ARP headers and payload.

    :param data_packet: raw recvfrom data
    :param include_filter_key: appended to the serialized JSON stream
    :param filter_keys: list of strings to filter
                        and remove baby-birding
                        packets to yourself
    :param record_tcp: want to record TCP frames?
    :param record_udp: want to record UDP frames?
    :param record_arp: want to record ARP frames?
    :param record_icmp: want to record ICMP frames?
    """
    # NOTE(review): the record_* flags are accepted but never consulted in
    # this body — confirm whether they are meant to gate parsing.
    # NOTE(review): the dict below seeds an "err" key, but all code paths
    # write node["error"]; "err" appears to be dead — confirm before removal.
    node = {
        # bookkeeping
        "id": build_key(), "data_type": UNKNOWN,
        # ethernet header
        "eth_protocol": None, "eth_src_mac": None, "eth_dst_mac": None,
        "eth_length": SIZE_ETH_HEADER,
        # ip header
        "ip_version_ih1": None, "ip_version": None, "ip_ih1": None,
        "ip_hdr_len": None, "ip_tos": None, "ip_tlen": None, "ip_id": None,
        "ip_frag_off": None, "ip_ttl": None, "ip_protocol": None,
        "ip_src_addr": None, "ip_dst_addr": None,
        # tcp header
        "tcp_src_port": None, "tcp_dst_port": None, "tcp_sequence": None,
        "tcp_ack": None, "tcp_resrve": None, "tcp_data_offset": None,
        "tcp_flags": None, "tcp_adwind": None, "tcp_urg_ptr": None,
        "tcp_ffin": None, "tcp_fsyn": None, "tcp_frst": None,
        "tcp_fpsh": None, "tcp_fack": None, "tcp_furg": None,
        "tcp_header_size": None, "tcp_data_size": None, "tcp_data": None,
        # udp header
        "udp_header_size": None, "udp_data_size": None, "udp_src_port": None,
        "udp_dst_port": None, "udp_data_len": None, "udp_csum": None,
        "udp_data": None,
        # icmp header
        "icmp_header_size": None, "icmp_data": None, "icmp_type": None,
        "icmp_code": None, "icmp_csum": None, "icmp_data_size": None,
        # arp header
        "arp_header_size": None, "arp_data": None, "arp_hw_type": None,
        "arp_proto_type": None, "arp_hw_size": None, "arp_proto_size": None,
        "arp_opcode": None, "arp_src_mac": None, "arp_src_ip": None,
        "arp_dst_mac": None, "arp_dst_ip": None, "arp_data_size": None,
        # payload / result
        "target_data": None, "full_offset": None, "eth_header_size": None,
        "ip_header_size": None, "err": "", "stream": None, "filtered": None,
        "status": INVALID}
    # 'err' tracks the stage we are in so the except handler can report
    # where parsing failed.
    err = "no_data"
    if not data_packet:
        node["error"] = err
        return node
    try:
        err = "missing_packet"
        packet = data_packet[0]
        if len(packet) < 21:
            # too short to even hold an Ethernet header + minimal payload
            node["status"] = INVALID
            node["error"] = "invalid packet={}".format(packet)
            return node
        err = "failed_parsing_ethernet"
        eth_packet_min = 0
        eth_packet_max = eth_packet_min + node["eth_length"]
        log.info(("unpacking ETH[{}:{}]").format(eth_packet_min, eth_packet_max))
        eth_datagram = packet[eth_packet_min:eth_packet_max]
        eth_header = unpack(ETH_HEADER_FORMAT, eth_datagram)
        node["eth_protocol"] = socket.ntohs(eth_header[2])
        node["eth_src_mac"] = eth_addr(packet[0:6])
        node["eth_dst_mac"] = eth_addr(packet[6:12])
        log.debug(("eth src={} dst={} proto={}").format(node["eth_src_mac"], node["eth_dst_mac"], node["eth_protocol"]))
        node["eth_header_size"] = SIZE_ETH_HEADER
        # Is this an IP packet:
        if node["eth_protocol"] == IP_PROTO_ETH:
            ip_packet_min = SIZE_ETH_HEADER
            ip_packet_max = SIZE_ETH_HEADER + 20
            log.info(("unpacking IP[{}:{}]").format(ip_packet_min, ip_packet_max))
            err = ("failed_parsing_IP[{}:{}]").format(ip_packet_min, ip_packet_max)
            # take the first 20 characters for the IP header
            ip_datagram = packet[ip_packet_min:ip_packet_max]
            ip_header = unpack(IP_HEADER_FORMAT, ip_datagram)
            # https://docs.python.org/2/library/struct.html#format-characters
            node["ip_header_size"] = SIZE_IP_HEADER
            # first byte packs version (high nibble) and IHL (low nibble)
            node["ip_version_ih1"] = ip_header[0]
            node["ip_version"] = node["ip_version_ih1"] >> 4
            node["ip_ih1"] = node["ip_version_ih1"] & 0xF
            node["ip_hdr_len"] = node["ip_ih1"] * 4
            node["ip_tos"] = ip_header[1]
            node["ip_tlen"] = ip_header[2]
            node["ip_id"] = ip_header[3]
            node["ip_frag_off"] = ip_header[4]
            node["ip_ttl"] = ip_header[5]
            node["ip_protocol"] = ip_header[6]
            node["ip_src_addr"] = socket.inet_ntoa(ip_header[8])
            node["ip_dst_addr"] = socket.inet_ntoa(ip_header[9])
            log.debug("-------------------------------------------")
            log.debug("IP Header - Layer 3")
            log.debug("")
            log.debug(" - Version: {}".format(node["ip_version"]))
            log.debug(" - HDR Len: {}".format(node["ip_ih1"]))
            log.debug(" - TOS: {}".format(node["ip_tos"]))
            log.debug(" - ID: {}".format(node["ip_id"]))
            log.debug(" - Frag: {}".format(node["ip_frag_off"]))
            log.debug(" - TTL: {}".format(node["ip_ttl"]))
            log.debug(" - Proto: {}".format(node["ip_protocol"]))
            log.debug(" - Src IP: {}".format(node["ip_src_addr"]))
            log.debug(" - Dst IP: {}".format(node["ip_dst_addr"]))
            log.debug("-------------------------------------------")
            log.debug("")
            tcp_data = None
            udp_data = None
            arp_data = None
            icmp_data = None
            target_data = None
            eh = node["eth_header_size"]
            ih = node["ip_header_size"]
            log.debug(("parsing ip_protocol={} data").format(node["ip_protocol"]))
            if node["ip_protocol"] == TCP_PROTO_IP:
                packet_min = node["eth_length"] + node["ip_hdr_len"]
                packet_max = packet_min + 20
                # unpack the TCP packet
                log.info(("unpacking TCP[{}:{}]").format(packet_min, packet_max))
                err = ("failed_parsing_TCP[{}:{}]").format(packet_min, packet_max)
                tcp_datagram = packet[packet_min:packet_max]
                log.debug(("unpacking TCP Header={}").format(tcp_datagram))
                # unpack the TCP packet
                tcp_header = unpack(TCP_HEADER_FORMAT, tcp_datagram)
                node["tcp_src_port"] = tcp_header[0]
                node["tcp_dst_port"] = tcp_header[1]
                node["tcp_sequence"] = tcp_header[2]
                node["tcp_ack"] = tcp_header[3]
                node["tcp_resrve"] = tcp_header[4]
                node["tcp_data_offset"] = node["tcp_resrve"] >> 4
                node["tcp_flags"] = tcp_header[5]
                node["tcp_adwind"] = tcp_header[6]
                node["tcp_urg_ptr"] = tcp_header[7]
                # parse TCP flags
                flag_data = unshift_flags(node["tcp_flags"])
                node["tcp_ffin"] = flag_data[0]
                node["tcp_fsyn"] = flag_data[1]
                node["tcp_frst"] = flag_data[2]
                node["tcp_fpsh"] = flag_data[3]
                node["tcp_fack"] = flag_data[4]
                node["tcp_furg"] = flag_data[5]
                # process the TCP options if there are
                # currently just skip it
                node["tcp_header_size"] = SIZE_TCP_HEADER
                log.debug(("src={} dst={} seq={} ack={} doff={} flags={} " "f urg={} fin={} syn={} rst={} " "psh={} fack={} urg={}").format(node["tcp_src_port"], node["tcp_dst_port"], node["tcp_sequence"], node["tcp_ack"], node["tcp_data_offset"], node["tcp_flags"], node["tcp_urg_ptr"], node["tcp_ffin"], node["tcp_fsyn"], node["tcp_frst"], node["tcp_fpsh"], node["tcp_fack"], node["tcp_furg"]))
                err = "failed_tcp_data"
                node["data_type"] = TCP
                # real header size = IP header + TCP data offset (in words)
                node["tcp_header_size"] = (node["ip_hdr_len"] + (node["tcp_data_offset"] * 4))
                node["tcp_data_size"] = len(packet) - node["tcp_header_size"]
                th = node["tcp_header_size"]
                node["full_offset"] = eh + ih + th
                log.info(("TCP Data size={} th1={} th2={} " "offset={} value={}").format(node["tcp_data_size"], node["ip_hdr_len"], node["tcp_header_size"], node["full_offset"], tcp_data))
                err = "failed_tcp_data_offset"
                tcp_data = packet[node["full_offset"]:]
                target_data = tcp_data
                node["error"] = ""
                node["status"] = VALID
            elif node["ip_protocol"] == UDP_PROTO_IP:
                packet_min = node["eth_length"] + node["ip_hdr_len"]
                packet_max = packet_min + 8
                # unpack the UDP packet
                log.info(("unpacking UDP[{}:{}]").format(packet_min, packet_max))
                err = ("failed_parsing_UDP[{}:{}]").format(packet_min, packet_max)
                udp_datagram = packet[packet_min:packet_max]
                log.info(("unpacking UDP Header={}").format(udp_datagram))
                udp_header = unpack(UDP_HEADER_FORMAT, udp_datagram)
                node["udp_header_size"] = SIZE_UDP_HEADER
                node["udp_src_port"] = udp_header[0]
                node["udp_dst_port"] = udp_header[1]
                node["udp_data_len"] = udp_header[2]
                node["udp_csum"] = udp_header[3]
                node["data_type"] = UDP
                uh = node["udp_header_size"]
                node["full_offset"] = eh + ih + uh
                node["udp_data_size"] = len(packet) - node["udp_header_size"]
                log.info(("UDP Data size={} th1={} th2={} " "offset={} value={}").format(node["udp_data_size"], node["ip_hdr_len"], node["udp_header_size"], node["full_offset"], udp_data))
                err = "failed_udp_data_offset"
                udp_data = packet[node["full_offset"]:]
                target_data = udp_data
                node["error"] = ""
                node["status"] = VALID
            elif node["ip_protocol"] == ICMP_PROTO_IP:  # unpack the ICMP packet
                packet_min = node["eth_length"] + node["ip_hdr_len"]
                packet_max = packet_min + 4
                log.info(("unpacking ICMP[{}:{}]").format(packet_min, packet_max))
                err = ("failed_parsing_ICMP[{}:{}]").format(packet_min, packet_max)
                icmp_datagram = packet[packet_min:packet_max]
                log.info(("unpacking ICMP Header={}").format(icmp_datagram))
                icmp_header = unpack(ICMP_HEADER_FORMAT, icmp_datagram)
                node["icmp_header_size"] = SIZE_ICMP_HEADER
                node["icmp_type"] = icmp_header[0]
                node["icmp_code"] = icmp_header[1]
                node["icmp_csum"] = icmp_header[2]
                node["data_type"] = ICMP
                ah = node["icmp_header_size"]
                node["full_offset"] = eh + ih + ah
                node["icmp_data_size"] = len(packet) - node["icmp_header_size"]
                log.info(("ICMP Data size={} th1={} th2={} " "offset={} value={}").format(node["icmp_data_size"], node["ip_hdr_len"], node["icmp_header_size"], node["full_offset"], icmp_data))
                err = "failed_icmp_data_offset"
                icmp_data = packet[node["full_offset"]:]
                target_data = icmp_data
                node["error"] = ""
                node["status"] = VALID
            else:
                node["error"] = ("unsupported_ip_protocol={}").format(node["ip_protocol"])
                node["status"] = IP_UNSUPPORTED
            # end of parsing supported protocols the final node data
            if node["status"] == VALID:
                log.debug("filtering")
                # filter out delimiters in the last 64 bytes
                if filter_keys:
                    err = "filtering={}".format(len(filter_keys))
                    log.debug(err)
                    for f in filter_keys:
                        if target_data:
                            if str(f) in str(target_data):
                                log.info(("FOUND filter={} " "in data={}").format(f, target_data))
                                node["error"] = "filtered"
                                node["status"] = FILTERED
                                node["filtered"] = f
                                break
                # end of tagging packets to filter out of the
                # network-pipe stream
                # if there are filters
                log.debug(("was filtered={}").format(node["filtered"]))
                if not node["filtered"]:
                    err = "building_stream"
                    log.debug(("building stream target={}").format(target_data))
                    stream_size = 0
                    if target_data:
                        try:  # convert to hex string
                            err = ("concerting target_data to " "hex string")
                            node["target_data"] = target_data.hex()
                        except Exception as e:
                            # fall back to the raw payload when .hex() fails
                            log.info(("failed converting={} to " "utf-8 ex={}").format(target_data, e))
                            err = "str target_data"
                            node["target_data"] = target_data
                        # end of try/ex
                        stream_size += len(node["target_data"])
                    # end of target_data
                    log.debug(("serializing stream={}").format(node["target_data"]))
                    node_json = json.dumps(node)
                    data_stream = str("{} {}").format(node_json, include_filter_key)
                    log.debug("compressing")
                    if stream_size:
                        node["stream"] = data_stream
                    # end of building the stream
                    log.debug("valid")
            else:
                log.error(("unsupported ip frame ip_protocol={}").format(node["ip_protocol"]))
            # end of supported IP packet protocol or not
        elif node["eth_protocol"] == ARP_PROTO_ETH:
            arp_packet_min = SIZE_ETH_HEADER
            arp_packet_max = SIZE_ETH_HEADER + 28
            log.info(("unpacking ARP[{}:{}]").format(arp_packet_min, arp_packet_max))
            err = ("failed_parsing_ARP[{}:{}]").format(arp_packet_min, arp_packet_max)
            # take the first 28 characters for the ARP header
            arp_datagram = packet[arp_packet_min:arp_packet_max]
            arp_header = unpack(ARP_HEADER_FORMAT, arp_datagram)
            # https://docs.python.org/2/library/struct.html#format-characters
            node["arp_header_size"] = SIZE_ARP_HEADER
            node["arp_hw_type"] = arp_header[0].hex()
            node["arp_proto_type"] = arp_header[1].hex()
            node["arp_hw_size"] = arp_header[2].hex()
            node["arp_proto_size"] = arp_header[3].hex()
            node["arp_opcode"] = arp_header[4].hex()
            node["arp_src_mac"] = arp_header[5].hex()
            node["arp_src_ip"] = socket.inet_ntoa(arp_header[6])
            node["arp_dst_mac"] = arp_header[7].hex()
            node["arp_dst_ip"] = socket.inet_ntoa(arp_header[8])
            arp_data = ""
            node["arp_data"] = arp_data
            node["target_data"] = arp_data
            node["data_type"] = ARP
            node["status"] = VALID
            node["arp_data_size"] = len(packet) - node["arp_header_size"]
            node_json = json.dumps(node)
            data_stream = str("{} {}").format(node_json, include_filter_key)
            node["stream"] = data_stream
        else:
            node["error"] = ("unsupported eth_frame protocol={}").format(node["eth_protocol"])
            node["status"] = ETH_UNSUPPORTED
            log.error(node["error"])
        # end of supported ETH packet or not
    except Exception as e:
        node["status"] = ERROR
        node["error"] = "err={} failed parsing frame ex={}".format(err, e)
        log.error(node["error"])
    # end of try/ex
    return node
|
def _title_uptodate ( self , fullfile , pid , _title ) :
"""Check fb photo title against provided title ,
returns true if they match"""
|
i = self . fb . get_object ( pid )
if i . has_key ( 'name' ) :
if _title == i [ 'name' ] :
return True
return False
|
def launched():
    """Test whether the current python environment is the correct lore env.
    :return: :any:`True` if the environment is launched
    :rtype: bool"""
    # Without a configured PREFIX there is nothing to compare against.
    if not PREFIX:
        return False
    # Resolve symlinks on both sides before comparing interpreter prefixes.
    expected = os.path.realpath(PREFIX)
    actual = os.path.realpath(sys.prefix)
    return actual == expected
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.