signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
    """Verify that SSL is enabled on every rmq juju sentry unit.

    :param sentry_units: list of all rmq sentry units
    :param port: optional ssl port override to validate
    :returns: None if successful, otherwise an error message string
    """
    for unit in sentry_units:
        if self.rmq_ssl_is_enabled_on_unit(unit, port=port):
            continue
        unit_name = unit.info['unit_name']
        return ('Unexpected condition: ssl is disabled on unit '
                '({})'.format(unit_name))
    return None
def detectPalmOS(self):
    """Return detection of a PalmOS device.

    Detects if the current browser is on a PalmOS device.
    """
    # Most devices nowadays report as 'Palm', but some older ones
    # reported as Blazer or Xiino.
    palm_engines = (UAgentInfo.devicePalm,
                    UAgentInfo.engineBlazer,
                    UAgentInfo.engineXiino)
    if any(engine in self.__userAgent for engine in palm_engines):
        # Make sure it's not WebOS.
        return not self.detectPalmWebOS()
    return False
def s2dctmat(nfilt, ncep, freqstep):
    """Return the 'legacy' not-quite-DCT matrix used by Sphinx.

    :param nfilt: number of filterbank channels
    :param ncep: number of cepstral coefficients
    :param freqstep: unused; kept for signature compatibility
    :returns: (ncep, nfilt) float64 matrix
    """
    # Filter-centre sample points: 0.5, 1.5, ..., nfilt - 0.5
    centers = numpy.arange(0.5, float(nfilt) + 0.5, 1.0, 'double')
    melcos = numpy.empty((ncep, nfilt), 'double')
    for row in range(ncep):
        melcos[row] = numpy.cos(numpy.pi * float(row) / nfilt * centers)
    # The legacy transform halves the first column.
    melcos[:, 0] *= 0.5
    return melcos
def _secondary_values ( self ) :
"""Getter for secondary series values ( flattened )""" | return [ val for serie in self . secondary_series for val in serie . values if val is not None ] |
def combine(self, merge_points=False):
    """Appends all blocks into a single unstructured grid.

    Parameters
    ----------
    merge_points : bool, optional
        Merge coincidental points.
    """
    # vtkAppendFilter concatenates every block of this dataset.
    append_filter = vtk.vtkAppendFilter()
    append_filter.SetMergePoints(merge_points)
    for block in self:
        append_filter.AddInputData(block)
    append_filter.Update()
    return wrap(append_filter.GetOutputDataObject(0))
def as_list(x):
    '''Ensure `x` is of list type.

    ``None`` becomes the empty list; a non-sequence value becomes a
    one-element list; any sequence is copied into a new list. Note that
    strings are sequences and are therefore split into characters.
    '''
    if x is None:
        return []
    if isinstance(x, Sequence):
        return list(x)
    return [x]
def stream_uploadfactory(content_md5=None, content_length=None, content_type=None):
    """Get default put factory.

    If Content-Type is ``'multipart/form-data'`` then the request is
    aborted with ``422``, since multipart bodies cannot be streamed
    verbatim.

    :param content_md5: The content MD5. (Default: ``None``)
    :param content_length: The content length. (Default: ``None``)
    :param content_type: The HTTP Content-Type. (Default: ``None``)
    :returns: The stream, content length, MD5 of the content, and the
        parsed header tags.
    """
    # Guard against a missing Content-Type header: the documented default
    # is None, and calling .startswith() on None raises TypeError.
    if content_type and content_type.startswith('multipart/form-data'):
        abort(422)
    return request.stream, content_length, content_md5, parse_header_tags()
def find_table(self, table):
    """Finds a table by name or alias. The FROM tables and JOIN tables
    are included in the search.

    :type table: str or :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: string of the table name or alias or a ModelBase instance
    :return: The table if it is found, otherwise None
    :rtype: Table or None
    """
    wanted = TableFactory(table).get_identifier()
    # Search FROM tables first, then the right side of each JOIN.
    candidates = list(self.tables)
    candidates.extend(join_item.right_table for join_item in self.joins)
    for candidate in candidates:
        if candidate.get_identifier() == wanted:
            return candidate
    return None
def overlay_gateway_access_lists_mac_out_mac_acl_out_name(self, **kwargs):
    """Auto Generated Code.

    Builds the config XML for an overlay-gateway outbound MAC ACL name
    and hands it to the callback (``self._callback`` unless overridden
    via ``kwargs['callback']``).
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(
        config, "overlay-gateway",
        xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # Gateway key, then the nested access-lists/mac/out container.
    ET.SubElement(overlay_gateway, "name").text = kwargs.pop('name')
    access_lists = ET.SubElement(overlay_gateway, "access-lists")
    out = ET.SubElement(ET.SubElement(access_lists, "mac"), "out")
    ET.SubElement(out, "mac-acl-out-name").text = kwargs.pop('mac_acl_out_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def create_access_service(price, service_endpoint, consume_endpoint, timeout=None):
    """Publish an asset with an `Access` service according to the supplied attributes.

    :param price: Asset price, int
    :param service_endpoint: str URL for initiating service access request
    :param consume_endpoint: str URL to consume service
    :param timeout: int amount of time in seconds before the agreement
        expires; falsy values default to one hour
    :return: Service instance or None
    """
    # Default to one hour when no (or zero) timeout was supplied.
    effective_timeout = timeout if timeout else 3600
    return ServiceDescriptor.access_service_descriptor(
        price, service_endpoint, consume_endpoint, effective_timeout, '')
def file_to_list(path):
    """Return the contents of a file as a list of lines, given a path.

    Exits the program with an error message when the path is missing.
    """
    if not os.path.exists(path):
        ui.error(c.MESSAGES["path_missing"], path)
        sys.exit(1)
    with codecs.open(path, "r", "UTF-8") as handle:
        return handle.read().splitlines()
def _compute_sorted_indices ( self ) :
"""The smoothers need sorted data . This sorts it from the perspective of each column .
if self . _ x [ 0 ] [ 3 ] is the 9th - smallest value in self . _ x [ 0 ] , then _ xi _ sorted [ 3 ] = 8
We only have to sort the data once .""" | sorted_indices = [ ]
for to_sort in [ self . y ] + self . x :
data_w_indices = [ ( val , i ) for ( i , val ) in enumerate ( to_sort ) ]
data_w_indices . sort ( )
sorted_indices . append ( [ i for val , i in data_w_indices ] )
# save in meaningful variable names
self . _yi_sorted = sorted_indices [ 0 ]
# list ( like self . y )
self . _xi_sorted = sorted_indices [ 1 : ] |
def get_core(self):
    """Get an unsatisfiable core if the formula was previously unsatisfied.

    Returns None when the solver is absent or the last call did not
    report UNSAT.
    """
    # status is compared with == False (not `is`) deliberately: it may
    # be None for "not yet solved", which must not count as UNSAT.
    if not self.lingeling or self.status != False:
        return None
    return pysolvers.lingeling_core(self.lingeling, self.prev_assumps)
def summary(self, *inputs):
    """Print the summary of the model's output and parameters.

    The network must have been initialized, and must not have been hybridized.

    Parameters
    ----------
    inputs : object
        Any input that the model supports. For any tensor in the input, only
        :class:`mxnet.ndarray.NDArray` is supported.
    """
    # Per-layer rows in display order; 'seen' tracks parameters already
    # counted so shared ones are not double-reported; 'hooks' collects the
    # forward hooks so they can be detached afterwards.
    summary = OrderedDict()
    seen = set()
    hooks = []

    def _get_shape_str(args):
        # Render the (possibly nested) structure of NDArray shapes as a
        # compact string, preserving the original nesting.
        def flatten(args):
            # Returns (flat list of leaves, format tree used to regroup).
            if not isinstance(args, (list, tuple)):
                return [args], int(0)
            flat = []
            fmts = []
            for i in args:
                arg, fmt = flatten(i)
                flat.extend(arg)
                fmts.append(fmt)
            return flat, fmts

        def regroup(args, fmt):
            # Inverse of flatten: rebuild the nested structure from the
            # flat list according to the format tree.
            if isinstance(fmt, int):
                if fmt == 0:
                    return args[0], args[1:]
                return args[:fmt], args[fmt:]
            ret = []
            for i in fmt:
                res, args = regroup(args, i)
                ret.append(res)
            return ret, args

        flat_args, fmts = flatten(args)
        flat_arg_shapes = [x.shape if isinstance(x, ndarray.NDArray)
                           else x for x in flat_args]
        shapes = regroup(flat_arg_shapes, fmts)[0]
        if isinstance(shapes, list):
            shape_str = str(shapes)[1:-1]
        else:
            shape_str = str(shapes)
        # Strip the 'L' suffix of Python 2 longs in printed shapes.
        return shape_str.replace('L', '')

    def _register_summary_hook(block):
        assert not isinstance(block, HybridBlock) or not block._active, \
            '"{}" must not be hybridized to print summary.'.format(block.name)

        def _summary_hook(block, _, outputs):
            # Record output shape and parameter counts for this block.
            class_name = block.__class__.__name__
            block_idx = len(summary) - 1
            m_key = '%s-%i' % (class_name, block_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]['output_shape'] = _get_shape_str(outputs)
            params = 0
            summary[m_key]['trainable'] = 0
            summary[m_key]['shared'] = 0
            for p in block.params.values():
                params += p.data().size
                summary[m_key]['trainable'] += 0 if p.grad_req == 'null' else p.data().size
                if p in seen:
                    summary[m_key]['shared'] += p.data().size
                else:
                    seen.add(p)
            summary[m_key]['n_params'] = params

        # Containers are skipped so only "real" layers get a row.
        from .nn.basic_layers import Sequential, HybridSequential
        if not isinstance(block, (Sequential, HybridSequential)):
            hooks.append(block.register_forward_hook(_summary_hook))

    # Synthetic first row for the network input itself.
    summary['Input'] = OrderedDict()
    summary['Input']['output_shape'] = _get_shape_str(inputs)
    summary['Input']['n_params'] = 0
    summary['Input']['trainable'] = 0
    summary['Input']['shared'] = 0
    try:
        self.apply(_register_summary_hook)
        # Run a forward pass so the hooks fire and populate 'summary'.
        self(*inputs)
        line_format = '{:>20} {:>42} {:>15}'
        print('-' * 80)
        print(line_format.format('Layer (type)', 'Output Shape', 'Param #'))
        print('=' * 80)
        total_params = 0
        trainable_params = 0
        shared_params = 0
        for layer in summary:
            print(line_format.format(layer,
                                     str(summary[layer]['output_shape']),
                                     summary[layer]['n_params']))
            total_params += summary[layer]['n_params']
            trainable_params += summary[layer]['trainable']
            shared_params += summary[layer]['shared']
        print('=' * 80)
        print('Parameters in forward computation graph, duplicate included')
        print(' Total params: ' + str(total_params))
        print(' Trainable params: ' + str(trainable_params))
        print(' Non-trainable params: ' + str(total_params - trainable_params))
        print('Shared params in forward computation graph: ' + str(shared_params))
        print('Unique parameters in model: ' + str(total_params - shared_params))
        print('-' * 80)
    finally:
        # Always detach the hooks, even if the forward pass failed.
        for h in hooks:
            h.detach()
def image(self, path_img):
    """Open an image file and print it.

    :param path_img: path to the image file
    """
    # Normalise to RGB before converting to the printer format.
    rgb_image = Image.open(path_img).convert("RGB")
    pix_line, img_size = self._convert_image(rgb_image)
    self._print_image(pix_line, img_size)
def _count_model ( self , model ) :
"""return model count""" | try :
res = model . objects . all ( ) . count ( )
except Exception as e :
self . err ( e )
return
return res |
def _read_neuralev(filename, read_markers=False, trigger_bits=16, trigger_zero=True):
    """Read some information from NEV

    Parameters
    ----------
    filename : str
        path to NEV file
    read_markers : bool
        whether to read markers or not (it can get really large)
    trigger_bits : int, optional
        8 or 16, read the triggers as one or two bytes
    trigger_zero : bool, optional
        read the trigger zero or not

    Returns
    -------
    MetaTags : list of dict
        which corresponds to MetaTags of openNEV
    Markers : list of dict
        markers in NEV file

    Notes
    -----
    The conversion to DateTime in openNEV.m is not correct. They add a value
    of 2 to the day. Instead, they should add it to the index of the weekday.
    It returns triggers as strings (format of EDFBrowser), but it does not
    read the other types of events (waveforms, videos, etc).
    The time stamps are stored in UTC in the NSx files. However, time stamps
    in the NEV files are stored as local time up to Central 6.03 included and
    stored as UTC after Central 6.05. It's impossible to know the version of
    Central from the header.
    """
    hdr = {}
    with open(filename, 'rb') as f:
        # --- fixed-size 336-byte basic header; (i0, i1) walk the offsets ---
        BasicHdr = f.read(336)
        i1 = 8
        hdr['FileTypeID'] = BasicHdr[:i1].decode('utf-8')
        assert hdr['FileTypeID'] == 'NEURALEV'
        i0, i1 = i1, i1 + 2
        filespec = unpack('bb', BasicHdr[i0:i1])
        hdr['FileSpec'] = str(filespec[0]) + '.' + str(filespec[1])
        i0, i1 = i1, i1 + 2
        hdr['Flags'] = unpack('<H', BasicHdr[i0:i1])[0]
        i0, i1 = i1, i1 + 4
        hdr['HeaderOffset'] = unpack('<I', BasicHdr[i0:i1])[0]
        i0, i1 = i1, i1 + 4
        hdr['PacketBytes'] = unpack('<I', BasicHdr[i0:i1])[0]
        i0, i1 = i1, i1 + 4
        hdr['TimeRes'] = unpack('<I', BasicHdr[i0:i1])[0]
        i0, i1 = i1, i1 + 4
        hdr['SampleRes'] = unpack('<I', BasicHdr[i0:i1])[0]
        i0, i1 = i1, i1 + 16
        # Windows SYSTEMTIME layout: (year, month, weekday, day, hour,
        # minute, second, millisecond) as eight uint16.
        time = unpack('<' + 'H' * 8, BasicHdr[i0:i1])
        hdr['DateTimeRaw'] = time
        lg.warning('DateTime is in local time with Central version <= 6.03'
                   ' and in UTC with Central version > 6.05')
        # time[2] (weekday) is deliberately skipped; time[3] is the day.
        hdr['DateTime'] = datetime(time[0], time[1], time[3], time[4],
                                   time[5], time[6], time[7] * 1000)
        i0, i1 = i1, i1 + 32
        # hdr['Application'] = _str(BasicHdr[i0:i1].decode('utf-8'))
        i0, i1 = i1, i1 + 256
        hdr['Comment'] = _str(BasicHdr[i0:i1].decode('utf-8', errors='replace'))
        i0, i1 = i1, i1 + 4
        countExtHeader = unpack('<I', BasicHdr[i0:i1])[0]
        # you can read subject name from sif
        # Check data duration: the timestamp of the very last packet.
        f.seek(-hdr['PacketBytes'], SEEK_END)
        hdr['DataDuration'] = unpack('<I', f.read(4))[0]
        hdr['DataDurationSec'] = hdr['DataDuration'] / hdr['SampleRes']
        # Read the Extended Header (32 bytes per entry).
        f.seek(336)
        ElectrodesInfo = []
        IOLabels = []
        for i in range(countExtHeader):
            ExtendedHeader = f.read(32)
            i1 = 8
            PacketID = ExtendedHeader[:i1].decode('utf-8')
            if PacketID == 'NEUEVWAV':
                elec = {}
                i0, i1 = i1, i1 + 2
                elec['ElectrodeID'] = unpack('<H', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 1
                # Bank index stored as 1..n; +64 maps it to 'A', 'B', ...
                elec['ConnectorBank'] = chr(ExtendedHeader[i0] + 64)
                i0, i1 = i1, i1 + 1
                elec['ConnectorPin'] = ExtendedHeader[i0]
                i0, i1 = i1, i1 + 2
                df = unpack('<h', ExtendedHeader[i0:i1])[0]
                # This is a workaround for the DigitalFactor overflow
                if df == 21516:
                    elec['DigitalFactor'] = 152592.547
                else:
                    elec['DigitalFactor'] = df
                i0, i1 = i1, i1 + 2
                elec['EnergyThreshold'] = unpack('<H', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 2
                elec['HighThreshold'] = unpack('<h', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 2
                elec['LowThreshold'] = unpack('<h', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 1
                elec['Units'] = ExtendedHeader[i0]
                i0, i1 = i1, i1 + 1
                elec['WaveformBytes'] = ExtendedHeader[i0]
                ElectrodesInfo.append(elec)
            elif PacketID == 'NEUEVLBL':
                i0, i1 = i1, i1 + 2
                # ElectrodeID is 1-based on disk; convert to a 0-based index.
                ElectrodeID = unpack('<H', ExtendedHeader[i0:i1])[0] - 1
                s = _str(ExtendedHeader[i1:].decode('utf-8'))
                ElectrodesInfo[ElectrodeID]['ElectrodeLabel'] = s
            elif PacketID == 'NEUEVFLT':
                elec = {}
                i0, i1 = i1, i1 + 2
                ElectrodeID = unpack('<H', ExtendedHeader[i0:i1])[0] - 1
                i0, i1 = i1, i1 + 4
                elec['HighFreqCorner'] = unpack('<I', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 4
                elec['HighFreqOrder'] = unpack('<I', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 2
                elec['HighFilterType'] = unpack('<H', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 4
                elec['LowFreqCorner'] = unpack('<I', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 4
                elec['LowFreqOrder'] = unpack('<I', ExtendedHeader[i0:i1])[0]
                i0, i1 = i1, i1 + 2
                elec['LowFilterType'] = unpack('<H', ExtendedHeader[i0:i1])[0]
                ElectrodesInfo[ElectrodeID].update(elec)
            elif PacketID == 'DIGLABEL':
                # TODO: the order is not taken into account and probably wrong!
                iolabel = {}
                iolabel['mode'] = ExtendedHeader[24] + 1
                s = _str(ExtendedHeader[8:25].decode('utf-8'))
                iolabel['label'] = s
                IOLabels.append(iolabel)
            else:
                raise NotImplementedError(PacketID + ' not implemented yet')
        hdr['ChannelID'] = [x['ElectrodeID'] for x in ElectrodesInfo]
        # Everything after the extended header is fixed-size data packets.
        fExtendedHeader = f.tell()
        fData = f.seek(0, SEEK_END)
        countDataPacket = int((fData - fExtendedHeader) / hdr['PacketBytes'])
        markers = []
        if read_markers and countDataPacket:
            f.seek(fExtendedHeader)
            x = f.read(countDataPacket * hdr['PacketBytes'])
            DigiValues = []
            for j in range(countDataPacket):
                i = j * hdr['PacketBytes']
                if trigger_bits == 16:
                    tempDigiVals = unpack('<H', x[8 + i:10 + i])[0]
                else:
                    # 8-bit triggers: pad the single byte up to 16 bit.
                    tempDigiVals = unpack('<H', x[8 + i:9 + i] + b'\x00')[0]
                val = {'timestamp': unpack('<I', x[0 + i:4 + i])[0],
                       'packetID': unpack('<H', x[4 + i:6 + i])[0],
                       'tempClassOrReason': unpack('<B', x[6 + i:7 + i])[0],
                       'tempDigiVals': tempDigiVals}
                # NOTE(review): `or False` is dead code -- presumably this
                # was meant to involve `trigger_zero` (the parameter is
                # otherwise unused); confirm the intended condition.
                if tempDigiVals != 0 or False:
                    DigiValues.append(val)
            # Only serial/digital packets (packetID 0) are supported.
            digserPacketID = 0
            not_serialdigital = [x for x in DigiValues
                                 if not x['packetID'] == digserPacketID]
            if not_serialdigital:
                lg.debug('Code not implemented to read PacketID '
                         + str(not_serialdigital[0]['packetID']))
            # convert to markers (EDFBrowser-style, instantaneous events)
            for val in DigiValues:
                m = {'name': str(val['tempDigiVals']),
                     'start': val['timestamp'] / hdr['SampleRes'],
                     'end': val['timestamp'] / hdr['SampleRes'],
                     'chan': [''],
                     }
                markers.append(m)
    if read_markers:
        return markers
    else:
        return hdr
def validate_model(self):
    """Validate the asset-allocation model and report the result on stdout."""
    model: AssetAllocationModel = self.get_asset_allocation_model()
    model.logger = self.logger
    if model.validate():
        print("The model is valid. Congratulations")
    else:
        print("The model is invalid.")
def process_execute(function, *args, **kwargs):
    """Runs the given function returning its results or exception.

    On failure the exception is wrapped in a RemoteException carrying the
    formatted traceback so it can cross process boundaries.
    """
    try:
        result = function(*args, **kwargs)
    except Exception as error:
        # Attach the traceback text so the remote side keeps the context.
        error.traceback = format_exc()
        return RemoteException(error, error.traceback)
    else:
        return result
def upload(self, stop_at=None):
    """Perform file upload.

    Performs continuous upload of chunks of the file; the size uploaded
    each cycle is the value of the attribute 'chunk_size'.

    :Args:
        - stop_at (Optional[int]):
            Offset value at which the upload should stop. If not
            specified (or falsy) this defaults to the file size.
    """
    self.stop_at = stop_at or self.file_size
    while self.offset < self.stop_at:
        self.upload_chunk()
    # The original used while/else with no break, so this always ran
    # once the loop finished.
    if self.log_func:
        self.log_func(
            "maximum upload specified({} bytes) has been reached".format(
                self.stop_at))
def validate_cnxml(*content_filepaths):
    """Validates the given CNXML files against the cnxml-jing.rng RNG schema."""
    resolved = tuple(Path(path).resolve() for path in content_filepaths)
    return jing(CNXML_JING_RNG, *resolved)
def is_active(self, instance, conditions):
    """value is the current value of the switch
    instance is the instance of our type"""
    if isinstance(instance, User):
        return super(UserConditionSet, self).is_active(instance, conditions)
    # HACK: allow is_anonymous to work on AnonymousUser
    anon_condition = conditions.get(self.get_namespace(), {}).get('is_anonymous')
    if anon_condition is None:
        return None
    return bool(anon_condition)
def _ParseSignatureIdentifiers ( self , data_location , signature_identifiers ) :
"""Parses the signature identifiers .
Args :
data _ location ( str ) : location of the format specification file , for
example , " signatures . conf " .
signature _ identifiers ( str ) : comma separated signature identifiers .
Raises :
IOError : if the format specification file could not be read from
the specified data location .
OSError : if the format specification file could not be read from
the specified data location .
ValueError : if no data location was specified .""" | if not signature_identifiers :
return
if not data_location :
raise ValueError ( 'Missing data location.' )
path = os . path . join ( data_location , 'signatures.conf' )
if not os . path . exists ( path ) :
raise IOError ( 'No such format specification file: {0:s}' . format ( path ) )
try :
specification_store = self . _ReadSpecificationFile ( path )
except IOError as exception :
raise IOError ( ( 'Unable to read format specification file: {0:s} with error: ' '{1!s}' ) . format ( path , exception ) )
signature_identifiers = signature_identifiers . lower ( )
signature_identifiers = [ identifier . strip ( ) for identifier in signature_identifiers . split ( ',' ) ]
file_entry_filter = file_entry_filters . SignaturesFileEntryFilter ( specification_store , signature_identifiers )
self . _filter_collection . AddFilter ( file_entry_filter ) |
def getTypeInfo(self, sql_type):  # nopep8
    """Executes SQLGetTypeInfo and creates a result set with information
    about the specified data type, or all data types supported by the
    ODBC driver if not specified."""
    return self._run_operation(self._impl.getTypeInfo, sql_type)
def qteUnbindKeyFromWidgetObject(self, keysequence, widgetObj: QtGui.QWidget):
    """Disassociate the macro triggered by ``keysequence`` from ``widgetObj``.

    The ``keysequence`` can be specified either as a string (eg.
    '<ctrl>+x <ctrl>+f'), or a list of tuples of ``QtCore.Qt`` constants
    (eg. [(ControlModifier, Key_X), (ControlModifier, Key_F)]), or as a
    ``QtmacsKeysequence`` object. This method does not affect the key
    bindings of other applets.

    |Args|
    * ``keysequence`` (**str**, **list** of **tuples**, **QtmacsKeysequence**):
      key sequence to unbind.
    * ``widgetObj`` (**QWidget**): widget whose local key map is modified.

    |Returns|
    * **None**

    |Raises|
    * **QtmacsArgumentError** if at least one argument has an invalid type.
    * **QtmacsKeysequenceError** if the provided ``keysequence`` could not
      be parsed.
    * **QtmacsOtherError** if ``widgetObj`` was not added with
      ``qteAddWidget``.
    """
    # Normalise to a QtmacsKeysequence; raises QtmacsKeysequenceError on
    # malformed input.
    parsed_sequence = QtmacsKeysequence(keysequence)
    # Only widgets registered via qteAddWidget carry the admin structure.
    if not hasattr(widgetObj, '_qteAdmin'):
        raise QtmacsOtherError(
            '<widgetObj> was probably not added with <qteAddWidget>'
            ' method because it lacks the <_qteAdmin> attribute.')
    # Remove the key sequence from the widget-local key map.
    widgetObj._qteAdmin.keyMap.qteRemoveKey(parsed_sequence)
def use(self, tube):
    """Start producing jobs into the given tube.

    :param tube: Name of the tube to USE

    Subsequent calls to :func:`put_job` insert jobs into this tube.
    """
    with self._sock_ctx() as socket:
        if self.current_tube == tube:
            return
        # Record intent before the round-trip so a failure can be retried.
        self.desired_tube = tube
        self._send_message('use {0}'.format(tube), socket)
        self._receive_name(socket)
        self.current_tube = tube
def reload_using_spawn_wait(self):
    """Spawn a subprocess running the same command line and wait for it.

    :return: None.
    """
    # Rebuild the original command line.
    command = [sys.executable] + sys.argv
    # Copy the environment for the child process.
    child_env = os.environ.copy()
    # Ask the main thread to stop via KeyboardInterrupt.
    interrupt_main()
    # Run the replacement process to completion.
    subprocess.call(command, env=child_env, close_fds=True)
    # Terminate this watcher thread.
    sys.exit(0)
def update_router(self, context, router_id, router):
    """Update an existing router in DB, and update it in Arista HW."""
    # Snapshot the current DB record before changing it.
    original_router = self.get_router(context, router_id)
    new_router = super(AristaL3ServicePlugin, self).update_router(
        context, router_id, router)
    try:
        # Push the change down to the Arista hardware.
        self.driver.update_router(
            context, router_id, original_router, new_router)
        return new_router
    except Exception:
        # Best-effort: log and fall through (implicitly returns None),
        # matching the original behaviour of swallowing HW errors.
        LOG.error(_LE("Error updating router on Arista HW router=%s "),
                  new_router)
def _getitem_iterable(self, key, axis=None):
    """Index current object with an iterable key (which can be a boolean
    indexer, or a collection of keys).

    Parameters
    ----------
    key : iterable
        Target labels, or boolean indexer.
    axis : int, default None
        Dimension on which the indexing is being made.

    Raises
    ------
    KeyError
        If no key was found. Will change in the future to raise if not all
        keys were found.
    IndexingError
        If the boolean indexer is unalignable with the object being
        indexed.

    Returns
    -------
    scalar, DataFrame, or Series: indexed value(s).
    """
    if axis is None:
        # Fall back to the indexer's configured axis, or 0.
        axis = self.axis or 0
    self._validate_key(key, axis)
    labels = self.obj._get_axis(axis)
    if com.is_bool_indexer(key):
        # A boolean indexer: align against the labels, then take the
        # positions where the mask is True.
        key = check_bool_indexer(labels, key)
        inds, = key.nonzero()
        return self.obj._take(inds, axis=axis)
    else:
        # A collection of keys: resolve to (labels, positional indexer);
        # raise_missing=False defers the "not all keys found" error.
        keyarr, indexer = self._get_listlike_indexer(key, axis,
                                                     raise_missing=False)
        return self.obj._reindex_with_indexers({axis: [keyarr, indexer]},
                                               copy=True, allow_dups=True)
def _raw_aspera_metadata ( self , bucket ) :
'''get the Aspera connection details on Aspera enabled buckets''' | response = self . _client . get_bucket_aspera ( Bucket = bucket )
# Parse metadata from response
aspera_access_key = response [ 'AccessKey' ] [ 'Id' ]
aspera_secret_key = response [ 'AccessKey' ] [ 'Secret' ]
ats_endpoint = response [ 'ATSEndpoint' ]
return aspera_access_key , aspera_secret_key , ats_endpoint |
def _random_fill(self):
    """Fill every blank position of the board with a random tile."""
    board = self._array
    for position, tile in self.positions_with_tile():
        if tile.is_blank():
            board[position] = Tile.random_tile()
def pauli_string_half(circuit: circuits.Circuit) -> circuits.Circuit:
    """Return only the non-Clifford part of a circuit. See
    convert_and_separate_circuit().

    Args:
        circuit: A Circuit with the gate set {SingleQubitCliffordGate,
            PauliInteractionGate, PauliStringPhasor}.

    Returns:
        A Circuit with only PauliStringPhasor operations.
    """
    non_clifford_ops = _pull_non_clifford_before(circuit)
    return circuits.Circuit.from_ops(
        non_clifford_ops, strategy=circuits.InsertStrategy.EARLIEST)
def most_recent_among(versions):
    """Compare a list of NSS versions and return the latest one.

    Uses first_older_than_second() for comparison.

    :param versions: an array of NSS version strings
    :return: verbatim copy of the most recent version string
    """
    best = versions[0]
    for candidate in versions[1:]:
        # Keep the newer of the running best and the candidate.
        if nssversion.first_older_than_second(best, candidate):
            best = candidate
    return best
def classify(self):
    """*classify the transients selected from the transient selection query
    in the settings file or passed in via the CL or other code*

    **Return:**
        - ``crossmatches`` -- list of dictionaries of crossmatched associated sources
        - ``classifications`` -- the classifications assigned to the transients post-crossmatches (dictionary of rank ordered list of classifications)

    See class docstring for usage.

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    # NOTE: this method runs under Python 2 (print statements, iteritems,
    # dict.items() concatenation).
    global theseBatches
    global crossmatchArray
    self.log.debug('starting the ``classify`` method')
    remaining = 1
    # THE COLUMN MAPS - WHICH COLUMNS IN THE CATALOGUE TABLES = RA, DEC,
    # REDSHIFT, MAG ETC
    colMaps = get_crossmatch_catalogues_column_map(
        log=self.log,
        dbConn=self.cataloguesDbConn
    )
    self._create_tables_if_not_exist()
    import time
    start_time = time.time()
    # COUNT SEARCHES
    sa = self.settings["search algorithm"]
    searchCount = 0
    brightnessFilters = ["bright", "faint", "general"]
    for search_name, searchPara in sa.iteritems():
        for bf in brightnessFilters:
            if bf in searchPara:
                searchCount += 1
    # Cap the search count at the number of CPUs.
    cpuCount = psutil.cpu_count()
    if searchCount > cpuCount:
        searchCount = cpuCount
    largeBatchSize = 5000
    miniBatchSize = 100
    self.largeBatchSize = largeBatchSize
    # print "mini batch size ", str(miniBatchSize)
    while remaining:
        # IF A TRANSIENT HAS NOT BEEN PASSED IN VIA THE COMMAND-LINE, THEN
        # QUERY THE TRANSIENT DATABASE
        if not self.ra and not self.dec:
            # COUNT REMAINING TRANSIENTS
            from fundamentals.mysql import readquery
            sqlQuery = self.settings["database settings"][
                "transients"]["transient count"]
            # Random integer defeats any query-result caching.
            thisInt = randint(0, 100)
            if "where" in sqlQuery:
                sqlQuery = sqlQuery.replace(
                    "where", "where %(thisInt)s=%(thisInt)s and " % locals())
            if remaining == 1 or remaining < largeBatchSize:
                rows = readquery(
                    log=self.log,
                    sqlQuery=sqlQuery,
                    dbConn=self.transientsDbConn,
                )
                remaining = rows[0]["count(*)"]
            else:
                remaining = remaining - largeBatchSize
            print "%(remaining)s transient sources requiring a classification remain" % locals()
            # START THE TIME TO TRACK CLASSIFICATION SPPED
            start_time = time.time()
            # A LIST OF DICTIONARIES OF TRANSIENT METADATA
            transientsMetadataList = self._get_transient_metadata_from_database_list()
            count = len(transientsMetadataList)
            print " now classifying the next %(count)s transient sources" % locals()
            # EXAMPLE OF TRANSIENT METADATA
            # {'name': 'PS17gx',
            #  'alt_id': 'PS17gx',
            #  'object_classification': 'SN',
            #  'dec': '+43:25:44.1',
            #  'id': 1,
            #  'ra': '08:57:57.19'}
        # TRANSIENT PASSED VIA COMMAND-LINE
        else:
            if not self.name:
                name = "transient"
            else:
                name = self.name
            transient = {
                'name': name,
                'object_classification': None,
                'dec': self.dec,
                'id': name,
                'ra': self.ra
            }
            transientsMetadataList = [transient]
            remaining = 0
        if self.oneRun:
            remaining = 0
        if len(transientsMetadataList) == 0:
            if self.daemonMode == False:
                remaining = 0
                print "No transients need classified"
                return None, None
            else:
                print "No remaining transients need classified, will try again in 5 mins"
                # NOTE(review): time.sleep() is given a *string* ("10")
                # here, which raises TypeError in CPython, and the message
                # says 5 mins -- confirm the intended sleep duration.
                time.sleep("10")
        # FROM THE LOCATIONS OF THE TRANSIENTS, CHECK IF OUR LOCAL NED
        # DATABASE NEEDS UPDATED
        if self.updateNed:
            self._update_ned_stream(
                transientsMetadataList=transientsMetadataList
            )
        # SOME TESTING SHOWED THAT 25 IS GOOD
        total = len(transientsMetadataList)
        batches = int((float(total) / float(miniBatchSize)) + 1.)
        if batches == 0:
            batches = 1
        start = 0
        end = 0
        theseBatches = []
        for i in range(batches):
            end = end + miniBatchSize
            start = i * miniBatchSize
            thisBatch = transientsMetadataList[start:end]
            theseBatches.append(thisBatch)
        print "BATCH SIZE = %(total)s" % locals()
        print "MINI BATCH SIZE = %(batches)s x %(miniBatchSize)s" % locals()
        # DEFINE AN INPUT ARRAY
        # cores = psutil.cpu_count()
        # if cores > 8:
        #     cores = 8
        start_time2 = time.time()
        print "START CROSSMATCH"
        # One worker per mini-batch; each crossmatches its batch against
        # the catalogues.
        crossmatchArray = fmultiprocess(
            log=self.log,
            function=self._crossmatch_transients_against_catalogues,
            inputArray=range(len(theseBatches)),
            poolSize=None,
            colMaps=colMaps
        )
        print "FINISH CROSSMATCH/START RANKING: %d" % (time.time() - start_time2,)
        start_time2 = time.time()
        classifications = {}
        crossmatches = []
        for sublist in crossmatchArray:
            sublist = sorted(
                sublist, key=itemgetter('transient_object_id'))
            # REORGANISE INTO INDIVIDUAL TRANSIENTS FOR RANKING AND
            # TOP-LEVEL CLASSIFICATION EXTRACTION
            batch = []
            if len(sublist) != 0:
                transientId = sublist[0]['transient_object_id']
                for s in sublist:
                    if s['transient_object_id'] != transientId:
                        # RANK TRANSIENT CROSSMATCH BATCH
                        cl, cr = self._rank_classifications(
                            batch, colMaps)
                        crossmatches.extend(cr)
                        classifications = dict(
                            classifications.items() + cl.items())
                        transientId = s['transient_object_id']
                        batch = [s]
                    else:
                        batch.append(s)
                # RANK FINAL BATCH
                cl, cr = self._rank_classifications(batch, colMaps)
                classifications = dict(
                    classifications.items() + cl.items())
                crossmatches.extend(cr)
        # Transients with no crossmatch at all get the ORPHAN label.
        for t in transientsMetadataList:
            if t["id"] not in classifications:
                classifications[t["id"]] = ["ORPHAN"]
        if self.cl:
            self._print_results_to_stdout(
                classifications=classifications,
                crossmatches=crossmatches
            )
        # UPDATE THE TRANSIENT DATABASE IF UPDATE REQUESTED (ADD DATA TO
        # tcs_crossmatch_table AND A CLASSIFICATION TO THE ORIGINAL
        # TRANSIENT TABLE)
        print "FINISH RANKING/START UPDATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()
        if self.update and not self.ra:
            self._update_transient_database(
                crossmatches=crossmatches,
                classifications=classifications,
                transientsMetadataList=transientsMetadataList,
                colMaps=colMaps
            )
        print "FINISH UPDATING TRANSIENT DB/START ANNOTATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()
        # Command-line single-transient mode: return results directly.
        if self.ra:
            return classifications, crossmatches
        if self.updatePeakMags and self.settings["database settings"]["transients"]["transient peak magnitude query"]:
            self.update_peak_magnitudes()
        self.update_classification_annotations_and_summaries(
            self.updatePeakMags)
        print "FINISH ANNOTATING TRANSIENT DB: %d" % (time.time() - start_time2,)
        start_time2 = time.time()
        classificationRate = count / (time.time() - start_time)
        print "Sherlock is classify at a rate of %(classificationRate)2.1f transients/sec" % locals()
    self.log.debug('completed the ``classify`` method')
    return None, None
def parse_section_entry_points(self, section_options):
    """Parse the ``entry_points`` configuration-file section into this mapping.

    :param dict section_options: raw option values for the section
    """
    # Each entry-point group value is a list-style option string.
    self['entry_points'] = self._parse_section_to_dict(
        section_options, self._parse_list)
def from_jwt(self, txt, keyjar, verify=True, **kwargs):
    """Given a signed and/or encrypted JWT, verify its correctness and then
    create a class instance from the content.

    :param txt: The JWT
    :param key: keys that might be used to decrypt and/or verify the
        signature of the JWT
    :param verify: Whether the signature should be verified or not
    :param keyjar: A KeyJar that might contain the necessary key.
    :param kwargs: Extra key word arguments
    :return: A class instance
    """
    # Collect any caller-pinned JWE algorithms; jwe_factory only returns a
    # decryptor when txt actually is a JWE.
    algarg = {}
    if 'encalg' in kwargs:
        algarg['alg'] = kwargs['encalg']
    if 'encenc' in kwargs:
        algarg['enc'] = kwargs['encenc']
    _decryptor = jwe_factory(txt, **algarg)
    if _decryptor:
        logger.debug("JWE headers: {}".format(_decryptor.jwt.headers))
        dkeys = keyjar.get_decrypt_key(owner="")
        logger.debug('Decrypt class: {}'.format(_decryptor.__class__))
        _res = _decryptor.decrypt(txt, dkeys)
        logger.debug('decrypted message:{}'.format(_res))
        # decrypt() may return bytes, a (payload, ...) tuple or a 2-element
        # list depending on the JWE implementation; normalise to text.
        if isinstance(_res, tuple):
            txt = as_unicode(_res[0])
        elif isinstance(_res, list) and len(_res) == 2:
            txt = as_unicode(_res[0])
        else:
            txt = as_unicode(_res)
        self.jwe_header = _decryptor.jwt.headers
    # NOTE(review): bare except — jws_factory raises KeyError if 'sigalg'
    # is absent; any failure falls back to algorithm auto-detection.
    try:
        _verifier = jws_factory(txt, alg=kwargs['sigalg'])
    except:
        _verifier = jws_factory(txt)
    if _verifier:
        try:
            _jwt = _verifier.jwt
            jso = _jwt.payload()
            _header = _jwt.headers
            key = []
            # if "sender" in kwargs:
            #     key.extend(keyjar.get_verify_key(owner=kwargs["sender"]))
            logger.debug("Raw JSON: {}".format(jso))
            logger.debug("JWS header: {}".format(_header))
            if _header["alg"] == "none":
                # Unsecured JWS — nothing to verify.
                pass
            elif verify:
                if keyjar:
                    key.extend(keyjar.get_jwt_verify_keys(_jwt, **kwargs))
                if "alg" in _header and _header["alg"] != "none":
                    if not key:
                        raise MissingSigningKey("alg=%s" % _header["alg"])
                logger.debug("Found signing key.")
                try:
                    _verifier.verify_compact(txt, key)
                except NoSuitableSigningKeys:
                    # The issuer may have rotated keys; refresh and retry once.
                    if keyjar:
                        update_keyjar(keyjar)
                        key = keyjar.get_jwt_verify_keys(_jwt, **kwargs)
                        _verifier.verify_compact(txt, key)
        except Exception:
            raise
        else:
            # Only record the JWS header when verification succeeded.
            self.jws_header = _jwt.headers
    else:
        # Not a JWS at all — treat the text as plain JSON.
        jso = json.loads(txt)
    # Keep the (possibly decrypted) compact serialisation for later use.
    self.jwt = txt
    return self.from_dict(jso)
def colDelete(self, colI=-1):
    """delete a column at a single index. Negative numbers count from the end."""
    # All per-column parallel lists must stay in sync, so drop the same
    # index from each of them.
    for column_list in (self.colNames, self.colDesc, self.colUnits,
                        self.colComments, self.colTypes, self.colData):
        column_list.pop(colI)
def _usage_endpoint(self, endpoint, year=None, month=None):
    """Common helper for getting usage and billing reports with
    optional year and month URL elements.

    :param str endpoint: Cloudant usage endpoint.
    :param int year: Year to query against.  Optional parameter.
        Defaults to None.  If used, it must be accompanied by ``month``.
    :param int month: Month to query against that must be an integer
        between 1 and 12.  Optional parameter.  Defaults to None.
        If used, it must be accompanied by ``year``.
    """
    err = False
    if year is None and month is None:
        # No date filter at all — query the bare endpoint.
        resp = self.r_session.get(endpoint)
    else:
        # Any partial/invalid combination (one of the two missing, a
        # non-numeric value, or month out of range) is reported via the
        # same argument error below; int(None) raises TypeError here.
        try:
            if int(year) > 0 and int(month) in range(1, 13):
                resp = self.r_session.get(
                    '/'.join((endpoint, str(int(year)), str(int(month)))))
            else:
                err = True
        except (ValueError, TypeError):
            err = True
    if err:
        raise CloudantArgumentError(101, year, month)
    # Surface HTTP-level failures before decoding the body.
    resp.raise_for_status()
    return response_to_json_dict(resp)
def versions():
    '''
    Check the version of active minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.versions
    '''
    ret = {}
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        minions = client.cmd('*', 'test.version', timeout=__opts__['timeout'])
    except SaltClientError as client_error:
        # Best-effort runner: report the error and return an empty result.
        print(client_error)
        return ret
    # Map of cmp()-style comparison results (and the sentinels -2 / 2)
    # to human-readable status labels.
    labels = {
        -2: 'Minion offline',
        -1: 'Minion requires update',
        0: 'Up to date',
        1: 'Minion newer than master',
        2: 'Master',
    }
    version_status = {}
    master_version = salt.version.__saltstack_version__
    for minion in minions:
        if not minions[minion]:
            # No reply — treat the minion as offline.
            minion_version = False
            ver_diff = -2
        else:
            minion_version = salt.version.SaltStackVersion.parse(minions[minion])
            ver_diff = salt.utils.compat.cmp(minion_version, master_version)
        if ver_diff not in version_status:
            version_status[ver_diff] = {}
        if minion_version:
            version_status[ver_diff][minion] = minion_version.string
        else:
            # Offline minions keep the False placeholder instead of a string.
            version_status[ver_diff][minion] = minion_version
    # Add version of Master to output
    version_status[2] = master_version.string
    for key in version_status:
        if key == 2:
            # The master entry is a plain string, not a per-minion mapping.
            ret[labels[key]] = version_status[2]
        else:
            for minion in sorted(version_status[key]):
                ret.setdefault(labels[key], {})[minion] = version_status[key][minion]
    return ret
def _blast ( bvname2vals , name_map ) :
"""Helper function to expand ( blast ) str - > int map into str - >
bool map . This is used to send word level inputs to aiger .""" | if len ( name_map ) == 0 :
return dict ( )
return fn . merge ( * ( dict ( zip ( names , bvname2vals [ bvname ] ) ) for bvname , names in name_map ) ) |
def Point2HexColor(a, lfrac, tfrac):
    """Return web-safe hex triplets."""
    # Interpret `a` as a hue fraction; lfrac/tfrac feed saturation/value.
    hue = math.floor(360 * a)
    rgb = hsvToRGB(hue, lfrac, tfrac)
    channels = []
    for component in rgb:
        hx = hex(int(math.floor(255 * component)))
        hx = hx[hx.find('x') + 1:]          # strip the '0x' prefix
        if len(hx) == 1:
            hx = '0' + hx                   # zero-pad single digits
        channels.append(hx)
    return '#' + ''.join(channels)
def group_dict_by_value(d: dict) -> dict:
    """Group a dictionary by values.

    Parameters
    ----------
    d : dict
        Input dictionary

    Returns
    -------
    dict
        Output dictionary. The keys are the values of the initial dictionary
        and the values are given by a list of keys corresponding to the value.

    >>> group_dict_by_value({2: 3, 1: 2, 3: 1})
    {3: [2], 2: [1], 1: [3]}
    >>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10: 1, 12: 3})
    {3: [2, 12], 2: [1], 1: [3, 10]}
    """
    grouped = {}
    for key, value in d.items():
        grouped.setdefault(value, []).append(key)
    return grouped
def getReffs(self, textId, level=1, subreference=None):
    """Retrieve the siblings of a textual node

    :param textId: CtsTextMetadata Identifier
    :type textId: str
    :param level: Depth for retrieval
    :type level: int
    :param subreference: CapitainsCtsPassage Reference
    :type subreference: str
    :return: List of references
    :rtype: [str]
    """
    depth = level
    if subreference:
        # Anchor the query on the passage: URN becomes "textId:subreference".
        textId = "{}:{}".format(textId, subreference)
    if subreference:
        # Deepen the requested level by the depth of the passage reference.
        if isinstance(subreference, CtsReference):
            depth += subreference.depth
        else:
            depth += (CtsReference(subreference)).depth
    if level:
        # Never ask for less depth than the passage itself already has.
        # NOTE(review): when level == 0 this adjustment is skipped — confirm
        # that callers rely on that behaviour.
        level = max(depth, level)
    return self.getValidReff(urn=textId, level=level)
def get_provider_metadata(self):
    """Gets the metadata for a provider.

    return: (osid.Metadata) - metadata for the provider
    *compliance: mandatory -- This method must be implemented.*
    """
    # Copy the template so the shared metadata dict is never mutated, then
    # fold in the currently stored provider id.
    metadata = dict(self._provider_metadata)
    metadata['existing_id_values'] = self.my_osid_object_form._my_map['providerId']
    return Metadata(**metadata)
def update_selection_sm_prior(self):
    """State machine prior update of tree selection"""
    # Guard against re-entrancy: selection changes below fire signals that
    # would call back into this method.
    if self._do_selection_update:
        return
    self._do_selection_update = True
    tree_selection, selected_model_list, sm_selection, sm_selected_model_list = self.get_selections()
    if tree_selection is not None:
        # Bring the tree selection in line with the state-machine selection:
        # the state machine is the source of truth here.
        for path, row in enumerate(self.list_store):
            model = row[self.MODEL_STORAGE_ID]
            if model not in sm_selected_model_list and model in selected_model_list:
                tree_selection.unselect_path(Gtk.TreePath.new_from_indices([path]))
            if model in sm_selected_model_list and model not in selected_model_list:
                tree_selection.select_path(Gtk.TreePath.new_from_indices([path]))
    self._do_selection_update = False
def get_partition_leaders(cluster_config):
    """Return the current leaders of all partitions. Partitions are
    returned as a "topic-partition" string.

    :param cluster_config: the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :returns: leaders for partitions
    :rtype: map of ("topic-partition", broker_id) pairs
    """
    client = KafkaClient(cluster_config.broker_list)
    # Flatten the nested topic -> partition -> metadata mapping into
    # "topic-partition" -> leader broker id.
    return {
        topic + "-" + str(partition): p_data.leader
        for topic, topic_data in six.iteritems(client.topic_partitions)
        for partition, p_data in six.iteritems(topic_data)
    }
def info(ctx):
    """Display status of PIV application."""
    controller = ctx.obj['controller']
    click.echo('PIV version: %d.%d.%d' % controller.version)
    # Largest possible number of PIN tries to get back is 15
    tries = controller.get_pin_tries()
    tries = '15 or more.' if tries == 15 else tries
    click.echo('PIN tries remaining: %s' % tries)
    if controller.puk_blocked:
        click.echo('PUK blocked.')
    if controller.has_derived_key:
        click.echo('Management key is derived from PIN.')
    if controller.has_stored_key:
        click.echo('Management key is stored on the YubiKey, protected by PIN.')
    try:
        chuid = b2a_hex(controller.get_data(OBJ.CHUID)).decode()
    except APDUError as e:
        if e.sw == SW.NOT_FOUND:
            chuid = 'No data available.'
        else:
            # FIX: previously any other status word fell through with `chuid`
            # unbound, masking the real error with a NameError. Re-raise.
            raise
    click.echo('CHUID:\t' + chuid)
    try:
        ccc = b2a_hex(controller.get_data(OBJ.CAPABILITY)).decode()
    except APDUError as e:
        if e.sw == SW.NOT_FOUND:
            ccc = 'No data available.'
        else:
            # FIX: same unbound-variable fall-through as for CHUID above.
            raise
    click.echo('CCC: \t' + ccc)
    for (slot, cert) in controller.list_certificates().items():
        click.echo('Slot %02x:' % slot)
        try:
            # Try to read out full DN, fallback to only CN.
            # Support for DN was added in cryptography 2.5
            subject_dn = cert.subject.rfc4514_string()
            issuer_dn = cert.issuer.rfc4514_string()
            print_dn = True
        except AttributeError:
            print_dn = False
            logger.debug('Failed to read DN, falling back to only CNs')
            subject_cn = cert.subject.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
            subject_cn = subject_cn[0].value if subject_cn else 'None'
            issuer_cn = cert.issuer.get_attributes_for_oid(x509.NameOID.COMMON_NAME)
            issuer_cn = issuer_cn[0].value if issuer_cn else 'None'
        except ValueError as e:
            # Malformed certificates may throw ValueError
            logger.debug('Failed parsing certificate', exc_info=e)
            click.echo('\tMalformed certificate: {}'.format(e))
            continue
        fingerprint = b2a_hex(cert.fingerprint(hashes.SHA256())).decode('ascii')
        algo = ALGO.from_public_key(cert.public_key())
        serial = cert.serial_number
        not_before = cert.not_valid_before
        not_after = cert.not_valid_after
        # Print out everything
        click.echo('\tAlgorithm:\t%s' % algo.name)
        if print_dn:
            click.echo('\tSubject DN:\t%s' % subject_dn)
            click.echo('\tIssuer DN:\t%s' % issuer_dn)
        else:
            click.echo('\tSubject CN:\t%s' % subject_cn)
            click.echo('\tIssuer CN:\t%s' % issuer_cn)
        click.echo('\tSerial:\t\t%s' % serial)
        click.echo('\tFingerprint:\t%s' % fingerprint)
        click.echo('\tNot before:\t%s' % not_before)
        click.echo('\tNot after:\t%s' % not_after)
def gpgga_to_dms(gpgga):
    '''Convert GPS coordinate in GPGGA format to degree/minute/second

    Reference: http://us.cactii.net/~bb/gps.py
    '''
    # GPGGA packs degrees and minutes together as DDDMM.MMMM: everything
    # before the last two integer digits is degrees, the rest is minutes.
    whole, frac = gpgga.split('.')
    degrees = int(whole[:-2])
    minutes = float(whole[-2:] + '.' + frac)
    return degrees + minutes / 60
def set_identify(self, on=True, duration=None):
    """Request identify light

    Request the identify light to turn off, on for a duration,
    or on indefinitely.  Other than error exceptions,

    :param on: Set to True to force on or False to force off
    :param duration: Set if wanting to request turn on for a duration
        rather than indefinitely on
    """
    # Prefer an OEM-specific implementation when the hardware offers one.
    self.oem_init()
    try:
        self._oem.set_identify(on, duration)
        return
    except exc.UnsupportedFunctionality:
        pass
    if duration is not None:
        # Duration is a single byte in the IPMI command; clamp to 0..255.
        duration = int(duration)
        if duration > 255:
            duration = 255
        if duration < 0:
            duration = 0
        response = self.raw_command(netfn=0, command=4, data=[duration])
        if 'error' in response:
            raise exc.IpmiException(response['error'])
        return
    forceon = 0
    if on:
        forceon = 1
    if self.ipmi_session.ipmiversion < 2.0:
        # ipmi 1.5 made due with just one byte, make best effort
        # to imitate indefinite as close as possible
        identifydata = [255 * forceon]
    else:
        # IPMI 2.0: explicit "force on/off" byte after a zero duration.
        identifydata = [0, forceon]
    response = self.raw_command(netfn=0, command=4, data=identifydata)
    if 'error' in response:
        raise exc.IpmiException(response['error'])
def set_all_pwm(self, on, off):
    """Sets all PWM channels."""
    # Each 12-bit on/off value is split into a low byte and a high byte and
    # written to the corresponding "all LED" registers.
    register_values = (
        (ALL_LED_ON_L, on & 0xFF),
        (ALL_LED_ON_H, on >> 8),
        (ALL_LED_OFF_L, off & 0xFF),
        (ALL_LED_OFF_H, off >> 8),
    )
    for register, value in register_values:
        self.i2c.write8(register, value)
def _cancel_outstanding ( self ) :
"""Cancel all of our outstanding requests""" | for d in list ( self . _outstanding ) :
d . addErrback ( lambda _ : None )
# Eat any uncaught errors
d . cancel ( ) |
def mfccInitFilterBanks(fs, nfft):
    """Computes the triangular filterbank for MFCC computation
    (used in the stFeatureExtraction function before the stMFCC function call)
    This function is taken from the scikits.talkbox library (MIT Licence):
    https://pypi.python.org/pypi/scikits.talkbox

    :param fs: sampling frequency in Hz
    :param nfft: number of FFT bins
    :returns: tuple ``(fbank, freqs)`` where ``fbank`` is an
        ``(nFiltTotal, nfft)`` array of triangular filter weights and
        ``freqs`` holds the ``nFiltTotal + 2`` triangle corner frequencies.
    """
    # filter bank params: 13 linearly-spaced filters starting at 133.33 Hz,
    # followed by 27 log-spaced filters.
    lowfreq = 133.33
    linsc = 200 / 3.
    logsc = 1.0711703
    numLinFiltTotal = 13
    numLogFilt = 27
    # NOTE(review): upstream assigned ``nlogfil = 5`` when fs < 8000, but the
    # variable was never read (the intended target was likely numLogFilt).
    # The dead assignment is removed; numeric behaviour is unchanged.
    # Total number of filters
    nFiltTotal = numLinFiltTotal + numLogFilt
    # Compute frequency points of the triangle:
    freqs = numpy.zeros(nFiltTotal + 2)
    freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc
    freqs[numLinFiltTotal:] = freqs[numLinFiltTotal - 1] * \
        logsc ** numpy.arange(1, numLogFilt + 3)
    # Normalise each triangle to unit area.
    heights = 2. / (freqs[2:] - freqs[0:-2])
    # Compute filterbank coeff (in fft domain, in bins)
    fbank = numpy.zeros((nFiltTotal, nfft))
    nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
    for i in range(nFiltTotal):
        lowTrFreq = freqs[i]
        cenTrFreq = freqs[i + 1]
        highTrFreq = freqs[i + 2]
        # FIX: ``numpy.int`` was removed in NumPy 1.24 — use the builtin int.
        lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1,
                           numpy.floor(cenTrFreq * nfft / fs) + 1,
                           dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1,
                           numpy.floor(highTrFreq * nfft / fs) + 1,
                           dtype=int)
        rslope = heights[i] / (highTrFreq - cenTrFreq)
        # Rising edge then falling edge of triangle i.
        fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
        fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
    return fbank, freqs
def tenant_absent(name, profile=None, **connection_args):
    '''
    Ensure that the keystone tenant is absent.

    name
        The name of the tenant that should not exist
    '''
    # Default to success/no-op; overwritten below if the tenant exists.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Tenant / project "{0}" is already absent'.format(name)}
    # Check if tenant is present
    tenant = __salt__['keystone.tenant_get'](name=name, profile=profile,
                                             **connection_args)
    if 'Error' not in tenant:
        # Tenant exists; in test mode only report what would happen.
        if __opts__.get('test'):
            ret['result'] = None
            ret['comment'] = 'Tenant / project "{0}" will be deleted'.format(name)
            return ret
        # Delete tenant
        __salt__['keystone.tenant_delete'](name=name, profile=profile,
                                           **connection_args)
        ret['comment'] = 'Tenant / project "{0}" has been deleted'.format(name)
        ret['changes']['Tenant/Project'] = 'Deleted'
    return ret
def supported_tags(self, interpreter=None, force_manylinux=True):
    """Returns a list of supported PEP425 tags for the current platform."""
    if interpreter and not self.is_extended:
        # N.B. Without an extended platform specifier, generate all possible
        # ABI permutations to mimic earlier pex version behavior and make
        # cross-platform resolution more intuitive.
        return _get_supported_for_any_abi(
            platform=self.platform,
            impl=interpreter.identity.abbr_impl,
            version=interpreter.identity.impl_ver,
            force_manylinux=force_manylinux,
        )
    return _get_supported(
        platform=self.platform,
        impl=self.impl,
        version=self.version,
        abi=self.abi,
        force_manylinux=force_manylinux,
    )
def get_task_doc(self, path):
    """Get the entire task doc for a path, including any post-processing."""
    logger.info("Getting task doc for base dir :{}".format(path))
    files = os.listdir(path)
    vasprun_files = OrderedDict()
    if "STOPCAR" in files:
        # Stopped runs. Try to parse as much as possible.
        logger.info(path + " contains stopped run")
    # Look for one vasprun per configured run stage, either in a
    # subfolder named after the stage or as a suffixed file.
    for r in self.runs:
        if r in files:
            # try subfolder schema
            for f in os.listdir(os.path.join(path, r)):
                if fnmatch(f, "vasprun.xml*"):
                    vasprun_files[r] = os.path.join(r, f)
        else:
            # try extension schema
            for f in files:
                if fnmatch(f, "vasprun.xml.{}*".format(r)):
                    vasprun_files[r] = f
    if len(vasprun_files) == 0:
        # get any vasprun from the folder as a fallback "standard" run
        for f in files:
            if fnmatch(f, "vasprun.xml*") and f not in vasprun_files.values():
                vasprun_files['standard'] = f
    if len(vasprun_files) > 0:
        d = self.generate_doc(path, vasprun_files)
        if not d:
            # Parsing failed — fall back to treating this as a killed run.
            d = self.process_killed_run(path)
        self.post_process(path, d)
    elif (not (path.endswith("relax1") or path.endswith("relax2"))) and contains_vasp_input(path):
        # If not Materials Project style, process as a killed run.
        logger.warning(path + " contains killed run")
        d = self.process_killed_run(path)
        self.post_process(path, d)
    else:
        raise ValueError("No VASP files found!")
    return d
def encipher(self, string):
    """Encipher string using Enigma M3 cipher according to initialised key. Punctuation and whitespace
    are removed from the input.

    Example::

        ciphertext = Enigma(settings=('A', 'A', 'A'), rotors=(1, 2, 3), reflector='B',
                            ringstellung=('F', 'V', 'N'), steckers=[('P', 'O'), ('M', 'L'),
                            ('I', 'U'), ('K', 'J'), ('N', 'H'), ('Y', 'T'), ('G', 'B'), ('V', 'F'),
                            ('R', 'E'), ('D', 'C')]).encipher(plaintext)

    :param string: The string to encipher.
    :returns: The enciphered string.
    """
    cleaned = self.remove_punctuation(string)
    # Only alphabetic characters go through the rotors; anything else is
    # passed through unchanged. Order matters: encipher_char steps state.
    pieces = []
    for ch in cleaned.upper():
        pieces.append(self.encipher_char(ch) if ch.isalpha() else ch)
    return ''.join(pieces)
def raise_on_failure(mainfunc):
    """raise if and only if mainfunc fails"""
    try:
        result = mainfunc()
        if result:
            # A truthy return value is treated as an error status.
            exit(result)
    except CalledProcessError as proc_error:
        exit(proc_error.returncode)
    except SystemExit as sys_exit:
        # Swallow clean exits (code 0 / None); propagate real failures.
        if sys_exit.code:
            raise
    except KeyboardInterrupt:  # I don't plan to test-cover this.  :pragma:nocover:
        exit(1)
def is_valid(self):
    '''
    Validate form.

    Return True if Django validates the form, the username obeys the parameters, and passwords match.
    Return False otherwise.
    '''
    # Let Django's standard field validation run first.
    if not super(ProfileRequestForm, self).is_valid():
        return False
    validity = True
    if self.cleaned_data['password'] != self.cleaned_data['confirm_password']:
        # Attach the mismatch error to both password fields so it is shown
        # next to each input.
        form_add_error(self, 'password', "Passwords don't match.")
        form_add_error(self, 'confirm_password', "Passwords don't match.")
        validity = False
    return validity
def tasks(self, **opts):
    """Convenience wrapper around listTasks(...) for this channel ID.

    Tasks are sorted by priority and creation time.

    :param **opts: "opts" dict to the listTasks RPC.
        For example, "state=[task_states.OPEN]" will return
        only the "OPEN" tasks.
    :returns: deferred that when fired returns a list of Tasks.
    """
    # Scope the query to this channel and keep the server-side ordering.
    opts['channel_id'] = self.id
    query_opts = {'order': 'priority,create_time'}
    return self.connection.listTasks(opts, query_opts)
def guess_base_branch():
    # type: (str) -> Optional[str, None]
    """Try to guess the base branch for the current branch.

    Do not trust this guess. git makes it pretty much impossible to guess
    the base branch reliably so this function implements few heuristics that
    will work on most common use cases but anything a bit crazy will probably
    trip this function.

    Returns:
        Optional[str]: The name of the base branch for the current branch if
            guessable or **None** if can't guess.
    """
    my_branch = current_branch(refresh=True).name
    curr = latest_commit()
    if len(curr.branches) > 1:
        # We're possibly at the beginning of the new branch (currently both
        # on base and new branch).
        other = [x for x in curr.branches if x != my_branch]
        if len(other) == 1:
            return other[0]
        # More than one candidate — ambiguous, give up.
        return None
    else:
        # We're on one branch — walk back until a commit shared with
        # another branch is found.
        parent = curr
        while parent and my_branch in parent.branches:
            curr = parent
            if len(curr.branches) > 1:
                other = [x for x in curr.branches if x != my_branch]
                if len(other) == 1:
                    return other[0]
                return None
            # Only follow parents that still belong to our branch.
            parents = [p for p in curr.parents if my_branch in p.branches]
            num_parents = len(parents)
            if num_parents > 2:
                # More than two parent, give up
                return None
            if num_parents == 2:
                # This is a merge commit.
                for p in parents:
                    if p.branches == [my_branch]:
                        parent = p
                        break
            elif num_parents == 1:
                parent = parents[0]
            elif num_parents == 0:
                # Reached a root commit without finding a shared ancestor.
                parent = None
        return None
def as_crispy_errors(form, template_pack=TEMPLATE_PACK):
    """
    Renders only form errors the same way as django-crispy-forms::

        {% load crispy_forms_tags %}
        {{ form|as_crispy_errors }}

    or::

        {{ form|as_crispy_errors:"bootstrap" }}
    """
    # Formsets and plain forms use different templates and context keys.
    if isinstance(form, BaseFormSet):
        template_name, context_key = 'errors_formset.html', 'formset'
    else:
        template_name, context_key = 'errors.html', 'form'
    template = get_template('%s/%s' % (template_pack, template_name))
    context = Context({context_key: form}).flatten()
    return template.render(context)
def _shru16(ins):
    '''
    Logical right shift of a 16-bit unsigned integer.
    The result is pushed onto the stack.

    Optimizations:
     * If 2nd op is 0, emit nothing (shift by zero is a no-op).
     * If 2nd op is 1, emit a single srl/rr pair instead of a loop.
    '''
    op1, op2 = tuple(ins.quad[2:])
    if is_int(op2):
        # Shift amount known at compile time.
        op = int16(op2)
        if op == 0:
            return []
        output = _16bit_oper(op1)
        if op == 1:
            # One logical shift: srl h clears bit 15, rr l pulls the carry in.
            output.append('srl h')
            output.append('rr l')
            output.append('push hl')
            return output
        # Constant loop counter in B for the djnz loop below.
        output.append('ld b, %i' % op)
    else:
        # Runtime shift amount: load it into B via A.
        output = _8bit_oper(op2)
        output.append('ld b, a')
        output.extend(_16bit_oper(op1))
    # Shift HL right once per iteration, B times.
    label = tmp_label()
    output.append('%s:' % label)
    output.append('srl h')
    output.append('rr l')
    output.append('djnz %s' % label)
    output.append('push hl')
    return output
def rank(self, dte):
    '''The rank of a given *dte* in the timeseries'''
    # Serialise the date the same way timestamps are stored in the backend.
    serialised = self.pickler.dumps(dte)
    return self.backend_structure().rank(serialised)
def get_repo_hooks(self, auth, username, repo_name):
    """Returns all hooks of repository with name ``repo_name`` owned by
    the user with username ``username``.

    :param auth.Authentication auth: authentication object
    :param str username: username of owner of repository
    :param str repo_name: name of repository
    :return: a list of hooks for the specified repository
    :rtype: List[GogsRepo.Hooks]
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    url = "/repos/{u}/{r}/hooks".format(u=username, r=repo_name)
    response = self.get(url, auth=auth)
    hooks_json = response.json()
    return [GogsRepo.Hook.from_json(hook_data) for hook_data in hooks_json]
def set_foreground(self, fg, isRGBA=None):
    """
    Set the foreground color.  fg can be a matlab format string, a
    html hex color string, an rgb unit tuple, or a float between 0
    and 1.  In the latter case, grayscale is used.
    """
    # Implementation note: wxPython has a separate concept of pen and
    # brush - the brush fills any outline trace left by the pen.
    # Here we set both to the same colour - if a figure is not to be
    # filled, the renderer will set the brush to be transparent
    # Same goes for text foreground...
    DEBUG_MSG("set_foreground()", 1, self)
    # select()/unselect() bracket the DC state change (wx requirement).
    self.select()
    GraphicsContextBase.set_foreground(self, fg, isRGBA)
    self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
    self.gfx_ctx.SetPen(self._pen)
    self.unselect()
def start_process(self, program, arguments=None, working_dir=None,
                  print_command=True, use_pseudo_terminal=True, env=None):
    """
    Starts the child process.

    :param program: program to start
    :param arguments: list of program arguments
    :param working_dir: working directory of the child process
    :param print_command: True to print the full command (pgm + arguments)
        as the first line of the output window
    :param use_pseudo_terminal: True to use a pseudo terminal on Unix (pty),
        False to avoid using a pty wrapper. When using a pty wrapper, both
        stdout and stderr are merged together.
    :param env: environment variables to set on the child process.
        If None, os.environ will be used.
    """
    # clear previous output
    self.clear()
    self.setReadOnly(False)
    if arguments is None:
        arguments = []
    if sys.platform != 'win32' and use_pseudo_terminal:
        # Run the target through the pty wrapper script so the child sees
        # a terminal; the wrapper is executed by the current interpreter.
        pgm = sys.executable
        args = [pty_wrapper.__file__, program] + arguments
        self.flg_use_pty = use_pseudo_terminal
    else:
        pgm = program
        args = arguments
        self.flg_use_pty = False
        # pty not available on windows
    self._process.setProcessEnvironment(self._setup_process_environment(env))
    if working_dir:
        self._process.setWorkingDirectory(working_dir)
    if print_command:
        # Echo the command with a terminal reset escape prefix.
        self._formatter.append_message(
            '\x1b[0m%s %s\n' % (program, ' '.join(arguments)),
            output_format=OutputFormat.CustomFormat)
    self._process.start(pgm, args)
def update_profile_banner(self, filename, **kargs):
    """
    :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
    :allowed_param: 'width', 'height', 'offset_left', 'offset_right'
    """
    f = kargs.pop('file', None)
    headers, post_data = API._pack_image(filename, 700, form_field='banner', f=f)
    # FIX: the bound API call's result was previously discarded; return it so
    # callers can inspect the response. Backward compatible (previously None).
    return bind_api(
        api=self,
        path='/account/update_profile_banner.json',
        method='POST',
        allowed_param=['width', 'height', 'offset_left', 'offset_right'],
        require_auth=True,
    )(post_data=post_data, headers=headers)
def patch_namespaced_replica_set(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_replica_set  # noqa: E501

    partially update the specified ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_namespaced_replica_set(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1ReplicaSet
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths make the identical underlying call; the
    # helper itself returns either the thread or the data depending on
    # async_req, so a single call suffices.
    return self.patch_namespaced_replica_set_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def request_search(self, txt=None):
    """
    Requests a search operation.

    :param txt: The text to replace. If None, the content of lineEditSearch
        is used instead.
    """
    if self.checkBoxRegex.isChecked():
        # Validate the regex up-front so a bad pattern shows an error
        # instead of crashing the background search job.
        try:
            re.compile(self.lineEditSearch.text(), re.DOTALL)
        except sre_constants.error as e:
            self._show_error(e)
            return
        else:
            self._show_error(None)
    # Qt signals may pass an int here instead of a string; fall back to the
    # line-edit contents in that case too.
    if txt is None or isinstance(txt, int):
        txt = self.lineEditSearch.text()
    if txt:
        self.job_runner.request_job(
            self._exec_search, txt, self._search_flags())
    else:
        # Empty query: cancel any pending search and clear highlights.
        self.job_runner.cancel_requests()
        self._clear_occurrences()
        self._on_search_finished()
def do_direct_payment(self, paymentaction="Sale", **kwargs):
    """Shortcut for the DoDirectPayment method.

    ``paymentaction`` could be 'Authorization' or 'Sale'

    To issue a Sale immediately::

        charge = {
            'amt': '10.00',
            'creditcardtype': 'Visa',
            'acct': '4812177017895760',
            'expdate': '012010',
            'cvv2': '962',
            'firstname': 'John',
            'lastname': 'Doe',
            'street': '1 Main St',
            'city': 'San Jose',
            'state': 'CA',
            'zip': '95131',
            'countrycode': 'US',
            'currencycode': 'USD',
        }
        direct_payment("Sale", **charge)

    Or, since "Sale" is the default:

        direct_payment(**charge)

    To issue an Authorization, simply pass "Authorization" instead
    of "Sale".

    You may also explicitly set ``paymentaction`` as a keyword argument:

        direct_payment(paymentaction="Sale", **charge)
    """
    # locals() here captures `paymentaction` (and self/kwargs) so the
    # sanitizer can fold the action into the request parameters.
    kwargs.update(self._sanitize_locals(locals()))
    return self._call('DoDirectPayment', **kwargs)
def ssh_authorized_keys_lines(application_name, user=None):
    """Return contents of authorized_keys file for given application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    """
    # Keep only non-blank lines, stripped of trailing whitespace/newlines.
    with open(authorized_keys(application_name, user)) as keys:
        return [line.rstrip() for line in keys if line.rstrip()]
def amalgamate(A, blocksize):
    """Amalgamate matrix A.

    Parameters
    ----------
    A : csr_matrix
        Matrix to amalgamate
    blocksize : int
        blocksize to use while amalgamating

    Returns
    -------
    A_amal : csr_matrix
        Amalgamated matrix A; first, convert A to BSR with square blocksize
        and then return a CSR matrix of ones using the resulting BSR indptr
        and indices

    Notes
    -----
    inverse operation of UnAmal for square matrices

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.util.utils import amalgamate
    >>> row = array([0,0,1])
    >>> col = array([0,2,1])
    >>> data = array([1,2,3])
    >>> A = csr_matrix((data, (row,col)), shape=(4,4))
    >>> A.todense()
    matrix([[1, 0, 2, 0],
            [0, 3, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]])
    >>> amalgamate(A,2).todense()
    matrix([[ 1.,  1.],
            [ 0.,  0.]])
    """
    if blocksize == 1:
        # Nothing to amalgamate.
        return A
    # FIX: the top-level ``scipy.mod`` alias (sp.mod) was deprecated and
    # removed in modern SciPy releases; the plain modulo operator is
    # equivalent and future-proof.
    elif A.shape[0] % blocksize != 0:
        raise ValueError("Incompatible blocksize")
    A = A.tobsr(blocksize=(blocksize, blocksize))
    A.sort_indices()
    # Build a CSR "structure only" matrix: one 1.0 entry per nonzero block.
    subI = (np.ones(A.indices.shape), A.indices, A.indptr)
    shape = (int(A.shape[0] / A.blocksize[0]),
             int(A.shape[1] / A.blocksize[1]))
    return csr_matrix(subI, shape=shape)
def highlight_syntax(src, lang, linenums=False):
    """Run *src* through the [Pygments](http://pygments.pocoo.org/)
    highlighter, optionally with line numbers. Output carries only styling
    hooks (i.e.: <span class="k">); style it with CSS as desired.
    """
    src = src.strip('\n')
    # Fall back to the plain-text lexer when no/unknown language is given.
    try:
        lexer = get_lexer_by_name(lang, stripall=True) if lang else TextLexer()
    except ValueError:
        lexer = TextLexer()
    formatter = HtmlFormatter(linenos=linenums, tab_length=TAB_LENGTH)
    html = highlight(src, lexer, formatter)
    lang_part = LANG_TAG % (lang,) if lang else u''
    open_code = OPEN_CODE % (lang_part,)
    # Swap Pygments' wrapper markup for our own open/close tags.
    html = html.replace('<div class="highlight"><pre>', open_code, 1)
    html = html.replace('</pre></div>', CLOSE_CODE)
    return html
def affine_align(dset_from, dset_to, skull_strip=True, mask=None, suffix='_aff', prefix=None, cost=None, epi=False, resample='wsinc5', grid_size=None, opts=[]):
    '''interface to 3dAllineate to align anatomies and EPIs

    :param dset_from: dataset to align (the -source)
    :param dset_to: dataset to align to (the -base)
    :param skull_strip: True to skull-strip both datasets first; may also
        be a single dataset name to strip only that dataset
    :param mask: optional exclusion mask (passed to 3dAllineate as -emask)
    :param suffix: suffix used to derive the output name when prefix is None
    :param prefix: explicit output prefix (overrides suffix-derived name)
    :param cost: optional 3dAllineate cost function (-cost)
    :param epi: if True, pass -EPI
    :param resample: final interpolation method (-final)
    :param grid_size: optional output grid size (-newgrid)
    :param opts: extra command-line options appended verbatim.
        NOTE(review): mutable default argument; harmless here because it is
        only read, but a ``None`` default would be safer.
    '''
    # filename of the skull-stripped version of a dataset
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]
    def dset_source(dset):
        # choose the stripped or raw dataset depending on skull_strip
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset
    dset_affine = prefix
    if dset_affine == None:
        dset_affine = os.path.split(nl.suffix(dset_from, suffix))[1]
    dset_affine_mat_1D = nl.prefix(dset_affine) + '_matrix.1D'
    dset_affine_par_1D = nl.prefix(dset_affine) + '_params.1D'
    if os.path.exists(dset_affine):
        # final product already exists
        return
    for dset in [dset_from, dset_to]:
        if skull_strip == True or skull_strip == dset:
            nl.skull_strip(dset, '_ns')
    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            # NOTE(review): ``dset_u`` is not defined anywhere in this view,
            # and ``dset`` here is the leftover loop variable from the loop
            # above (== dset_to at this point) -- confirm both are intended.
            nl.run(['3dresample', '-master', dset_u(dset_ss(dset)), '-inset', mask, '-prefix', nl.suffix(mask, '_resam')], products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')
    all_cmd = ['3dAllineate', '-prefix', dset_affine, '-base', dset_source(dset_to), '-source', dset_source(dset_from), '-source_automask', '-1Dmatrix_save', dset_affine_mat_1D, '-1Dparam_save', dset_affine_par_1D, '-autoweight', '-final', resample, '-cmass'] + opts
    if grid_size:
        all_cmd += ['-newgrid', grid_size]
    if cost:
        all_cmd += ['-cost', cost]
    if epi:
        all_cmd += ['-EPI']
    if mask:
        all_cmd += ['-emask', mask_use]
    nl.run(all_cmd, products=dset_affine)
def run(self, verbose=False):
    """Del user modules to force Python to deeply reload them

    Do not del modules which are considered as system modules, i.e.
    modules installed in subdirectories of Python interpreter's binary
    Do not del C modules

    :param verbose: when True, print the names of the deleted modules
    """
    log = []
    # Iterate over a snapshot: entries are deleted from sys.modules below.
    modules_copy = dict(sys.modules)
    # BUG FIX: removed leftover debug code that printed state whenever a
    # module named 'aaaaa' was encountered.
    for modname, module in modules_copy.items():
        # only touch modules imported after this deleter was created
        if modname not in self.previous_modules:
            modpath = getattr(module, '__file__', None)
            if modpath is None:
                # *module* is a C module that is statically linked into the
                # interpreter. There is no way to know its path, so we
                # choose to ignore it.
                continue
            if not self.is_module_blacklisted(modname, modpath):
                log.append(modname)
                del sys.modules[modname]
    if verbose and log:
        print("\x1b[4;33m%s\x1b[24m%s\x1b[0m" % ("UMD has deleted", ": " + ", ".join(log)))
def row_factory(cursor, row):
    """sqlite row factory: map a result row onto a dict keyed by column name."""
    return {column[0]: row[position]
            for position, column in enumerate(cursor.description)}
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
    """Returns the mean and standard deviations

    Looks up the IML tables for the rupture magnitude, evaluates mean and
    stddevs at the context distances and, when an amplification model is
    attached, applies its factors. The mean is returned in natural-log
    units in both branches.

    :param sctx: site context
    :param rctx: rupture context (must provide ``mag``)
    :param dctx: distance context
    :param imt: intensity measure type
    :param stddev_types: requested standard deviation types
    :returns: tuple of (log-mean, list of stddev arrays)
    """
    # Return Distance Tables
    imls = self._return_tables(rctx.mag, imt, "IMLs")
    # Get distance vector for the given magnitude
    idx = numpy.searchsorted(self.m_w, rctx.mag)
    dists = self.distances[:, 0, idx - 1]
    # Get mean and standard deviations
    mean = self._get_mean(imls, dctx, dists)
    stddevs = self._get_stddevs(dists, rctx.mag, dctx, imt, stddev_types)
    if self.amplification:
        # Apply amplification
        mean_amp, sigma_amp = self.amplification.get_amplification_factors(imt, sctx, rctx, getattr(dctx, self.distance_type), stddev_types)
        # multiplication in linear space == addition in log space
        mean = numpy.log(mean) + numpy.log(mean_amp)
        for iloc in range(len(stddev_types)):
            # scale each stddev array in place by its amplification factor
            stddevs[iloc] *= sigma_amp[iloc]
        return mean, stddevs
    else:
        return numpy.log(mean), stddevs
def _get_indexers_coords_and_indexes(self, indexers):
    """Extract coordinates from indexers.

    Returns an OrderedDict mapping from coordinate name to the
    coordinate variable.

    Only coordinate with a name different from any of self.variables will
    be attached.

    :param indexers: mapping of dimension name -> indexer; only DataArray
        indexers contribute coordinates/indexes here
    :returns: tuple of (attached_coords, attached_indexes) OrderedDicts
    """
    from .dataarray import DataArray
    coord_list = []
    indexes = OrderedDict()
    for k, v in indexers.items():
        if isinstance(v, DataArray):
            v_coords = v.coords
            if v.dtype.kind == 'b':
                if v.ndim != 1:
                    # we only support 1-d boolean array
                    raise ValueError('{:d}d-boolean array is used for indexing along '
                                     'dimension {!r}, but only 1d boolean arrays are '
                                     'supported.'.format(v.ndim, k))
                # Make sure in case of boolean DataArray, its
                # coordinate also should be indexed.
                v_coords = v[v.values.nonzero()[0]].coords
            coord_list.append({d: v_coords[d].variable for d in v.coords})
            indexes.update(v.indexes)
    # we don't need to call align() explicitly or check indexes for
    # alignment, because merge_variables already checks for exact alignment
    # between dimension coordinates
    coords = merge_variables(coord_list)
    assert_coordinate_consistent(self, coords)
    # silently drop the conflicted variables.
    attached_coords = OrderedDict((k, v) for k, v in coords.items() if k not in self._variables)
    attached_indexes = OrderedDict((k, v) for k, v in indexes.items() if k not in self._variables)
    return attached_coords, attached_indexes
def image_update(id=None, name=None, profile=None, **kwargs):  # pylint: disable=C0103
    '''Update properties of given image.

    Known to work for:
    - min_ram (in MB)
    - protected (bool)
    - visibility ('public' or 'private')

    CLI Example:

    .. code-block:: bash

        salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_update name=f16-jeos

    :param id: image UUID to update (takes precedence over name)
    :param name: image name to look up when no id is given
    :param profile: authentication profile to use
    :param kwargs: image properties to set; keys starting with '_' are
        ignored (Salt-internal kwargs), unchanged values are not re-sent
    :raises SaltInvocationError: when neither id nor name is given
    '''
    if id:
        image = image_show(id=id, profile=profile)
        if 'result' in image and not image['result']:
            # image_show reported an error; propagate it unchanged
            return image
        elif len(image) == 1:
            # image_show wrapped the image in a single-entry dict.
            # BUG FIX: dict.values() is not subscriptable on Python 3;
            # materialize it first.
            image = list(image.values())[0]
    elif name:
        img_list = image_list(name=name, profile=profile)
        # BUG FIX: "img_list is dict" compared identity with the dict type
        # and was always False; use isinstance to detect an error payload.
        if isinstance(img_list, dict) and 'result' in img_list:
            return img_list
        elif not img_list:
            return {'result': False, 'comment': 'No image with name \'{0}\' ' 'found.'.format(name)}
        elif len(img_list) == 1:
            try:
                image = img_list[0]
            except KeyError:
                image = img_list[name]
    else:
        raise SaltInvocationError('Either id or name must be provided.')
    log.debug('Found image:\n%s', image)
    to_update = {}
    for key, value in kwargs.items():
        # skip Salt-internal kwargs such as __pub_*
        if key.startswith('_'):
            continue
        # only send properties that actually change
        if key not in image or image[key] != value:
            log.debug('add <%s=%s> to to_update', key, value)
            to_update[key] = value
    g_client = _auth(profile)
    updated = g_client.images.update(image['id'], **to_update)
    return updated
def _post_md5_skip_on_check(self, key, md5_match):
    # type: (Uploader, str, bool) -> None
    """Perform post MD5 skip on check

    Pops the pending entry from the MD5 map and either removes the upload
    from the workload (on a match) or queues it (on a mismatch). In
    dry-run mode the action is only logged/accounted, never enqueued.

    :param Uploader self: this
    :param str key: md5 map key
    :param bool md5_match: if MD5 matches
    """
    with self._md5_meta_lock:
        # claim the pending entry so no other thread re-processes it
        src, rfile = self._md5_map.pop(key)
    uid = blobxfer.operations.upload.Uploader.create_unique_id(src, rfile)
    if md5_match:
        # remote content matches: drop this upload from the workload
        with self._upload_lock:
            self._upload_set.remove(uid)
            self._upload_total -= 1
        if self._general_options.dry_run:
            logger.info('[DRY RUN] MD5 match, skipping: {} -> {}'.format(src.absolute_path, rfile.path))
    else:
        if self._general_options.dry_run:
            # dry run: account for the would-be upload without enqueueing it
            with self._upload_lock:
                self._upload_set.remove(uid)
                self._upload_total -= 1
            logger.info('[DRY RUN] MD5 mismatch, upload: {} -> {}'.format(src.absolute_path, rfile.path))
        else:
            self._add_to_upload_queue(src, rfile, uid)
def add_fog(img, fog_coef, alpha_coef, haze_list):
    """Add fog to the image.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (np.array): input image, uint8 or float32.
        fog_coef (float): fog intensity coefficient.
        alpha_coef (float): transparency of the haze circles.
        haze_list (list): (x, y) positions of the haze patches.

    Returns:
        np.array: fogged image in the input dtype.
    """
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        # work in uint8 internally, convert back at the end
        img = from_float(img, dtype=np.dtype('uint8'))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise ValueError('Unexpected dtype {} for RandomFog augmentation'.format(input_dtype))
    width = img.shape[1]
    # haze patch size scales with image width and fog intensity
    hw = max(int(width // 3 * fog_coef), 10)
    alpha = alpha_coef * fog_coef
    radius = hw // 2
    for x, y in haze_list:
        # blend a white disc into the image at this haze point
        overlay = img.copy()
        output = img.copy()
        center = (x + hw // 2, y + hw // 2)
        cv2.circle(overlay, center, int(radius), (255, 255, 255), -1)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
        img = output.copy()
    image_rgb = cv2.blur(img, (hw // 10, hw // 10))
    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)
    return image_rgb
def check_lengths(*arrays):
    """tool to ensure input and output data have the same number of samples

    Parameters
    ----------
    *arrays : iterable of arrays to be checked

    Returns
    -------
    None

    Raises
    ------
    ValueError : if the arrays differ in length
    """
    lengths = [len(array) for array in arrays]
    if len(set(lengths)) > 1:
        raise ValueError('Inconsistent data lengths: {}'.format(lengths))
def get_mask_from_raster(rasterfile, outmaskfile, keep_nodata=False):
    """Generate mask data from a given raster data.

    Valid cells become 1, nodata cells become DEFAULT_NODATA. When
    keep_nodata is False the mask is cropped to the bounding box of the
    valid cells; otherwise the full raster extent is preserved.

    Args:
        rasterfile: raster file path.
        outmaskfile: output mask file path.
        keep_nodata: if False (default), crop to the valid-data extent.

    Returns:
        Raster object of mask data.
    """
    raster_r = RasterUtilClass.read_raster(rasterfile)
    xsize = raster_r.nCols
    ysize = raster_r.nRows
    nodata_value = raster_r.noDataValue
    srs = raster_r.srs
    x_min = raster_r.xMin
    y_max = raster_r.yMax
    dx = raster_r.dx
    data = raster_r.data
    if not keep_nodata:
        # scan for the bounding box of cells that are not nodata
        i_min = ysize - 1
        i_max = 0
        j_min = xsize - 1
        j_max = 0
        for i in range(ysize):
            for j in range(xsize):
                # DELTA-based comparison guards against float nodata values
                if abs(data[i][j] - nodata_value) > DELTA:
                    i_min = min(i, i_min)
                    i_max = max(i, i_max)
                    j_min = min(j, j_min)
                    j_max = max(j, j_max)
        # print(i_min, i_max, j_min, j_max)
        y_size_mask = i_max - i_min + 1
        x_size_mask = j_max - j_min + 1
        x_min_mask = x_min + j_min * dx
        y_max_mask = y_max - i_min * dx
    else:
        y_size_mask = ysize
        x_size_mask = xsize
        x_min_mask = x_min
        y_max_mask = y_max
        i_min = 0
        j_min = 0
    print('%dx%d -> %dx%d' % (xsize, ysize, x_size_mask, y_size_mask))
    mask = numpy.zeros((y_size_mask, x_size_mask))
    for i in range(y_size_mask):
        for j in range(x_size_mask):
            if abs(data[i + i_min][j + j_min] - nodata_value) > DELTA:
                mask[i][j] = 1
            else:
                mask[i][j] = DEFAULT_NODATA
    # GDAL-style geotransform. NOTE(review): uses dx for both x and y
    # resolution -- assumes square cells; confirm for non-square rasters.
    mask_geotrans = [x_min_mask, dx, 0, y_max_mask, 0, -dx]
    RasterUtilClass.write_gtiff_file(outmaskfile, y_size_mask, x_size_mask, mask, mask_geotrans, srs, DEFAULT_NODATA, GDT_Int32)
    return Raster(y_size_mask, x_size_mask, mask, DEFAULT_NODATA, mask_geotrans, srs)
def MakeSuiteFromDict(d, name=''):
    """Build a normalized Suite from a value-to-probability mapping.

    Args:
        d: dictionary that maps values to probabilities
        name: string name for this suite

    Returns:
        Suite object
    """
    result = Suite(name=name)
    result.SetDict(d)
    result.Normalize()
    return result
def appendBlock(self, block):
    '''appendBlock - Append a block to this element. A block can be a string (text node), or an AdvancedTag (tag node)

    @param block <str/AdvancedTag> - block to add

    @return - #block

    NOTE: To add multiple blocks, @see appendBlocks
    If you know the type, use either @see appendChild for tags or @see appendText for text
    '''
    # Dispatch on node type: tags go through appendNode, text through appendText
    handler = self.appendNode if isinstance(block, AdvancedTag) else self.appendText
    handler(block)
    return block
def average_sharded_losses(sharded_losses):
    """Average losses across datashards.

    Args:
        sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
            can be a single Tensor or a 2-tuple (numerator and denominator).

    Returns:
        losses: dict<str loss_name, Tensor avg_loss>
    """
    averaged = {}
    for name in sorted(sharded_losses[0]):
        per_shard = [losses[name] for losses in sharded_losses]
        if isinstance(per_shard[0], tuple):
            # ratio losses: sum numerators / sum denominators (>= 1 to
            # avoid division by zero)
            nums, dens = zip(*per_shard)
            denom = tf.maximum(tf.cast(1.0, dens[0].dtype), tf.add_n(dens))
            averaged[name] = tf.add_n(nums) / denom
        else:
            averaged[name] = tf.reduce_mean(per_shard)
    return averaged
def create_tree_from_string(line):
    """Parse and convert a string representation
    of an example into a LabeledTree datastructure.

    Arguments:
        line : str, string version of the tree.

    Returns:
        LabeledTree : parsed tree.

    Raises:
        ParseError : if the parentheses are unbalanced.
    """
    depth = 0
    current_word = ""
    root = None
    current_node = root
    for char in line:
        if char == '(':
            if current_node is not None and len(current_word) > 0:
                # flush any text collected so far onto the open node
                attribute_text_label(current_node, current_word)
                current_word = ""
            depth += 1
            if depth > 1:
                # replace current head node by this node:
                child = LabeledTree(depth=depth)
                current_node.add_child(child)
                current_node = child
                root.add_general_child(child)
            else:
                root = LabeledTree(depth=depth)
                root.add_general_child(root)
                current_node = root
        elif char == ')':
            # assign current word:
            if len(current_word) > 0:
                attribute_text_label(current_node, current_word)
                current_word = ""
            # go up a level:
            depth -= 1
            # IDIOM FIX: compare against None with "is not" rather than "!="
            if current_node.parent is not None:
                current_node.parent.udepth = max(current_node.udepth + 1, current_node.parent.udepth)
                current_node = current_node.parent
        else:
            # add to current read word
            current_word += char
    if depth != 0:
        raise ParseError("Not an equal amount of closing and opening parentheses")
    return root
def sort_cyclic_graph_best_effort(graph, pick_first='head'):
    """Fallback for cases in which the graph has cycles."""
    ordered = []
    visited = set()
    # Walk the pick_first chain first, then sweep the other chain for
    # anything not yet visited. Given the way the graph is built, the two
    # chains together always cover all the elements.
    if pick_first == 'head':
        chain_attrs = ('head_node', 'update_node')
    else:
        chain_attrs = ('update_node', 'head_node')
    for attr in chain_attrs:
        current = FIRST
        while current is not None:
            visited.add(current)
            current = getattr(graph[current], attr)
            if current not in visited and current is not None:
                ordered.append(current)
    return ordered
def _trim_zeros(*char_lists):
    """Trim any zeros from provided character lists

    Checks the beginning of any provided lists for '0's and removes any
    such leading zeros. Operates on (and possibly) alters the passed
    list

    :param list char_lists: a list or lists of characters
    :return: None
    :rtype: None
    """
    logger.debug('_trim_zeros(%s)', char_lists)
    for chars in char_lists:
        # strip leading '0' characters in place
        while chars and chars[0] == '0':
            del chars[0]
        logger.debug('updated block: %s', chars)
def paste_clipboard_data(self, data, paste_mode=PasteMode.EMACS, count=1):
    """Return a new :class:`.Document` instance which contains the result if
    we would paste this data at the current cursor position.

    :param data: :class:`ClipboardData` instance holding the text and its
        selection type (characters / lines / block).
    :param paste_mode: Where to paste. (Before/after/emacs.)
    :param count: When >1, Paste multiple times.
    """
    assert isinstance(data, ClipboardData)
    assert paste_mode in (PasteMode.VI_BEFORE, PasteMode.VI_AFTER, PasteMode.EMACS)
    before = (paste_mode == PasteMode.VI_BEFORE)
    after = (paste_mode == PasteMode.VI_AFTER)
    if data.type == SelectionType.CHARACTERS:
        # Character-wise paste: insert at (or just after, for vi 'p') the cursor.
        if after:
            new_text = (self.text[:self.cursor_position + 1] + data.text * count + self.text[self.cursor_position + 1:])
        else:
            new_text = self.text_before_cursor + data.text * count + self.text_after_cursor
        new_cursor_position = self.cursor_position + len(data.text) * count
        if before:
            new_cursor_position -= 1
    elif data.type == SelectionType.LINES:
        # Line-wise paste: insert whole lines above (before) or below the
        # current row; the cursor lands on the first pasted line.
        l = self.cursor_position_row
        if before:
            lines = self.lines[:l] + [data.text] * count + self.lines[l:]
            new_text = '\n'.join(lines)
            new_cursor_position = len(''.join(self.lines[:l])) + l
        else:
            lines = self.lines[:l + 1] + [data.text] * count + self.lines[l + 1:]
            new_cursor_position = len(''.join(self.lines[:l + 1])) + l + 1
            new_text = '\n'.join(lines)
    elif data.type == SelectionType.BLOCK:
        # Block (rectangular) paste: insert each clipboard line at the same
        # column on consecutive rows, padding short rows with spaces.
        lines = self.lines[:]
        start_line = self.cursor_position_row
        start_column = self.cursor_position_col + (0 if before else 1)
        for i, line in enumerate(data.text.split('\n')):
            index = i + start_line
            if index >= len(lines):
                # extend the document when pasting past the last row
                lines.append('')
            lines[index] = lines[index].ljust(start_column)
            lines[index] = lines[index][:start_column] + line * count + lines[index][start_column:]
        new_text = '\n'.join(lines)
        new_cursor_position = self.cursor_position + (0 if before else 1)
    return Document(text=new_text, cursor_position=new_cursor_position)
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
    """Add a file to a tree.

    Args:
        tree: A list of dicts containing info about each blob in a tree.
        file_path: The path of the new file in the tree.
        file_contents: The (UTF-8 encoded) contents of the new file.
        is_executable: If ``True``, the new file will get executable
            permissions (0755). Otherwise, it will get 0644 permissions.

    Returns:
        The provided tree, but with the new file added.
    """
    if is_executable:
        mode = "100755"
    else:
        mode = "100644"
    tree.append({
        "path": file_path,
        "mode": mode,
        "type": "blob",
        "content": file_contents,
    })
    return tree
def ProcessHttpResponse(self, method_config, http_response, request=None):
    """Convert an HTTP response to the expected message type."""
    # Decode the raw HTTP response first, then let the client turn the
    # result into the method's response message type.
    processed = self.__ProcessHttpResponse(method_config, http_response, request)
    return self.__client.ProcessResponse(method_config, processed)
def leading_whitespace(self, line):
    # type: (str) -> str
    """Return the run of spaces/tabs at the start of *line* (for preserving indents).

    :param line: text line to inspect
    :return: the leading whitespace prefix (possibly empty)
    """
    prefix = ""
    for ch in line:
        if ch not in " \t":
            break
        prefix += ch
    return prefix
def opcode(*opcodes):
    """A decorator for opcodes: tags the handler with the opcodes it serves."""
    def mark(func):
        func._is_opcode = True
        func._opcodes = opcodes
        return func
    return mark
def as_dict(df, ix=':'):
    """Convert *df* to row dicts; adds a ``datetime`` column when the
    DataFrame has a DatetimeIndex (note: mutates *df* in place, as before).

    :param df: pandas.DataFrame to convert
    :param ix: ':' (default) to return all rows as a list of dicts, or an
        integer position to return just that row's dict
    :return: list of dicts, or a single dict when *ix* is an integer
    """
    if isinstance(df.index, pd.DatetimeIndex):
        # preserve each row's timestamp alongside its values
        df['datetime'] = df.index
    records = df.to_dict(orient='records')
    # BUG FIX: the old code always did records[ix]; with the default
    # ix=':' (a string) that raised TypeError. ':' now means "all rows",
    # while integer indices behave exactly as before.
    if ix == ':':
        return records
    return records[ix]
def keysym_to_string(keysym):
    '''Translate a keysym (16 bit number) into a python string.

    Handles 0 to 0xff (ISO Latin-1, where the low byte is the character
    code) plus the editing keysyms XK_BackSpace, XK_Tab, XK_Clear,
    XK_Return, XK_Pause, XK_Scroll_Lock, XK_Escape and XK_Delete. For
    other values it returns None.
    '''
    if not (keysym & 0xff00):
        # ISO latin 1: LSB is the code
        return chr(keysym & 0xff)
    editing_keys = [XK_BackSpace, XK_Tab, XK_Clear, XK_Return, XK_Pause,
                    XK_Scroll_Lock, XK_Escape, XK_Delete]
    if keysym in editing_keys:
        return chr(keysym & 0xff)
    # We should be able to do these things quite automatically
    # for latin2, latin3, etc, in Python 2.0 using the Unicode,
    # but that will have to wait.
    return None
def succeed(self, instance, action):
    """Returns if the task for the instance took place successfully"""
    uid = api.get_uid(instance)
    # missing uid or action entries default to "not successful"
    entry = self.objects.get(uid, {})
    return entry.get(action, {}).get('success', False)
def do_batch(args):
    """Runs the batch list, batch show or batch status command, printing output
    to the console

    Args:
        args: The parsed arguments sent to the command at runtime
    """
    # Dispatch to the matching subcommand handler (at most one matches).
    if args.subcommand == 'list':
        do_batch_list(args)
    elif args.subcommand == 'show':
        do_batch_show(args)
    elif args.subcommand == 'status':
        do_batch_status(args)
    elif args.subcommand == 'submit':
        do_batch_submit(args)
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    :returns: the updated item dict
    """
    # ensure we have an object and not a brain
    obj = api.get_object(obj)
    uid = api.get_uid(obj)
    # settings for this analysis
    service_settings = self.context.getAnalysisServiceSettings(uid)
    hidden = service_settings.get("hidden", obj.getHidden())
    # get the category
    category = obj.getCategoryTitle()
    item["category"] = category
    if category not in self.categories:
        # remember every category seen so the listing can group by it
        self.categories.append(category)
    price = obj.getPrice()
    keyword = obj.getKeyword()
    if uid in self.analyses:
        # an analysis already exists for this service: prefer its values
        # over the service defaults
        analysis = self.analyses[uid]
        # Might differ from the service keyword
        keyword = analysis.getKeyword()
        # Mark the row as disabled if the analysis is not in an open state
        item["disabled"] = not any([analysis.isOpen(), analysis.isRegistered()])
        # get the hidden status of the analysis
        hidden = analysis.getHidden()
        # get the price of the analysis
        price = analysis.getPrice()
    # get the specification of this object
    rr = self.get_results_range()
    spec = rr.get(keyword, ResultsRangeDict())
    item["Title"] = obj.Title()
    item["Unit"] = obj.getUnit()
    item["Price"] = price
    item["before"]["Price"] = self.get_currency_symbol()
    item["allow_edit"] = self.get_editable_columns(obj)
    item["selected"] = uid in self.selected
    item["min"] = str(spec.get("min", ""))
    item["max"] = str(spec.get("max", ""))
    item["warn_min"] = str(spec.get("warn_min", ""))
    item["warn_max"] = str(spec.get("warn_max", ""))
    item["Hidden"] = hidden
    # Append info link before the service
    # see: bika.lims.site.coffee for the attached event handler
    item["before"]["Title"] = get_link("analysisservice_info?service_uid={}".format(uid), value="<span class='glyphicon glyphicon-info-sign'></span>", css_class="service_info")
    # Icons
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image("accredited.png", title=t(_("Accredited")))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image("attach_reqd.png", title=t(_("Attachment required")))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image("attach_no.png", title=t(_('Attachment not permitted')))
    if after_icons:
        item["after"]["Title"] = after_icons
    return item
def _get_scheduler ( self , net , policy , ** scheduler_kwargs ) :
"""Return scheduler , based on indicated policy , with appropriate
parameters .""" | if policy not in [ CyclicLR , ReduceLROnPlateau ] and 'last_epoch' not in scheduler_kwargs :
last_epoch = len ( net . history ) - 1
scheduler_kwargs [ 'last_epoch' ] = last_epoch
if policy is CyclicLR and 'last_batch_idx' not in scheduler_kwargs :
scheduler_kwargs [ 'last_batch_idx' ] = self . batch_idx_ - 1
return policy ( net . optimizer_ , ** scheduler_kwargs ) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.