signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def stddev_samples(data, xcol, ycollist, delta=1.0):
    """Compute per-row mean and standard deviation over selected columns.

    Each element of the returned list is the tuple
    ``(x, mean, stddev * delta, mean - stddev * delta, mean + stddev * delta)``
    where ``x`` comes from column ``xcol`` and the statistics are computed
    over the columns listed in ``ycollist``.

    >>> stddev_samples([[2, 5, 10, 5, 10]], 0, range(1, 5))
    [(2, 7.5, 2.5, 5.0, 10.0)]

    :raises IndexError: if a column index is out of range for a row.
    """
    ncols = len(ycollist)
    samples = []
    try:
        for row in data:
            mean = float(sum(row[c] for c in ycollist)) / ncols
            variance = sum((mean - row[c]) ** 2 for c in ycollist)
            spread = math.sqrt(variance / ncols) * delta
            samples.append((row[xcol], mean, spread, mean - spread, mean + spread))
    except IndexError:
        raise IndexError("bad data: %s,xcol=%d,ycollist=%s" % (data, xcol, ycollist))
    return samples
def readGraph(edgeList, nodeList=None, directed=False, idKey='ID', eSource='From', eDest='To'):
    """Read a networkx graph from metaknowledge-style CSV files.

    The edge list file must contain a source column (_eSource_, default
    ``'From'``) and a destination column (_eDest_, default ``'To'``); all
    other columns become edge attributes. The optional node list file must
    contain an ID column (_idKey_, default ``'ID'``); all other columns
    become node attributes. Nodes appearing only in the edge list are
    created silently with no attributes.

    **Note**: a `KeyError` is raised if the column names do not match
    those given.

    # Parameters

    _edgeList_ : `str`

    > path to the edge list file

    _nodeList_ : `optional [str]`

    > default `None`, path to the node list file

    _directed_ : `optional [bool]`

    > default `False`, if `True` a directed graph is produced

    _idKey_ : `optional [str]`

    > default `'ID'`, the name of the ID column in the node list

    _eSource_ : `optional [str]`

    > default `'From'`, the name of the source column in the edge list

    _eDest_ : `optional [str]`

    > default `'To'`, the name of the destination column in the edge list

    # Returns

    `networkx Graph`

    > the graph described by the input files
    """
    progArgs = (0, "Starting to reading graphs")
    if metaknowledge.VERBOSE_MODE:
        progKwargs = {'dummy': False}
    else:
        progKwargs = {'dummy': True}
    with _ProgressBar(*progArgs, **progKwargs) as PBar:
        grph = nx.DiGraph() if directed else nx.Graph()
        if nodeList:
            PBar.updateVal(0, "Reading " + nodeList)
            # Context manager closes the file even if a row is malformed
            # (the original leaked the handle on any exception).
            with open(os.path.expanduser(os.path.abspath(nodeList))) as f:
                for vals in csv.DictReader(f):
                    # pop() raises KeyError if the ID column is missing,
                    # matching the documented behavior.
                    ndID = vals.pop(idKey)
                    if vals:
                        grph.add_node(ndID, **vals)
                    else:
                        grph.add_node(ndID)
        PBar.updateVal(.25, "Reading " + edgeList)
        with open(os.path.expanduser(os.path.abspath(edgeList))) as f:
            for vals in csv.DictReader(f):
                eFrom = vals.pop(eSource)
                eTo = vals.pop(eDest)
                if vals:
                    grph.add_edge(eFrom, eTo, **vals)
                else:
                    grph.add_edge(eFrom, eTo)
        PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
    return grph
def send_message(self, options):
    """Send a saved-file-policy change message to the wandb process.

    Primarily used internally by ``wandb.save``. *options* must contain a
    truthy ``"save_policy"`` entry, otherwise ``ValueError`` is raised.
    """
    if not options.get("save_policy"):
        raise ValueError("Only configuring save_policy is supported")
    if self.socket:
        self.socket.send(options)
        return
    if self._jupyter_agent:
        self._jupyter_agent.start()
        self._jupyter_agent.rm.update_user_file_policy(options["save_policy"])
        return
    wandb.termerror("wandb.init hasn't been called, can't configure run")
def deserialize_long(attr):
    """Deserialize a string into a long (Py2) or int (Py3).

    :param str attr: response string (or an ``ET.Element`` whose ``.text``
        carries the value) to be deserialized.
    :rtype: long or int
    :raises: ValueError if the string format is invalid.
    """
    text = attr.text if isinstance(attr, ET.Element) else attr
    return _long_type(text)
def set_language(self, language, dialect=None):
    """Queue a TTS language change.

    en: English
    es: Spanish | [lan: latino or ca: castilian]

    Unrecognized languages fall back to English (code 0); the dialect is
    only consulted for Spanish.
    """
    self.currentAction = 'setting language'
    code = 0
    if language == 'es':
        code = 2 if dialect == 'ca' else 1
    elif language == 'en':
        code = 0
    self.queue.put('l%s' % (code))
def select_data(self, iteration_indices):
    """Keep only the logged rows whose iteration number is in
    `iteration_indices`.

    Mutates ``self`` in place (``dat`` is an alias of ``self``, not a
    copy): the ``f``, ``D``, ``std``, ``xmean`` and, when present,
    ``xrecent`` and ``corrspec`` arrays are filtered by their first
    column (the iteration number).
    """
    dat = self
    iteridx = iteration_indices
    dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]
    dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]
    try:
        # Extend the index set with a duplicate of its last element for
        # the remaining arrays; silently skipped for non-listable input.
        iteridx = list(iteridx)
        iteridx.append(iteridx[-1])
        # last entry is artificial
    except:
        pass
    dat.std = dat.std[np.where([x in iteridx for x in dat.std[:, 0]])[0], :]
    dat.xmean = dat.xmean[np.where([x in iteridx for x in dat.xmean[:, 0]])[0], :]
    try:
        # NOTE(review): reads ``dat.x`` but masks by ``dat.xrecent``'s
        # first column -- looks suspicious; confirm this is intentional
        # against the originating logger implementation.
        dat.xrecent = dat.x[np.where([x in iteridx for x in dat.xrecent[:, 0]])[0], :]
    except AttributeError:
        pass
    try:
        # NOTE(review): same pattern -- reads ``dat.x`` while masking by
        # ``dat.corrspec``; verify.
        dat.corrspec = dat.x[np.where([x in iteridx for x in dat.corrspec[:, 0]])[0], :]
    except AttributeError:
        pass
def create_datacenter(kwargs=None, call=None):
    '''Create a new data center in this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_datacenter my-vmware-config name="MyNewDatacenter"
    '''
    if call != 'function':
        raise SaltCloudSystemExit('The create_datacenter function must be called with '
                                  '-f or --function.')
    datacenter_name = kwargs.get('name') if kwargs and 'name' in kwargs else None
    if not datacenter_name:
        raise SaltCloudSystemExit('You must specify name of the new datacenter to be created.')
    # (The original re-checked ``not datacenter_name`` here redundantly.)
    if len(datacenter_name) >= 80:
        raise SaltCloudSystemExit('The datacenter name must be a non empty string of less than 80 characters.')
    # Get the service instance
    si = _get_si()
    # Check if datacenter already exists
    datacenter_ref = salt.utils.vmware.get_mor_by_property(si, vim.Datacenter, datacenter_name)
    if datacenter_ref:
        return {datacenter_name: 'datacenter already exists'}
    folder = si.content.rootFolder
    # Verify that the folder is of type vim.Folder
    if isinstance(folder, vim.Folder):
        try:
            folder.CreateDatacenter(name=datacenter_name)
        except Exception as exc:
            # Show the traceback if the debug logging level is enabled
            log.error('Error creating datacenter %s: %s', datacenter_name, exc,
                      exc_info_on_loglevel=logging.DEBUG)
            return False
        log.debug("Created datacenter %s", datacenter_name)
        return {datacenter_name: 'created'}
    return False
def asDictionary(self):
    """Return the point as a python dictionary.

    Always includes ``x``, ``y`` and ``spatialReference``; adds ``z`` and
    ``m`` only when the corresponding values are set.
    """
    template = {
        "x": self._x,
        "y": self._y,
        "spatialReference": self.spatialReference,
    }
    if self._z is not None:
        template['z'] = self._z
    if self._m is not None:
        # Bug fix: the m-value was previously stored under the 'z' key,
        # clobbering any z coordinate already written above.
        template['m'] = self._m
    return template
def get_snapshots(self):
    """Return all completed (progress == '100%') snapshots for this volume.

    Snapshots of past volume IDs are included. Each returned snapshot gets
    a parsed ``date`` attribute and ``keep = True``; the list is sorted by
    date, oldest first.
    """
    ec2 = self.get_ec2_connection()
    all_vols = [self.volume_id] + self.past_volume_ids
    snaps = []
    for snapshot in ec2.get_all_snapshots():
        if snapshot.volume_id in all_vols and snapshot.progress == '100%':
            snapshot.date = boto.utils.parse_ts(snapshot.start_time)
            snapshot.keep = True
            snaps.append(snapshot)
    # Bug fix: ``list.sort(cmp=...)`` and the ``cmp`` builtin were removed
    # in Python 3 -- sort by key instead (same ordering).
    snaps.sort(key=lambda snap: snap.date)
    return snaps
def get_res(ds, t_srs=None, square=False):
    """Get GDAL Dataset raster resolution as ``[xres, yres]``.

    :param ds: GDAL Dataset
    :param t_srs: optional target spatial reference; when given and
        different from the dataset SRS, an approximate resolution in
        target units is computed from the extent diagonal.
    :param square: if True, use the mean resolution for both axes.
    """
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    # GeoTransform holds (x origin, xres, ..., y origin, ..., -yres);
    # this is Xres, Yres with Yres made positive.
    res = [gt[1], np.abs(gt[5])]
    if square:
        res = [np.mean(res), np.mean(res)]
    if t_srs is not None and not ds_srs.IsSame(t_srs):
        # This diagonal approach is similar to the approach in
        # gdaltransformer.cpp. Bad news for large extents near the poles.
        # ullr = get_ullr(ds, t_srs)
        # diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
        #
        # Dead-code removal: the original wrapped this in ``if True:``
        # with an unreachable ``else`` that computed the resolution from
        # a transformed center pixel; that branch has been deleted.
        extent = ds_extent(ds, t_srs)
        diag = np.sqrt((extent[2] - extent[0]) ** 2 + (extent[3] - extent[1]) ** 2)
        res = diag / np.sqrt(ds.RasterXSize ** 2 + ds.RasterYSize ** 2)
        res = [res, res]
    return res
def publish_proto_in_ipfs(ipfs_client, protodir):
    """Tar all ``*.proto`` files in *protodir* and publish the tar in IPFS.

    :param ipfs_client: IPFS client exposing ``add_bytes``
    :param protodir: directory containing the .proto files
    :return: base58-encoded IPFS hash of the tar
    :raises Exception: if *protodir* does not exist or holds no .proto files
    """
    if not os.path.isdir(protodir):
        raise Exception("Directory %s doesn't exists" % protodir)
    files = glob.glob(os.path.join(protodir, "*.proto"))
    if len(files) == 0:
        raise Exception("Cannot find any %s files" % (os.path.join(protodir, "*.proto")))
    # We are sorting files before we add them to the .tar since an archive
    # containing the same files in a different order will produce a
    # different content hash.
    files.sort()
    tarbytes = io.BytesIO()
    # Context manager ensures the tar is finalized/closed even if add()
    # raises (the original leaked the open archive on error).
    with tarfile.open(fileobj=tarbytes, mode="w") as tar:
        for f in files:
            tar.add(f, os.path.basename(f))
    return ipfs_client.add_bytes(tarbytes.getvalue())
def update_image(self, image_id, user_name, desc=None):
    """Create or update an Image in the Image Registry.

    A ``None`` description is sent as the empty string.
    """
    payload = {
        "username": user_name,
        "description": desc if desc else '',
    }
    return self._post('/images/%s' % image_id, payload)
def _get_color_values(adata, value_to_plot, groups=None, palette=None, use_raw=False, gene_symbols=None, layer=None):
    """Return the value or color associated to each data point.

    For categorical data, the return value is a list of colors taken from
    the category palette (stored in ``adata.uns[<key>_colors]``) or from
    the given `palette` value. For non-categorical data, the raw values
    are returned.

    Returns a tuple ``(color_vector, categorical)`` where ``categorical``
    is True exactly when `value_to_plot` is a categorical ``.obs`` column.
    """
    # when plotting, the color of the dots is determined for each plot
    # the data is either categorical or continuous and the data could be in
    # 'obs' or in 'var'
    categorical = False
    if value_to_plot is None:
        color_vector = 'lightgray'
    # check if value to plot is in obs
    elif value_to_plot in adata.obs.columns:
        if is_categorical_dtype(adata.obs[value_to_plot]):
            categorical = True
            if palette:
                # use category colors base on given palette
                _set_colors_for_categorical_obs(adata, value_to_plot, palette)
            else:
                if value_to_plot + '_colors' not in adata.uns or len(adata.uns[value_to_plot + '_colors']) < len(adata.obs[value_to_plot].cat.categories):
                    # set a default palette in case that no colors or few colors are found
                    _set_default_colors_for_categorical_obs(adata, value_to_plot)
                else:
                    # check that the colors in 'uns' are valid
                    _palette = []
                    for color in adata.uns[value_to_plot + '_colors']:
                        if not is_color_like(color):
                            # check if the color is a valid R color and translate it
                            # to a valid hex color value
                            if color in utils.additional_colors:
                                color = utils.additional_colors[color]
                            else:
                                # Any single invalid color invalidates the whole
                                # stored palette; fall back to defaults.
                                logg.warn("The following color value found in adata.uns['{}'] " " is not valid: '{}'. Default colors are used.".format(value_to_plot + '_colors', color))
                                _set_default_colors_for_categorical_obs(adata, value_to_plot)
                                _palette = None
                                break
                        _palette.append(color)
                    if _palette is not None:
                        adata.uns[value_to_plot + '_colors'] = _palette
            # for categorical data, colors should be
            # stored in adata.uns[value_to_plot + '_colors']
            # Obtain color vector by converting every category
            # into its respective color
            color_vector = [adata.uns[value_to_plot + '_colors'][x] for x in adata.obs[value_to_plot].cat.codes]
            if groups is not None:
                if isinstance(groups, str):
                    groups = [groups]
                # NOTE(review): '<U15' truncates color strings longer than
                # 15 characters -- presumably all stored colors fit; confirm.
                color_vector = np.array(color_vector, dtype='<U15')
                # set color to 'light gray' for all values
                # that are not in the groups
                color_vector[~adata.obs[value_to_plot].isin(groups)] = "lightgray"
        else:
            color_vector = adata.obs[value_to_plot].values
    # when value_to_plot is not in adata.obs
    else:
        if gene_symbols is not None and gene_symbols in adata.var.columns:
            if value_to_plot not in adata.var[gene_symbols].values:
                # Logs and returns None (no tuple) on unknown symbol.
                logg.error("Gene symbol {!r} not found in given gene_symbols " "column: {!r}".format(value_to_plot, gene_symbols))
                return
            # Translate the gene symbol into the corresponding var index.
            value_to_plot = adata.var[adata.var[gene_symbols] == value_to_plot].index[0]
        if layer is not None and value_to_plot in adata.var_names:
            if layer not in adata.layers.keys():
                raise KeyError('Selected layer: {} is not in the layers list. The list of ' 'valid layers is: {}'.format(layer, adata.layers.keys()))
            color_vector = adata[:, value_to_plot].layers[layer]
        elif use_raw and value_to_plot in adata.raw.var_names:
            color_vector = adata.raw[:, value_to_plot].X
        elif value_to_plot in adata.var_names:
            color_vector = adata[:, value_to_plot].X
        else:
            raise ValueError("The passed `color` {} is not a valid observation annotation " "or variable name. Valid observation annotation keys are: {}".format(value_to_plot, adata.obs.columns))
    return color_vector, categorical
def get_more(collection_name, num_to_return, cursor_id):
    """Get a **getMore** message (wire-protocol opcode 2005)."""
    payload = (__ZERO
               + bson._make_c_string(collection_name)
               + struct.pack("<i", num_to_return)
               + struct.pack("<q", cursor_id))
    return __pack_message(2005, payload)
def spherical(coordinates):
    """Convert cartesian *coordinates* (components stacked along axis 0)
    into columns of spherical ``(r, theta, phi)``.

    No error is propagated.
    """
    x, y, z = coordinates[0], coordinates[1], coordinates[2]
    r = N.linalg.norm(coordinates, axis=0)
    theta = N.arccos(z / r)
    phi = N.arctan2(y, x)
    return N.column_stack((r, theta, phi))
def target_query(plugin, port, location):
    """Prepared ReQL filter matching a target row by plugin name, port
    and location."""
    plugin_match = r.row[PLUGIN_NAME_KEY] == plugin
    port_match = r.row[PORT_FIELD] == port
    location_match = r.row[LOCATION_FIELD] == location
    return plugin_match & port_match & location_match
def notify(title, message, jid, password, recipient, hostname=None, port=5222, path_to_certs=None, mtype=None, retcode=None):
    """Send an XMPP notification via a one-shot bot.

    Optional parameters

    * ``hostname`` (if not from jid)
    * ``port``
    * ``path_to_certs``
    * ``mtype`` ('chat' required for Google Hangouts)

    To verify the SSL certificates offered by a server:
    path_to_certs = "path/to/ca/cert"

    Without dnspython library installed, you will need
    to specify the server hostname if it doesn't match the jid.
    For example, to use Google Talk you would need to use:
    hostname = 'talk.google.com'

    Specify port if other than 5222.
    NOTE: Ignored without specified hostname
    """
    # retcode is accepted for interface compatibility; it is never read here.
    xmpp_bot = NtfySendMsgBot(jid, password, recipient, title, message, mtype)
    # NOTE: Below plugins weren't needed for Google Hangouts
    # but may be useful (from original sleekxmpp example)
    # xmpp_bot.register_plugin('xep_0030')  # Service Discovery
    # xmpp_bot.register_plugin('xep_0199')  # XMPP Ping
    if path_to_certs and os.path.isdir(path_to_certs):
        xmpp_bot.ca_certs = path_to_certs
    # Connect to the XMPP server and start processing XMPP stanzas.
    # The starred expression passes exactly one positional argument:
    # either the (hostname, port) tuple or an empty list -- presumably an
    # empty address makes connect() resolve the server from the jid;
    # confirm against the sleekxmpp documentation.
    if xmpp_bot.connect(*([(hostname, int(port)) if hostname else []])):
        xmpp_bot.process(block=True)
    else:
        logging.getLogger(__name__).error('Unable to connect', exc_info=True)
def lstat(self, path):
    """Retrieve information about a file on the remote system, without
    following symbolic links (shortcuts). This otherwise behaves exactly
    the same as L{stat}.

    @param path: the filename to stat
    @type path: str
    @return: an object containing attributes about the given file
    @rtype: SFTPAttributes
    """
    adjusted = self._adjust_cwd(path)
    self._log(DEBUG, 'lstat(%r)' % adjusted)
    resp_type, msg = self._request(CMD_LSTAT, adjusted)
    if resp_type != CMD_ATTRS:
        raise SFTPError('Expected attributes')
    return SFTPAttributes._from_msg(msg)
def show_vcs_output_nodes_disconnected_from_cluster(self, **kwargs):
    """Auto Generated Code

    Build the payload describing nodes disconnected from the VCS cluster
    and pass it through the callback (``kwargs['callback']`` or
    ``self._callback``).

    Fix: the original created ``ET.Element("config")`` and immediately
    overwrote the name with ``show_vcs`` -- the dead allocation is removed.
    """
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    output = ET.SubElement(show_vcs, "output")
    nodes_disconnected_from_cluster = ET.SubElement(output, "nodes-disconnected-from-cluster")
    nodes_disconnected_from_cluster.text = kwargs.pop('nodes_disconnected_from_cluster')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def cast(func, value):
    """Cast the specified value to the specified type (returned by func).
    Currently this only supports int, float, bool. Should be extended if
    needed.

    Booleans arrive as numeric strings, so they are routed through ``int``
    first; unparsable int/float input yields ``nan``; ``None`` passes
    through unchanged.

    Parameters:
        func (func): Callback function used to cast to type (int, bool, float).
        value (any): value to be cast and returned.
    """
    if value is None:
        return value
    if func == bool:
        return bool(int(value))
    if func in (int, float):
        try:
            return func(value)
        except ValueError:
            return float('nan')
    return func(value)
def certify_enum_value(value, kind=None, required=True):
    """Certifier for enum values.

    :param value:
        The value to be certified.
    :param kind:
        The enum type that value should be an instance of.
    :param bool required:
        Whether the value can be `None`. Defaults to True.
    :raises CertifierValueError:
        The type is invalid
    """
    if certify_required(value=value, required=required):
        return
    try:
        kind(value)
    except:  # noqa
        # Any failure to construct the enum member means the value is
        # not part of the enum.
        message = "value {value!r} is not a valid member of {enum!r}".format(value=value, enum=kind.__name__)
        raise CertifierValueError(message=message, value=value, required=required)
def value(self):
    """Get the value as a dictionary."""
    return {
        "type": self._type,
        "name": self._name,
        "range": [self._rangeMin, self._rangeMax],
    }
def request_help(self, req, msg):
    """Return help on the available requests.

    Return a description of the available requests using a sequence of
    #help informs.

    Parameters
    ----------
    request : str, optional
        The name of the request to return help for (the default is to
        return help for all requests).

    Informs
    -------
    request : str
        The name of a request.
    description : str
        Documentation for the named request.

    Returns
    -------
    success : {'ok', 'fail'}
        Whether sending the help succeeded.
    informs : int
        Number of #help inform messages sent.

    Examples
    --------
    ?help
    #help halt ...description...
    #help help ...description...
    !help ok 5

    ?help halt
    #help halt ...description...
    !help ok 1
    """
    if not msg.arguments:
        for name, method in sorted(self._request_handlers.items()):
            req.inform(name, method.__doc__)
        return req.make_reply("ok", str(len(self._request_handlers)))
    name = msg.arguments[0]
    if name in self._request_handlers:
        method = self._request_handlers[name]
        # Bug fix: a handler without a docstring has __doc__ == None and
        # ``None.strip()`` raised AttributeError here.
        doc = (method.__doc__ or "").strip()
        req.inform(name, doc)
        return req.make_reply("ok", "1")
    return req.make_reply("fail", "Unknown request method.")
def latsph(radius, lon, lat):
    """Convert from latitudinal coordinates to spherical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsph_c.html

    :param radius: Distance of a point from the origin.
    :param lon: Angle of the point from the XZ plane in radians.
    :param lat: Angle of the point from the XY plane in radians.
    :return: (rho, colat, lons)
    :rtype: tuple
    """
    # Marshal the inputs into C doubles for the CSPICE call.
    c_radius = ctypes.c_double(radius)
    c_lon = ctypes.c_double(lon)
    c_lat = ctypes.c_double(lat)
    # Output parameters filled in by latsph_c.
    rho, colat, lons = ctypes.c_double(), ctypes.c_double(), ctypes.c_double()
    libspice.latsph_c(c_radius, c_lon, c_lat,
                      ctypes.byref(rho), ctypes.byref(colat), ctypes.byref(lons))
    return rho.value, colat.value, lons.value
def write(self, outfname=None):
    """Write or overwrite a `Survey` collection to the specified .DAT file.

    Surveys are separated by a CRLF-wrapped ASCII form feed and the file
    is terminated with an ASCII SUB (0x1A) character, matching the legacy
    DAT format. Defaults to ``self.filename`` when no name is given.
    """
    target = outfname or self.filename
    with codecs.open(target, 'wb', 'windows-1252') as outf:
        for survey in self.surveys:
            outf.write('\r\n'.join(survey._serialize()))
            # ASCII "form feed" ^L between surveys
            outf.write('\r\n' + '\f' + '\r\n')
        outf.write('\x1A')
def get_default_storage_policy_of_datastore(profile_manager, datastore):
    '''Returns the default storage policy reference assigned to a datastore.

    profile_manager
        Reference to the profile manager.

    datastore
        Reference to the datastore.

    Raises ``VMwareApiError`` for permission/API faults,
    ``VMwareRuntimeError`` for vmodl runtime faults, and
    ``VMwareObjectRetrievalError`` when the policy id cannot be resolved
    to a policy reference.
    '''
    # Retrieve all datastores visible
    hub = pbm.placement.PlacementHub(hubId=datastore._moId, hubType='Datastore')
    log.trace('placement_hub = %s', hub)
    try:
        policy_id = profile_manager.QueryDefaultRequirementProfile(hub)
    except vim.fault.NoPermission as exc:
        # Most specific fault first: surface the missing privilege name.
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    # Resolve the returned id to an actual policy reference.
    policy_refs = get_policies_by_id(profile_manager, [policy_id])
    if not policy_refs:
        raise VMwareObjectRetrievalError('Storage policy with id \'{0}\' was '
                                         'not found'.format(policy_id))
    return policy_refs[0]
def get_bgcolor(self, index):
    """Background color depending on value.

    Non-numeric cells get a flat "non-number" background (strings and
    other objects differ only in alpha); numeric cells are colored along
    a hue ramp scaled between the column's max and min. Returns ``None``
    when background coloring is disabled.
    """
    column = index.column()
    if not self.bgcolor_enabled:
        return
    value = self.get_value(index.row(), column)
    if self.max_min_col[column] is None or isna(value):
        # No numeric range recorded for this column, or a missing value:
        # use the flat non-number background.
        color = QColor(BACKGROUND_NONNUMBER_COLOR)
        if is_text_string(value):
            color.setAlphaF(BACKGROUND_STRING_ALPHA)
        else:
            color.setAlphaF(BACKGROUND_MISC_ALPHA)
    else:
        if isinstance(value, COMPLEX_NUMBER_TYPES):
            # Complex numbers are ranked by modulus.
            color_func = abs
        else:
            color_func = float
        vmax, vmin = self.return_max(self.max_min_col, column)
        hue = (BACKGROUND_NUMBER_MINHUE + BACKGROUND_NUMBER_HUERANGE * (vmax - color_func(value)) / (vmax - vmin))
        hue = float(abs(hue))
        if hue > 1:
            # Clamp: QColor.fromHsvF requires hue in [0, 1].
            hue = 1
        color = QColor.fromHsvF(hue, BACKGROUND_NUMBER_SATURATION, BACKGROUND_NUMBER_VALUE, BACKGROUND_NUMBER_ALPHA)
    return color
def update_hash_from_str(hsh, str_input):
    """Encode *str_input* as UTF-8 bytes and feed it into hash *hsh*.

    The input is passed through ``str()`` first so any object supporting
    string conversion can be hashed.
    """
    hsh.update(str(str_input).encode("UTF-8"))
def do_cli(function_name, stack_name, filter_pattern, tailing, start_time, end_time):
    """Implementation of the ``cli`` method"""
    LOG.debug("'logs' command is called")
    # output_file is not yet supported by CLI
    with LogsCommandContext(function_name,
                            stack_name=stack_name,
                            filter_pattern=filter_pattern,
                            start_time=start_time,
                            end_time=end_time,
                            output_file=None) as context:
        if tailing:
            events_iterable = context.fetcher.tail(context.log_group_name,
                                                   filter_pattern=context.filter_pattern,
                                                   start=context.start_time)
        else:
            events_iterable = context.fetcher.fetch(context.log_group_name,
                                                    filter_pattern=context.filter_pattern,
                                                    start=context.start_time,
                                                    end=context.end_time)
        # New line is not necessary. It is already in the log events
        # sent by CloudWatch.
        for event in context.formatter.do_format(events_iterable):
            click.echo(event, nl=False)
def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
    """Export the table to GCS; blocks until complete.

    Args:
      destination: the destination URI(s). Can be a single URI or a list.
      format: the format to use for the exported data; one of 'csv', 'json',
          or 'avro' (default 'csv').
      csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','.
      csv_header: for CSV exports, whether to include an initial header line.
          Default true.
      compress: whether to compress the data on export. Compression is not
          supported for AVRO format. Defaults to False.
    Returns:
      A Job object for the completed export Job if it was started
      successfully; else None.
    """
    job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
                             csv_header=csv_header, compress=compress)
    if job is None:
        return None
    job.wait()
    return job
def assistant_fallback_actions(self):
    """Access the assistant_fallback_actions

    :returns: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList
    :rtype: twilio.rest.preview.understand.assistant.assistant_fallback_actions.AssistantFallbackActionsList
    """
    # Build the list resource lazily and cache it on the instance so
    # repeated access returns the same object.
    if self._assistant_fallback_actions is None:
        self._assistant_fallback_actions = AssistantFallbackActionsList(
            self._version,
            assistant_sid=self._solution['sid'],
        )
    return self._assistant_fallback_actions
def _controller_buffer(self, port):
    """Find the pointer to a controller and set up a NumPy buffer.

    Args:
        port: the port of the controller to set up

    Returns:
        a NumPy uint8 buffer over the controller's binary data
    """
    # Resolve the native address of the controller for this port.
    address = _LIB.Controller(self._env, port)
    # Reinterpret the raw pointer as the controller vector structure.
    contents = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
    # Expose the structure's bytes as a NumPy buffer (no copy is made).
    return np.frombuffer(contents, dtype='uint8')
def main():
    """Do the default action of `twitter` command."""
    # Function-scope import: the cmdline machinery is only loaded when the
    # command actually runs.
    from twitter.cmdline import Action, OPTIONS
    twitter = Twitter.from_oauth_file()
    # NOTE(review): presumably OPTIONS carries the default parsed CLI
    # options -- confirm against twitter.cmdline.
    Action()(twitter, OPTIONS)
def _weighting(weights, exponent):
    """Return a weighting whose type is inferred from the arguments.

    A scalar yields a constant weighting, ``None`` the constant 1.0,
    and anything else is coerced to an array weighting.
    """
    if weights is None:
        return NumpyTensorSpaceConstWeighting(1.0, exponent)
    if np.isscalar(weights):
        return NumpyTensorSpaceConstWeighting(weights, exponent)
    # Last possibility: make an array.
    return NumpyTensorSpaceArrayWeighting(np.asarray(weights), exponent)
def parse_command(bot: NoneBot, cmd_string: str) -> Tuple[Optional[Command], Optional[str]]:
    """Parse a command string (typically from a message).

    Finds the longest configured command-start prefix, then splits the
    command name on the configured separators, and resolves it through
    the alias and command registries.

    :param bot: NoneBot instance
    :param cmd_string: command string
    :return: (Command object, current arg string); (None, None) when the
        string is not a recognized command
    """
    logger.debug(f'Parsing command: {cmd_string}')
    matched_start = None
    for start in bot.config.COMMAND_START:
        # loop through COMMAND_START to find the longest matched start
        curr_matched_start = None
        if isinstance(start, type(re.compile(''))):
            # A regex start only counts when it matches at position 0.
            m = start.search(cmd_string)
            if m and m.start(0) == 0:
                curr_matched_start = m.group(0)
        elif isinstance(start, str):
            if cmd_string.startswith(start):
                curr_matched_start = start
        if curr_matched_start is not None and (matched_start is None or len(curr_matched_start) > len(matched_start)):
            # a longer start, use it
            matched_start = curr_matched_start
    if matched_start is None:
        # it's not a command
        logger.debug('It\'s not a command')
        return None, None
    logger.debug(f'Matched command start: ' f'{matched_start}{"(empty)" if not matched_start else ""}')
    full_command = cmd_string[len(matched_start):].lstrip()
    if not full_command:
        # command is empty
        return None, None
    cmd_name_text, *cmd_remained = full_command.split(maxsplit=1)
    # Aliases resolve directly to a command name tuple.
    cmd_name = _aliases.get(cmd_name_text)
    if not cmd_name:
        for sep in bot.config.COMMAND_SEP:
            # loop through COMMAND_SEP to find the most optimized split
            curr_cmd_name = None
            if isinstance(sep, type(re.compile(''))):
                curr_cmd_name = tuple(sep.split(cmd_name_text))
            elif isinstance(sep, str):
                curr_cmd_name = tuple(cmd_name_text.split(sep))
            if curr_cmd_name is not None and (not cmd_name or len(curr_cmd_name) > len(cmd_name)):
                # a more optimized split, use it
                cmd_name = curr_cmd_name
        if not cmd_name:
            # No separator produced a split: the whole text is the name.
            cmd_name = (cmd_name_text,)
    logger.debug(f'Split command name: {cmd_name}')
    cmd = _find_command(cmd_name)
    if not cmd:
        logger.debug(f'Command {cmd_name} not found')
        return None, None
    logger.debug(f'Command {cmd.name} found, function: {cmd.func}')
    return cmd, ''.join(cmd_remained)
def perform_step(file_contents, step):
    """Performs a step of the transformation.

    :param text file_contents: contents of the cheetah template
    :param function step: function taking an xmldoc and returning new contents
    :returns: new contents of the file.
    """
    assert type(file_contents) is not bytes
    xmldoc = parse(file_contents)
    return step(xmldoc)
def correct_bitstring_probs(p, assignment_probabilities):
    """Correct corrupted bitstring probabilities for noisy readout.

    :param np.array p: An array that enumerates bitstring probabilities. When
        flattened out ``p = [p_00...0, p_00...1, ..., p_11...1]``. The total
        number of elements must therefore be a power of 2. The canonical shape
        has a separate axis for each qubit, such that ``p[i, j, ..., k]``
        gives the estimated probability of bitstring ``ij...k``.
    :param List[np.array] assignment_probabilities: A list of assignment
        probability matrices per qubit. Each assignment probability matrix is
        expected to be of the form::

            [[p00 p01]
             [p10 p11]]

    :return: ``p_corrected`` an array with as many dimensions as there are
        qubits that contains the noisy-readout-corrected estimated
        probabilities for each measured bitstring, i.e., ``p[i, j, ..., k]``
        gives the estimated probability of bitstring ``ij...k``.
    :rtype: np.array
    """
    # Correction = applying the inverse of each per-qubit assignment matrix.
    inverse_transforms = (np.linalg.inv(ap) for ap in assignment_probabilities)
    return _apply_local_transforms(p, inverse_transforms)
def find_peaks(array, baseline=0.1, return_subarrays=False):
    """Identify the indices of the peaks in ``array``, in ascending order.

    Scans the data until it rises above ``baseline``; every contiguous run of
    values above the baseline is treated as one peak, and the index of its
    maximum is recorded. When ``return_subarrays`` is True, also returns the
    values of each above-baseline run and the index where each run starts.
    """
    peaks = []
    run_values = []
    run_starts = []
    size = len(array)
    n = 0
    while n < size:
        if array[n] <= baseline:
            # Below baseline: not inside a peak, keep scanning.
            n += 1
            continue
        # Entered an above-baseline region: open a new run.
        if return_subarrays:
            run_values.append([])
            run_starts.append(n)
        best_value = baseline
        best_index = n
        while n < size and array[n] > baseline:
            if return_subarrays:
                run_values[-1].append(array[n])
            if array[n] > best_value:
                best_value = array[n]
                best_index = n
            n += 1
        # Record the index of this run's maximum as the peak.
        peaks.append(best_index)
    if return_subarrays:
        return peaks, run_values, run_starts
    return peaks
def _reduce_helper(input_shape, output_shape, input_tensor_layout, reduction_fn_string="SUM"):
    """Returns slicewise function and reduced mesh dimensions.

    Args:
        input_shape: a Shape
        output_shape: a Shape
        input_tensor_layout: a TensorLayout
        reduction_fn_string: "SUM" or "MAX"

    Returns:
        reduce_slice_fn: a function from tf.Tensor to tf.Tensor
        reduced_mesh_axes: a list of integers
    """
    # Input dimensions absent from the output are the ones being reduced away.
    reduce_dims_indices = [i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims]
    # Shape of a slice after reduction but before any transposition.
    reduced_input_shape = Shape([d for d in input_shape.dims if d in output_shape.dims])
    # Permutation that maps the surviving dimensions onto the output order.
    perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims]
    def reduce_slice_fn(xslice):
        # Reduce away dropped dims, then transpose if survivors are out of order.
        ret = xslice
        if reduce_dims_indices:
            ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices)
        if perm != list(xrange(len(perm))):
            ret = tf.transpose(ret, perm)
        return ret
    # Mesh axes that the reduced tensor dimensions were split over; the caller
    # must additionally reduce across these mesh axes.
    reduced_mesh_axes = []
    for i in reduce_dims_indices:
        mesh_axis = input_tensor_layout[i]
        if mesh_axis is not None:
            reduced_mesh_axes.append(mesh_axis)
    return reduce_slice_fn, reduced_mesh_axes
def last_updated(self, subpath=None):
    """Returns the time of the last modification of the Readme or
    specified subpath, or None if the file does not exist.

    The return value is a number giving the number of seconds since
    the epoch (see the time module).

    Raises werkzeug.exceptions.NotFound if the resulting path
    would fall out of the root directory.
    """
    try:
        return os.path.getmtime(self.readme_for(subpath))
    except ReadmeNotFoundError:
        # No readme could be resolved for the subpath: treat as non-existent.
        return None
    # OSError for Python 3 base class, EnvironmentError for Python 2
    except (OSError, EnvironmentError) as ex:
        if ex.errno == errno.ENOENT:
            # Resolved path is missing on disk: also "does not exist".
            return None
        # Any other OS-level failure (e.g. permissions) propagates.
        raise
def get_network_project(network_id, **kwargs):
    """Get the project that a network is in.

    Args:
        network_id: ID of the network whose parent project is wanted.

    Returns:
        The Project containing the network.

    Raises:
        HydraError: if no network with ``network_id`` exists.
    """
    # Join projects to networks on the network's project_id foreign key.
    # (The previous join condition compared Project.id to Network.id, two
    # unrelated primary keys, so it only matched by coincidence.)
    net_proj = db.DBSession.query(Project).join(
        Network,
        and_(Project.id == Network.project_id, Network.id == network_id)
    ).first()
    if net_proj is None:
        raise HydraError("Network %s not found" % network_id)
    return net_proj
def add(self, *matches, **kw):
    '''Add an argument; this is optional, and mostly useful for setting up
    aliases or setting boolean=True.

    The options (``default=None``, ``boolean=False``) are pulled out of
    ``**kw`` by hand because ``def add(self, *matches, default=None,
    boolean=False)`` is invalid syntax on Python 2 (it works as intended on
    Python 3), and reordering the parameters would not behave as expected.

    If you provide multiple ``matches`` that are not dash-prefixed, only the
    first will be used as a positional argument. Specifying positional
    arguments together with ``boolean=True`` is unusual, and there will be no
    special consideration for ``boolean=True`` on the position-enabled
    argument in that case.
    '''
    # Emulated keyword-only arguments (Python 2 compatibility hack).
    default = kw.get('default', None)
    boolean = kw.get('boolean', False)
    del kw
    # Do not touch kw below this point; it should never have existed.
    names = []
    positional = None
    for candidate in matches:
        if candidate.startswith('--'):
            names.append(candidate[2:])
        elif candidate.startswith('-'):
            names.append(candidate[1:])
        else:
            if not positional:
                # First bare match becomes the canonical positional name.
                positional = candidate
            names.append(candidate)
    self.arguments.append(BooleanArgument(names, default, boolean, positional))
    # chainable
    return self
def MD_ConfigsPermutate(df_md):
    """Given a MD DataFrame, return a Nx4 array which permutes the current
    injection dipoles."""
    # Collect the unique (a, b) current-injection pairs.
    injections = df_md.groupby(['a', 'b'])
    ab_pairs = np.array(list(injections.groups.keys()))
    # The highest electrode number bounds the electrode count.
    manager = ConfigManager(nr_of_electrodes=ab_pairs.max())
    manager.gen_configs_permutate(ab_pairs, silent=True)
    return manager.configs
def custom_environment(self, **kwargs):
    """A context manager around the above ``update_environment`` method to restore the
    environment back to its previous state after operation.

    ``Examples``::

        with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
            repo.remotes.origin.fetch()

    :param kwargs: see update_environment
    """
    # update_environment returns the previous values of the keys it changed,
    # which is exactly what is needed to undo the change afterwards.
    old_env = self.update_environment(**kwargs)
    try:
        yield
    finally:
        # Restore the prior environment even if the body raised.
        self.update_environment(**old_env)
def get_cal_data(data_df, cal_dict, param):
    '''Get data along specified axis during calibration intervals.

    Args
    ----
    data_df: pandas.DataFrame
        Pandas dataframe with lleo data
    cal_dict: dict
        Calibration dictionary
    param: str
        Name of the parameter/axis whose calibration slices are wanted

    Returns
    -------
    lower: pandas dataframe
        slice of lleo dataframe containing points at the lower (-1g)
        calibration position
    upper: pandas dataframe
        slice of lleo dataframe containing points at the upper calibration
        position

    See also
    --------
    lleoio.read_data: creates pandas dataframe `data_df`
    read_cal: creates `cal_dict` and describes fields
    '''
    # Normalise the parameter name to the snake_case keys used in cal_dict.
    key = param.lower().replace(' ', '_').replace('-', '_')
    bounds = cal_dict['parameters'][key]
    lower_mask = ((data_df.index >= bounds['lower']['start']) &
                  (data_df.index <= bounds['lower']['end']))
    upper_mask = ((data_df.index >= bounds['upper']['start']) &
                  (data_df.index <= bounds['upper']['end']))
    series = data_df[key]
    return series[lower_mask], series[upper_mask]
def _get_dotgraphs ( self , hdrgo , usrgos , pltargs , go2parentids ) :
"""Get a GO DAG in a dot - language string for a single Group of user GOs .""" | gosubdagplotnts = self . _get_gosubdagplotnts ( hdrgo , usrgos , pltargs , go2parentids )
# Create DAG graphs as dot language strings . Loop through GoSubDagPlotNt list
dotstrs = [ obj . get_dotstr ( ) for obj in gosubdagplotnts ]
return dotstrs |
def create_iam_role(self, account):
    """Create a new IAM role. Returns the ARN of the newly created role

    Args:
        account (:obj:`Account`): Account where to create the IAM role

    Returns:
        `str`
    """
    try:
        iam = self.session.client('iam')
        # Render the trust relationship and inline policy from JSON templates.
        trust = get_template('vpc_flow_logs_iam_role_trust.json').render()
        policy = get_template('vpc_flow_logs_role_policy.json').render()
        newrole = iam.create_role(Path='/', RoleName=self.role_name, AssumeRolePolicyDocument=trust)['Role']['Arn']
        # Attach an inline policy to the role to avoid conflicts or hitting the Managed Policy Limit
        iam.put_role_policy(RoleName=self.role_name, PolicyName='VpcFlowPolicy', PolicyDocument=policy)
        self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))
        auditlog(event='vpc_flow_logs.create_iam_role', actor=self.ns, data={'account': account.account_name, 'roleName': self.role_name, 'trustRelationship': trust, 'inlinePolicy': policy})
        return newrole
    except Exception:
        # NOTE(review): failures are logged and swallowed, so the caller
        # receives an implicit None instead of the documented `str` -- confirm
        # that callers handle a None return value.
        self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
def hourly(place):
    """return data as list of dicts with all data filled in."""
    # time in utc?
    lat, lon = place
    url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat, lon)
    w_data = json.loads(urllib2.urlopen(url).read())
    # Mangle every hourly entry into the output form.
    return [mangle(entry) for entry in w_data['hourly']['data']]
def load_handgeometry():
    """Hand Geometry Dataset.

    The data of this dataset is a 3d numpy array vector with shape (224, 224, 3)
    containing 112 224x224 RGB photos of hands, and the target is a 1d numpy
    float array containing the width of the wrist in centimeters.
    """
    dataset_path = _load('handgeometry')
    table = _load_csv(dataset_path, 'data')
    # Image filenames come from the CSV; load the actual pixel data from disk.
    images = _load_images(os.path.join(dataset_path, 'images'), table.image)
    targets = table.target.values
    return Dataset(load_handgeometry.__doc__, images, targets, r2_score)
def delay(self, identifier: typing.Any, until: typing.Union[int, float] = -1) -> bool:
    """Delay a deferred function until the given time.

    Args:
        identifier (typing.Any): The identifier returned from a call
            to defer or defer_for.
        until (typing.Union[int, float]): A numeric value that represents
            the clock time when the callback becomes available for
            execution. Values that are less than the current time result in
            the function being called at the next opportunity.

    Returns:
        bool: True if the call is delayed. False if the identifier is
        invalid or if the deferred call is already executed.
    """
    # Abstract method: concrete scheduler implementations must override this.
    raise NotImplementedError()
def slice_along_axis(dataset, n=5, axis='x', tolerance=None, generate_triangles=False, contour=False):
    """Create many slices of the input dataset along a specified axis.

    Parameters
    ----------
    n : int
        The number of slices to create
    axis : str or int
        The axis to generate the slices along. Perpendicular to the slices.
        Can be string name (``'x'``, ``'y'``, or ``'z'``) or axis index
        (``0``, ``1``, or ``2``).
    tolerance : float, optional
        The tolerance to the edge of the dataset bounds to create the slices
    generate_triangles : bool, optional
        If this is enabled (``False`` by default), the output will be
        triangles otherwise, the output will be the intersection polygons.
    contour : bool, optional
        If True, apply a ``contour`` filter after slicing

    Raises
    ------
    RuntimeError
        If ``axis`` is not a recognised axis name or index.
    """
    axes = {'x': 0, 'y': 1, 'z': 2}
    output = vtki.MultiBlock()
    if isinstance(axis, int):
        ax = axis
        # Recover the axis name from its index for the slice filter below.
        names = [name for name, index in axes.items() if index == ax]
        if not names:
            raise RuntimeError('Axis ({}) not understood'.format(axis))
        axis = names[0]
    elif isinstance(axis, str):
        try:
            ax = axes[axis]
        except KeyError:
            raise RuntimeError('Axis ({}) not understood'.format(axis))
    else:
        # Previously an unsupported axis type slipped past both branches and
        # caused a NameError on ``ax`` below; fail fast instead.
        raise RuntimeError('Axis ({}) not understood'.format(axis))
    # Default tolerance: 1% of the dataset extent along the slicing axis.
    if tolerance is None:
        tolerance = (dataset.bounds[ax * 2 + 1] - dataset.bounds[ax * 2]) * 0.01
    # Evenly spaced slice locations, inset from both bounds by the tolerance.
    rng = np.linspace(dataset.bounds[ax * 2] + tolerance, dataset.bounds[ax * 2 + 1] - tolerance, n)
    center = list(dataset.center)
    # Make each of the slices
    for i in range(n):
        center[ax] = rng[i]
        slc = DataSetFilters.slice(dataset, normal=axis, origin=center, generate_triangles=generate_triangles, contour=contour)
        output[i, 'slice%.2d' % i] = slc
    return output
def full_name(self):
    """The full name of this route's view function, including the module path
    and controller name, if any."""
    if not self.view_func:
        return None
    # Assemble module[, controller], method into a dotted path.
    parts = [self.view_func.__module__]
    if self._controller_cls:
        parts.append(self._controller_cls.__name__)
    parts.append(self.method_name)
    return '.'.join(parts)
def _convert_port_bindings(self, value):
    '''Normalise user-supplied port bindings into docker's form::

        "PortBindings": {
            "6379/tcp": [
                "HostIp": "",
                "HostPort": "6379"

    :param value: list or dict of port binding definitions
    :returns: dict mapping "port/protocol" to a list of host binding dicts
    '''
    converted = {}
    if not value:
        return converted
    # The list form is first normalised into the dict form handled below.
    if isinstance(value, list):
        value = self._convert_port_bindings_from_list(value)
    if isinstance(value, dict):
        for port_protocol, host_bindings in six.iteritems(value):
            if '/' in port_protocol:
                port, protocol = port_protocol.split('/')
                if protocol not in ('tcp', 'udp'):
                    raise ValueError('only supported protocols are tcp and udp. {0} was passed.'.format(protocol))
            else:
                # No protocol given: default to tcp.
                port_protocol = "{0}/tcp".format(port_protocol)
            converted[port_protocol] = []
            if isinstance(host_bindings, list):
                for host_binding in host_bindings:
                    if isinstance(host_binding, dict):
                        # host_port is mandatory; host_ip defaults to "" (all
                        # interfaces).
                        if "host_port" not in host_binding:
                            raise ValueError("host_port must be provided.")
                        if 'host_ip' not in host_binding:
                            host_binding['host_ip'] = ''
                        converted[port_protocol].append(host_binding)
                    else:
                        raise TypeError("The host binding information must be a dict.")
            else:
                raise TypeError("The host binding information in port bindings must be in a list.")
    return converted
def _cnf_formula(lexer, varname, nvars, nclauses):
    """Return a DIMACS CNF formula."""
    clauses = _clauses(lexer, varname, nvars)
    count = len(clauses)
    # The clause count must match the header's declared nclauses exactly.
    if count < nclauses:
        raise Error("formula has fewer than {} clauses".format(nclauses))
    if count > nclauses:
        raise Error("formula has more than {} clauses".format(nclauses))
    return ('and', ) + clauses
def _check_list_minions(self, expr, greedy, ignore_missing=False):  # pylint: disable=unused-argument
    '''Return the minions found by looking via a list'''
    # Accept either a comma-separated string or an iterable of minion IDs.
    if isinstance(expr, six.string_types):
        expr = [part for part in expr.split(',') if part]
    known = self._pki_minions()
    found = [minion for minion in expr if minion in known]
    if ignore_missing:
        missing = []
    else:
        missing = [minion for minion in expr if minion not in known]
    return {'minions': found, 'missing': missing}
def _add_kickoff_task(cls, base_path, mapreduce_spec, eta, countdown, queue_name):
    """Enqueues a new kickoff task.

    Args:
        base_path: URL base path for the mapreduce callbacks.
        mapreduce_spec: spec of the mapreduce job being kicked off.
        eta: absolute time at which the task should run, passed to the task.
        countdown: seconds to wait before running the task, passed to the task.
        queue_name: name of the task queue to enqueue on.
    """
    params = {"mapreduce_id": mapreduce_spec.mapreduce_id}
    # Task is not named so that it can be added within a transaction.
    kickoff_task = taskqueue.Task(url=base_path + "/kickoffjob_callback/" + mapreduce_spec.mapreduce_id, headers=util._get_task_headers(mapreduce_spec.mapreduce_id), params=params, eta=eta, countdown=countdown)
    hooks = mapreduce_spec.get_hooks()
    if hooks is not None:
        try:
            # Give user-supplied hooks the first chance to enqueue the task.
            hooks.enqueue_kickoff_task(kickoff_task, queue_name)
            return
        except NotImplementedError:
            # Hook declined; fall through to the default enqueue below.
            pass
    kickoff_task.add(queue_name, transactional=True)
def ListFileEntries(self, base_path_specs, output_writer):
    """Lists file entries in the base path specification.

    Args:
        base_path_specs (list[dfvfs.PathSpec]): source path specification.
        output_writer (StdoutWriter): output writer.
    """
    for base_path_spec in base_path_specs:
        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
        if file_entry is None:
            logging.warning('Unable to open base path specification:\n{0:s}'.format(base_path_spec.comparable))
            # NOTE(review): this aborts the entire listing on the first
            # unopenable path spec rather than skipping it -- confirm whether
            # a ``continue`` was intended here.
            return
        # Recursively list this entry and its children.
        self._ListFileEntry(file_system, file_entry, '', output_writer)
def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error):
    r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

    Complain about several constructs which gcc-2 accepts, but which are
    not standard C++. Warning about these in lint is one way to ease the
    transition to new compilers.
    - put storage class first (e.g. "static const" instead of "const static").
    - "%lld" instead of %qd" in printf-type functions.
    - "%1$d" is non-standard in printf-type functions.
    - "\%" is an undefined character escape sequence.
    - text after #endif is not allowed.
    - invalid inner-style forward declaration.
    - >? and <? operators, and their >?= and <?= cousins.

    Additionally, check for constructor/destructor style violations and reference
    members, as it is very convenient to do so while checking for
    gcc-2 compliance.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
      error: A callable to which errors are reported, which takes 4 arguments:
        filename, line number, error level, and message
    """
    # Remove comments from the line, but leave in strings for now.
    line = clean_lines.lines[linenum]
    # Deprecated/non-standard printf format specifiers.
    if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
        error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.')
    if Search(r'printf\s*\(.*".*%\d+\$', line):
        error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.')
    # Remove escaped backslashes before looking for undefined escapes.
    line = line.replace('\\\\', '')
    if Search(r'("|\').*\\(%|\[|\(|{)', line):
        error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.')
    # For the rest, work with both comments and strings removed.
    line = clean_lines.elided[linenum]
    if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line):
        error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.')
    if Match(r'\s*#\s*endif\s*[^/\s]+', line):
        error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.')
    if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
        error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.')
    if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
        error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.')
    if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
        # TODO(unknown): Could it be expanded safely to arbitrary references,
        # without triggering too many false positives? The first
        # attempt triggered 5 warnings for mostly benign code in the regtest, hence
        # the restriction.
        # Here's the original regexp, for the reference:
        # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
        # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
        error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.')
    # Everything else in this function operates on class declarations.
    # Return early if the top of the nesting stack is not a class, or if
    # the class head is not completed yet.
    classinfo = nesting_state.InnermostClass()
    if not classinfo or not classinfo.seen_open_brace:
        return
    # The class may have been declared with namespace or classname qualifiers.
    # The constructor and destructor will not have those qualifiers.
    base_classname = classinfo.name.split('::')[-1]
    # Look for single-argument constructors that aren't marked explicit.
    # Technically a valid construct, but against style. Also look for
    # non-single-argument constructors which are also technically valid, but
    # strongly suggest something is wrong.
    explicit_constructor_match = Match(r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line)
    if explicit_constructor_match:
        is_marked_explicit = explicit_constructor_match.group(1)
        if not explicit_constructor_match.group(2):
            constructor_args = []
        else:
            constructor_args = explicit_constructor_match.group(2).split(',')
        # collapse arguments so that commas in template parameter lists and function
        # argument parameter lists don't split arguments in two
        i = 0
        while i < len(constructor_args):
            constructor_arg = constructor_args[i]
            # Keep merging following pieces until brackets/parens balance.
            while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')):
                constructor_arg += ',' + constructor_args[i + 1]
                del constructor_args[i + 1]
            constructor_args[i] = constructor_arg
            i += 1
        defaulted_args = [arg for arg in constructor_args if '=' in arg]
        noarg_constructor = (not constructor_args or  # empty arg list
                             # 'void' arg specifier
                             (len(constructor_args) == 1 and constructor_args[0].strip() == 'void'))
        onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                               not noarg_constructor) or  # all but at most one arg defaulted
                              (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1))
        initializer_list_constructor = bool(onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
        copy_constructor = bool(onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip()))
        if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor):
            if defaulted_args:
                error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.')
            else:
                error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.')
        elif is_marked_explicit and not onearg_constructor:
            if noarg_constructor:
                error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter constructors should not be marked explicit.')
            else:
                error(filename, linenum, 'runtime/explicit', 0, 'Constructors that require multiple arguments ' 'should not be marked explicit.')
def _get_hosted_zone_limit ( self , limit_type , hosted_zone_id ) :
"""Return a hosted zone limit [ recordsets | vpc _ associations ]
: rtype : dict""" | result = self . conn . get_hosted_zone_limit ( Type = limit_type , HostedZoneId = hosted_zone_id )
return result |
def _add_to_batch(self, partition_key, row_key, request):
    '''Validates batch-specific rules.

    :param str partition_key:
        PartitionKey of the entity.
    :param str row_key:
        RowKey of the entity.
    :param request:
        the request to insert, update or delete entity
    :raises AzureBatchValidationError:
        if the entity violates one of the batch constraints below
    '''
    # All same partition keys: the first request fixes the batch's partition.
    if self._partition_key:
        if self._partition_key != partition_key:
            raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
    else:
        self._partition_key = partition_key
    # All different row keys
    if row_key in self._row_keys:
        raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
    else:
        self._row_keys.append(row_key)
    # 100 entities maximum per batch
    if len(self._requests) >= 100:
        raise AzureBatchValidationError(_ERROR_TOO_MANY_ENTITIES_IN_BATCH)
    # Add the request to the batch
    self._requests.append((row_key, request))
def trigger_hook(self, __name, *args, **kwargs):
    '''Trigger a hook and return a list of results.'''
    results = []
    # Iterate over a snapshot so hooks that mutate the registry are safe.
    for hook in self._hooks[__name][:]:
        results.append(hook(*args, **kwargs))
    return results
def most_even(number, group):
    """Divide a number into a list of numbers as even as possible."""
    base, extras = divmod(number, group)
    # The first `extras` chunks each absorb one unit of the remainder.
    chunks = [base + 1 if position < extras else base for position in range(group)]
    logging.debug('chunks: %s', chunks)
    return chunks
def process_exception(self, request, exception):
    """Add user details."""
    user = request.user
    # Only authenticated users with an email attribute are recorded.
    if user and hasattr(user, 'email'):
        request.META['USER'] = user.email
def extract_date(value):
    """Convert timestamp to datetime and set everything to zero except a date"""
    moment = value.to_datetime()
    # Zero out all time-of-day components, keeping only the calendar date.
    return moment.replace(hour=0, minute=0, second=0, microsecond=0)
def set_device_scale(self, x_scale, y_scale):
    """Sets a scale that is multiplied to the device coordinates determined
    by the CTM when drawing to surface.

    One common use for this is to render to very high resolution display
    devices at a scale factor, so that code that assumes 1 pixel will be a
    certain size will still work. Setting a transformation via
    cairo_translate() isn't sufficient to do this, since functions like
    cairo_device_to_user() will expose the hidden scale.

    Note that the scale affects drawing to the surface as well as using the
    surface in a source pattern.

    :param x_scale: the scale in the X direction, in device units.
    :param y_scale: the scale in the Y direction, in device units.

    *New in cairo 1.14.*
    *New in cairocffi 0.9.*
    """
    # Delegate to the C API, then translate any cairo error status into a
    # Python exception.
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    self._check_status()
def run_plasmid_extractor(self):
    """Create and run the plasmid extractor system call"""
    logging.info('Extracting plasmids')
    # Define the system call
    extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc'.format(inf=self.path, outf=self.plasmid_output, plasdb=os.path.join(self.plasmid_db, 'plasmid_db.fasta'), db=self.plasmid_db, cpus=self.cpus)
    # Only attempt to extract plasmids if the report doesn't already exist
    if not os.path.isfile(self.plasmid_report):
        # Run the system calls
        out, err = run_subprocess(extract_command)
        # Acquire thread lock, and write the logs to file; the lock keeps
        # concurrent workers from interleaving writes to the shared logfile.
        self.threadlock.acquire()
        write_to_logfile(extract_command, extract_command, self.logfile)
        write_to_logfile(out, err, self.logfile)
        self.threadlock.release()
def project_activity(index, start, end):
    """Compute the metrics for the project activity section of the enriched
    github pull requests index.

    Returns a dictionary containing a "metrics" key. This key contains the
    metrics for this section.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data upto
    :return: dictionary with the value of the metrics
    """
    metrics = [
        SubmittedPRs(index, start, end),
        ClosedPRs(index, start, end),
    ]
    return {"metrics": metrics}
def _get_device_template(disk, disk_info, template=None):
    '''Returns the template format to create a disk in open nebula

    .. versionadded:: 2018.3.0
    '''
    # Helper: fail with a clear error if required disk options are missing.
    def _require_disk_opts(*args):
        for arg in args:
            if arg not in disk_info:
                raise SaltCloudSystemExit('The disk {0} requires a {1}\
argument'.format(disk, arg))
    _require_disk_opts('disk_type', 'size')
    size = disk_info['size']
    disk_type = disk_info['disk_type']
    if disk_type == 'clone':
        # Clone from an explicit image, or fall back to the VM template's image.
        if 'image' in disk_info:
            clone_image = disk_info['image']
        else:
            clone_image = get_template_image(kwargs={'name': template})
        clone_image_id = get_image_id(kwargs={'name': clone_image})
        temp = 'DISK=[IMAGE={0}, IMAGE_ID={1}, CLONE=YES,\
SIZE={2}]'.format(clone_image, clone_image_id, size)
        return temp
    if disk_type == 'volatile':
        _require_disk_opts('type')
        v_type = disk_info['type']
        temp = 'DISK=[TYPE={0}, SIZE={1}]'.format(v_type, size)
        # Filesystem-backed volatile disks additionally need a format.
        if v_type == 'fs':
            _require_disk_opts('format')
            format = disk_info['format']
            temp = 'DISK=[TYPE={0}, SIZE={1}, FORMAT={2}]'.format(v_type, size, format)
        return temp
    # NOTE(review): any other disk_type falls through and implicitly returns
    # None -- confirm whether that is the intended behaviour for callers.
def filter_record(self, record):
    """Filter a single record by windowed mean quality score.

    Returns the record (possibly clipped on the right) when it passes, or
    raises FailedFilter when the quality falls below ``self.min_mean_score``.
    """
    quality_scores = record.letter_annotations['phred_quality']
    # Simple case - window covers whole sequence
    if len(record) <= self.window_size:
        mean_score = mean(quality_scores)
        if mean_score >= self.min_mean_score:
            return record
        else:
            raise FailedFilter(mean_score)
    # Find the right clipping point. Start clipping at the beginning of the
    # sequence, then extend the window to include regions with acceptable
    # mean quality scores.
    clip_right = 0
    for i, a in enumerate(moving_average(quality_scores, self.window_size)):
        if a >= self.min_mean_score:
            clip_right = i + self.window_size
        else:
            # First window below threshold ends the acceptable prefix.
            break
    if clip_right:
        return record[:clip_right]
    else:  # First window failed - record fails
        raise FailedFilter()
def clear_unattached_processes(self):
    """Removes Process objects from the snapshot
    referring to processes not being debugged."""
    for process_id in self.get_process_ids():
        process = self.get_process(process_id)
        # Keep only processes that are currently under the debugger.
        if not process.is_being_debugged():
            self._del_process(process)
def get_files_from_textfile(textfile_handler):
    """Yield (image_name, width) tuples parsed from a text file handler.

    Each line is "name,width"; lines without a parsable trailing width yield
    the whole line as the name with a width of None.
    """
    for raw_line in textfile_handler:
        entry = raw_line.rstrip()
        name, sep, tail = entry.rpartition(',')
        width = None
        if sep:
            try:
                width = int(tail)
            except ValueError:
                # Trailing field is not an integer: whole line is the name.
                name = entry
        else:
            # No comma at all: whole line is the name.
            name = entry
        yield (name, width)
def get_lines(self, force=False):
    """Return a list of lists or strings, representing the code body.

    Each list is a block, each string is a statement.

    force (True or False): if an attribute object cannot be included,
    it is usually skipped to be processed later. With 'force' set, there
    will be no waiting: a get_or_create() call is written instead.
    """
    code_lines = []
    # Don't return anything if this is an instance that should be skipped
    if self.skip():
        return []
    # Initialise our new object
    # e.g. model_name_35 = Model()
    code_lines += self.instantiate()
    # Add each field
    # e.g. model_name_35.field_one = 1034.91
    #      model_name_35.field_two = "text"
    code_lines += self.get_waiting_list()
    if force:
        # TODO: Check that M2M are not affected
        code_lines += self.get_waiting_list(force=force)
    # Print the save command for our new object
    # e.g. model_name_35.save()
    # (only emitted when something was actually generated above)
    if code_lines:
        code_lines.append("%s = importer.save_or_locate(%s)\n" % (self.variable_name, self.variable_name))
    code_lines += self.get_many_to_many_lines(force=force)
    return code_lines
def manage_pump(self, operation):
    """Updates control module knowledge of pump requests.

    If any sensor module requests water, the pump will turn on.
    """
    # Only the two recognised states mutate the control table; any other
    # operation leaves it untouched.
    if operation in ("on", "off"):
        self.controls["pump"] = operation
    return True
def add_layer3_vlan_cluster_interface(self, interface_id, vlan_id, nodes=None, cluster_virtual=None, network_value=None, macaddress=None, cvi_mode='packetdispatch', zone_ref=None, comment=None, **kw):
    """Add IP addresses to VLANs on a firewall cluster. The minimum params
    required are ``interface_id`` and ``vlan_id``.

    To create a VLAN interface with a CVI, specify ``cluster_virtual``,
    ``network_value`` and ``macaddress``.
    To create a VLAN with only NDI, specify the ``nodes`` parameter.

    Nodes data structure is expected to be in this format::

        nodes = [{'address': '5.5.5.2', 'network_value': '5.5.5.0/24', 'nodeid': 1},
                 {'address': '5.5.5.3', 'network_value': '5.5.5.0/24', 'nodeid': 2}]

    :param str,int interface_id: interface id to assign VLAN.
    :param str,int vlan_id: vlan identifier
    :param list nodes: optional addresses for node interfaces (NDI's). For a
        cluster, each node will require an address specified using the nodes
        format.
    :param str cluster_virtual: cluster virtual ip address (optional). If
        specified, the network_value parameter is required.
    :param str network_value: Specifies the network address, i.e. if cluster
        virtual is 1.1.1.1, network_value could be 1.1.1.0/24.
    :param str macaddress: (optional) if used will provide the mapping from
        node interfaces to participate in load balancing.
    :param str cvi_mode: cvi mode for cluster interface (default: packetdispatch)
    :param zone_ref: zone to assign, can be name, str href or Zone
    :param dict kw: keyword arguments are passed to top level of VLAN interface,
        not the base level physical interface. This is useful if you want to
        pass in a configuration that enables the DHCP server on a VLAN for
        example.
    :raises EngineCommandFailed: failure creating interface
    :return: None

    .. note::
        If the ``interface_id`` specified already exists, it is still possible
        to add additional VLANs and interface addresses.
    """
    # Per-VLAN address payload; extra kw is merged at the VLAN level.
    interfaces = {'nodes': nodes if nodes else [], 'cluster_virtual': cluster_virtual, 'network_value': network_value}
    interfaces.update(**kw)
    # Top-level payload for the physical interface. A CVI mode is only
    # meaningful when a macaddress is supplied; otherwise force 'none'.
    _interface = {'interface_id': interface_id, 'interfaces': [interfaces], 'macaddress': macaddress, 'cvi_mode': cvi_mode if macaddress else 'none', 'zone_ref': zone_ref, 'comment': comment}
    try:
        interface = self._engine.interface.get(interface_id)
        vlan = interface.vlan_interface.get(vlan_id)
        # Interface exists, so we need to update but check if VLAN already exists
        if vlan is None:
            interfaces.update(vlan_id=vlan_id)
            interface._add_interface(**_interface)
        else:
            # Updating an existing VLAN: macaddress/cvi_mode belong to the
            # physical interface, not the VLAN, so strip them here.
            for k in ('macaddress', 'cvi_mode'):
                _interface.pop(k)
            # VLAN interfaces are addressed as "<interface_id>.<vlan_id>".
            _interface.update(interface_id='{}.{}'.format(interface_id, vlan_id))
            vlan._add_interface(**_interface)
        return interface.update()
    except InterfaceNotFound:
        # Physical interface does not exist yet; create it with the VLAN.
        interfaces.update(vlan_id=vlan_id)
        interface = ClusterPhysicalInterface(**_interface)
        return self._engine.add_interface(interface)
def infer_shape(self, node, input_shapes):
    """Return a list of output shapes based on ``input_shapes``.

    This method is optional. It allows Theano to compute the shape of the
    output without having to evaluate the Op.

    Parameters
    ----------
    node : `theano.gof.graph.Apply`
        The node of this Op in the computation graph.
    input_shapes : 1-element list of `theano.compile.ops.Shape`
        Symbolic shape of the input.

    Returns
    -------
    output_shapes : 1-element list of tuples
        Fixed shape of the output determined by `odl_op`.
    """
    if isinstance(self.operator, Functional):
        # A functional produces a scalar, i.e. a 0-dimensional output.
        return [()]
    else:
        # Need to convert to native to avoid error in Theano from
        # future.int
        return [tuple(native(si) for si in self.operator.range.shape)]
def user_to_uid(user):
    '''Convert user name to a uid

    Args:
        user (str): The user to lookup. If None, the user running the
            current process is used.

    Returns:
        str: The user id of the user (the SID rendered as a string, via
        win_dacl)

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid myusername
    '''
    if user is None:
        # Default to the current process owner.
        user = salt.utils.user.get_user()
    return salt.utils.win_dacl.get_sid_string(user)
def get_title(mode='title'):
    '''Return the terminal/console title.

    Arguments:
        mode: str, one of ('title', 'icon') or int (20-21):
            see links below.
            - `Control sequences
              <http://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Operating-System-Commands>`_

    Returns:
        title string, or None if not able to be found.

    Note:
        Experimental, few terms outside xterm support this correctly.
        MATE Terminal returns "Terminal".
        iTerm returns "".
    '''
    title = None
    # Only attempt the escape-sequence query on a local interactive tty.
    if is_a_tty() and not env.SSH_CLIENT:
        if os_name == 'nt':
            # Windows has its own console API; delegate entirely.
            from .windows import get_title
            return get_title()
        elif sys.platform == 'darwin':
            if env.TERM_PROGRAM and env.TERM_PROGRAM == 'iTerm.app':
                # iTerm answers the xterm-style query; fall through below.
                pass
            else:
                # Other macOS terminal programs: give up (returns None).
                return
        elif os_name == 'posix':
            pass
        # xterm (maybe iterm) only support
        import tty, termios
        mode = _query_mode_map.get(mode, mode)
        query_sequence = f'{CSI}{mode}t'
        try:
            with TermStack() as fd:
                termios.tcflush(fd, termios.TCIFLUSH)
                # clear input
                tty.setcbreak(fd, termios.TCSANOW)
                # shut off echo
                sys.stdout.write(query_sequence)
                sys.stdout.flush()
                resp = _read_until(maxchars=100, end=ST)
        except AttributeError:  # no .fileno()
            return title
        # parse response: strip OSC prefix, the leading mode byte, and the
        # trailing ESC of the string terminator.
        title = resp.lstrip(OSC)[1:].rstrip(ESC)
        log.debug('%r', title)
    return title
def zscan_iter(self, name, match=None, count=None, score_cast_func=float):
    """Iterate a sorted set via ZSCAN, hiding cursor bookkeeping.

    ``match`` allows for filtering the keys by pattern.
    ``count`` allows for hint the minimum number of returns.
    ``score_cast_func`` a callable used to cast the score return value.

    :raises InvalidOperation: when called while pipelining
    """
    if self._pipe is not None:
        raise InvalidOperation('cannot pipeline scan operations')
    cursor = '0'
    # The server signals completion by returning cursor 0.
    while cursor != 0:
        cursor, page = self.zscan(name, cursor=cursor, match=match,
                                  count=count, score_cast_func=score_cast_func)
        for entry in page:
            yield entry
def _active_keys_by_month(ignore_internal_keys, monthly_minimum, cached=True):
    """Returns a dict of (year, month) -> active_keys. The dict will contain
    a key for each month observed in the data.

    NOTE: Python 2 only — uses dict.iteritems() and a tuple-unpacking
    lambda parameter, both removed in Python 3.
    """
    # The cache key embeds today's date so cached entries roll over daily.
    cache_key = '_active_keys_by_month({0!r},{1!r})[{date!s}]'.format(ignore_internal_keys, monthly_minimum, date=datetime.date.today())
    if cached == True:
        result = cache.get(cache_key)
        if result is not None:
            return result
    # NOTE(review): keys_issued_period is computed but never used below —
    # confirm whether the call has side effects or can be removed.
    keys_issued_period = _keys_issued_date_range()
    # We first do a (date, key) aggregation for the number of daily calls.
    # We would do monthly aggregation here if we were using a newer version
    # of django with ORM month accessors. This rolls up the per-method reports.
    calls_by_key = Report.objects
    if ignore_internal_keys:
        calls_by_key = exclude_internal_key_reports(calls_by_key)
    calls_by_key = (calls_by_key.values('date', 'key__key', 'key__email').annotate(calls=Sum('calls')).order_by('-calls'))
    # Aggregate the daily aggregates into monthly aggregates (still on a per-key
    # basis). This facilitates filtering keys by monthly usage.
    grp_by = lambda r: (r['date'].year, r['date'].month, r['key__key'])
    def sum_calls(grp, records):
        # Total the daily call counts within one (year, month, key) group.
        return {'calls': sum([r['calls'] for r in records])}
    calls_per_key_monthly = generic_aggregation(calls_by_key, key=grp_by, agg_func=sum_calls)
    # Drop keys whose monthly usage is below the minimum threshold.
    calls_per_key_monthly1 = ((grp, agg) for (grp, agg) in calls_per_key_monthly.iteritems() if agg['calls'] >= monthly_minimum)
    # Now aggregate the (year, month, key) into the size of (year, month) groups.
    grp_by_month = lambda ((year, month, key), agg): (year, month)
    active_keys_per_month = generic_aggregation(calls_per_key_monthly1, key=grp_by_month, agg_func=lambda grp, records: len(records))
    # Cache for 25 hours so today's entry survives until the daily rollover.
    cache.set(cache_key, active_keys_per_month, timeout=(60 * 60 * 25))
    return active_keys_per_month
def get_selected_profiles(self, registered_org=None, registered_name=None, registered_version=None):
    """Return the `CIM_RegisteredProfile` instances representing a filtered
    subset of the management profiles advertised by the WBEM server, that
    can be filtered by registered organization, registered name, and/or
    registered version.

    Parameters:

      registered_org (:term:`string`): A filter for the registered
        organization of the profile, matching (case insensitively) the
        `RegisteredOrganization` property of the `CIM_RegisteredProfile`
        instance, via its `Values` qualifier.
        If `None`, this parameter is ignored for filtering.

      registered_name (:term:`string`): A filter for the registered name
        of the profile, matching (case insensitively) the
        `RegisteredName` property of the `CIM_RegisteredProfile` instance.
        If `None`, this parameter is ignored for filtering.

      registered_version (:term:`string`): A filter for the registered
        version of the profile, matching (case insensitively) the
        `RegisteredVersion` property of the `CIM_RegisteredProfile`
        instance. Note that version strings may contain alphabetic
        characters to indicate the draft level.
        If `None`, this parameter is ignored for filtering.

    Returns:

      :class:`py:list` of :class:`~pywbem.CIMInstance`: The
      `CIM_RegisteredProfile` instances representing the filtered
      subset of the management profiles advertised by the WBEM server.

    Raises:

        Exceptions raised by :class:`~pywbem.WBEMConnection`.
        CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
          determined.
        KeyError: If an instance in the list of profiles is incomplete
          and does not include the required properties.
    """
    def _required_property(inst, prop_name):
        # Fetch a required property, raising a descriptive KeyError if absent.
        try:
            return inst[prop_name]
        except KeyError:
            raise KeyError(
                _format("CIM_RegisteredProfile instance in namespace "
                        "{0!A} does not have a property "
                        "'{1}'", self.interop_ns, prop_name))

    # Maps the RegisteredOrganization enumeration value to its
    # human-readable string via the Values qualifier.
    org_vm = ValueMapping.for_property(self, self.interop_ns, 'CIM_RegisteredProfile', 'RegisteredOrganization')
    # Lower-case the filters once, up front; None means "no filtering".
    org_lower = registered_org.lower() if registered_org is not None else None
    name_lower = registered_name.lower() if registered_name is not None else None
    version_lower = registered_version.lower() if registered_version is not None else None
    rtn = []
    for inst in self.profiles:
        inst_org = org_vm.tovalues(_required_property(inst, 'RegisteredOrganization'))
        inst_name = _required_property(inst, 'RegisteredName')
        inst_version = _required_property(inst, 'RegisteredVersion')
        inst_org_lower = inst_org.lower() if inst_org is not None else None
        inst_name_lower = inst_name.lower() if inst_name is not None else None
        inst_version_lower = inst_version.lower() if inst_version is not None else None
        # A profile is selected when every supplied filter matches.
        if ((org_lower is None or org_lower == inst_org_lower) and
                (name_lower is None or name_lower == inst_name_lower) and
                (version_lower is None or version_lower == inst_version_lower)):
            rtn.append(inst)
    return rtn
def encode_request(name, entries):
    """Encode request into client_message.

    :param name: target name, appended as a string field
    :param entries: mapping whose (key, value) pairs are appended as tuples
    :return: the fully framed ClientMessage
    """
    client_message = ClientMessage(payload_size=calculate_size(name, entries))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    client_message.append_str(name)
    # The entry count precedes the entries so the decoder knows how many
    # tuples to read back.
    client_message.append_int(len(entries))
    for entries_item in six.iteritems(entries):
        client_message.append_tuple(entries_item)
    # Finalise the frame length now that the payload is complete.
    client_message.update_frame_length()
    return client_message
def rankingEval(self):
    '''Rank the population based on the fitnessEval results.

    Evaluates every individual's fitness, normalises against the maximum
    fitness, and computes linear-ranking selection probabilities with a
    selection pressure of S = 2.0.

    Side effects: sets self.rankingComputed to 1 and self.fitness to the
    raw fitness array.

    :return: [fitnessAll, ranking indices (reversed argsort), probability]
    '''
    n = self.length
    fitnessAll = numpy.zeros(n)
    for idx in range(n):
        self.Ind[idx].fitnessEval()
        fitnessAll[idx] = self.Ind[idx].fitness
    # Normalise: 0 for the max-fitness individual, growing for lower fitness.
    maxFitness = fitnessAll.max()
    fitnessNorm = (maxFitness - fitnessAll) / maxFitness
    fitnessSorted = fitnessNorm.argsort()
    # Linear-ranking selection probabilities (pressure S = 2.0).
    S = 2.0
    probability = numpy.zeros(n)
    for rank in range(n):
        probability[fitnessSorted[rank]] = ((2 - S) / n) + (2 * rank * (S - 1)) / (n * (n - 1))
    self.rankingComputed = 1
    self.fitness = fitnessAll
    return [fitnessAll, fitnessSorted[::-1], probability]
def _typedef(obj, derive=False, infer=False):
    '''Create a new typedef for an object.

    Dispatches on the object's kind (module, frame, code, callable,
    dict-like, or generic instance) and fills in the size/length/referent
    accessors accordingly.

    :param derive: if true, try to duplicate a parent class's typedef
        before falling back to generic instance handling.
    :param infer: if true, treat objects that look like dicts as dicts.
    '''
    t = type(obj)
    v = _Typedef(base=_basicsize(t, obj=obj), kind=_kind_dynamic, type=t)
    ##_printf('new %r %r/%r %s', t, _basicsize(t), _itemsize(t), _repr(dir(obj)))
    if ismodule(obj):  # handle module like dict
        v.dup(item=_dict_typedef.item + _sizeof_CPyModuleObject, leng=_len_module, refs=_module_refs)
    elif isframe(obj):
        v.set(base=_basicsize(t, base=_sizeof_CPyFrameObject, obj=obj), item=_itemsize(t), leng=_len_frame, refs=_frame_refs)
    elif iscode(obj):
        v.set(base=_basicsize(t, base=_sizeof_CPyCodeObject, obj=obj), item=_sizeof_Cvoidp, leng=_len_code, refs=_co_refs, both=False)  # code only
    elif _callable(obj):
        if isclass(obj):  # class or type
            v.set(refs=_class_refs, both=False)  # code only
            if obj.__module__ in _builtin_modules:
                v.set(kind=_kind_ignored)
        elif isbuiltin(obj):  # function or method
            v.set(both=False,  # code only
                  kind=_kind_ignored)
        elif isfunction(obj):
            v.set(refs=_func_refs, both=False)  # code only
        elif ismethod(obj):
            v.set(refs=_im_refs, both=False)  # code only
        elif isclass(t):  # callable instance, e.g. SCons,
            # handle like any other instance further below
            v.set(item=_itemsize(t), safe_len=True, refs=_inst_refs)  # not code only!
        else:
            v.set(both=False)  # code only
    elif _issubclass(t, dict):
        v.dup(kind=_kind_derived)
    elif _isdictclass(obj) or (infer and _infer_dict(obj)):
        v.dup(kind=_kind_inferred)
    elif getattr(obj, '__module__', None) in _builtin_modules:
        v.set(kind=_kind_ignored)
    else:  # assume an instance of some class
        if derive:
            p = _derive_typedef(t)
            if p:  # duplicate parent
                v.dup(other=p, kind=_kind_derived)
                return v
        if _issubclass(t, Exception):
            v.set(item=_itemsize(t), safe_len=True, refs=_exc_refs, kind=_kind_derived)
        elif isinstance(obj, Exception):
            v.set(item=_itemsize(t), safe_len=True, refs=_exc_refs)
        else:
            v.set(item=_itemsize(t), safe_len=True, refs=_inst_refs)
    return v
def _case_insensitive_rpartition ( input_string : str , separator : str ) -> typing . Tuple [ str , str , str ] :
"""Same as str . rpartition ( ) , except that the partitioning is done case insensitive .""" | lowered_input_string = input_string . lower ( )
lowered_separator = separator . lower ( )
try :
split_index = lowered_input_string . rindex ( lowered_separator )
except ValueError : # Did not find the separator in the input _ string .
# Follow https : / / docs . python . org / 3 / library / stdtypes . html # text - sequence - type - str
# str . rpartition documentation and return the tuple ( " " , " " , unmodified _ input ) in this case
return "" , "" , input_string
else :
split_index_2 = split_index + len ( separator )
return input_string [ : split_index ] , input_string [ split_index : split_index_2 ] , input_string [ split_index_2 : ] |
def is_resource_protected(self, request, **kwargs):
    """Determine whether a resource should be protected.

    A resource is protected exactly when its access state appears in the
    return value of get_protected_states().
    """
    return self._get_resource_access_state(request) in self.get_protected_states()
def add_size_info(self):
    """Get size of file content and modification time from filename path.

    Sets self.size and self.modified; does nothing for directories.
    """
    if self.is_directory():
        # Directory size always differs from the custom index.html
        # that is generated. So return without calculating any size.
        return
    filename = self.get_os_filename()
    self.size = fileutil.get_size(filename)
    # Modification time is stored as a naive UTC datetime.
    self.modified = datetime.utcfromtimestamp(fileutil.get_mtime(filename))
def series(*coros_or_futures, timeout=None, loop=None, return_exceptions=False):
    """Run the given coroutine functions in series, each one
    running once the previous execution has completed.

    If any coroutine raises an exception, no more
    coroutines are executed. Otherwise, the coroutines' returned values
    will be returned as `list`.

    ``timeout`` can be used to control the maximum number of seconds to
    wait before returning. timeout can be an int or float.
    If timeout is not specified or None, there is no limit to the wait time.

    If ``return_exceptions`` is True, exceptions in the tasks are treated the
    same as successful results, and gathered in the result list; otherwise,
    the first raised exception will be immediately propagated to the
    returned future.

    All futures must share the same event loop.

    This function is basically the sequential execution version of
    ``asyncio.gather()``. Interface compatible with ``asyncio.gather()``.

    This function is a coroutine.

    Arguments:
        *coros_or_futures (iter|list):
            an iterable collection yielding coroutines functions.
        timeout (int/float):
            maximum number of seconds to wait before returning.
        return_exceptions (bool):
            exceptions in the tasks are treated the same as successful results,
            instead of raising them.
        loop (asyncio.BaseEventLoop):
            optional event loop to use.

    Returns:
        list: coroutines returned results.

    Raises:
        TypeError: in case of invalid coroutine object.
        ValueError: in case of empty set of coroutines or futures.
        TimeoutError: if execution takes more than expected.

    Usage::

        async def sum(x, y):
            return x + y

        await paco.series(
            sum(1, 2),
            sum(2, 3),
            sum(3, 4))
        # => [3, 5, 7]
    """
    # limit=1 makes gather() run the coroutines one at a time, which is
    # exactly the "series" behaviour this wrapper promises.
    return (yield from gather(*coros_or_futures, loop=loop, limit=1, timeout=timeout, return_exceptions=return_exceptions))
async def fini(self):
    '''Shut down the object and notify any onfini() coroutines.

    Reference counted: each call decrements the ref count and the actual
    teardown only runs once the count reaches zero.

    Returns:
        Remaining ref count (0 once fully shut down; None if already fini)
    '''
    assert self.anitted, f'{self.__class__.__name__} initialized improperly. Must use Base.anit class method.'
    if self.isfini:
        return
    if __debug__:
        import synapse.lib.threads as s_threads
        # avoid import cycle
        # fini() must run on the thread that created this Base.
        assert s_threads.iden() == self.tid
    self._syn_refs -= 1
    if self._syn_refs > 0:
        # Other holders remain; defer the actual teardown.
        return self._syn_refs
    self.isfini = True
    # Wake anything waiting on the fini event.
    fevt = self.finievt
    if fevt is not None:
        fevt.set()
    # Fini children first; iterate a copy since fini() may mutate tofini.
    for base in list(self.tofini):
        await base.fini()
    try:
        await self._kill_active_tasks()
    except:
        logger.exception(f'{self} - Exception during _kill_active_tasks')
    for fini in self._fini_funcs:
        try:
            await s_coro.ornot(fini)
        except asyncio.CancelledError:
            # Never swallow cancellation.
            raise
        except Exception:
            logger.exception(f'{self} - fini function failed: {fini}')
    self._syn_funcs.clear()
    self._fini_funcs.clear()
    return 0
def login(self, password='', captcha='', email_code='', twofactor_code='', language='english'):
    """Attempts web login and returns on a session with cookies set

    :param password: password, if it wasn't provided on instance init
    :type password: :class:`str`
    :param captcha: text response for captcha challenge
    :type captcha: :class:`str`
    :param email_code: email code for steam guard
    :type email_code: :class:`str`
    :param twofactor_code: 2FA code for steam guard
    :type twofactor_code: :class:`str`
    :param language: select language for steam web pages (sets language cookie)
    :type language: :class:`str`
    :return: a session on success and :class:`None` otherwise
    :rtype: :class:`requests.Session`, :class:`None`
    :raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
    :raises LoginIncorrect: wrong username or password
    :raises CaptchaRequired: when captcha is needed
    :raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
    :raises EmailCodeRequired: when email is needed
    :raises TwoFactorCodeRequired: when 2FA is needed
    """
    if self.logged_on:
        return self.session
    # Resolve the password: argument takes precedence over the one stored
    # at init time; having neither is an immediate failure.
    if password:
        self.password = password
    else:
        if self.password:
            password = self.password
        else:
            raise LoginIncorrect("password is not specified")
    if not captcha and self.captcha_code:
        captcha = self.captcha_code
    self._load_key()
    resp = self._send_login(password=password, captcha=captcha, email_code=email_code, twofactor_code=twofactor_code)
    if resp['success'] and resp['login_complete']:
        self.logged_on = True
        # Clear credentials/captcha state once they are no longer needed.
        self.password = self.captcha_code = ''
        self.captcha_gid = -1
        # Replicate every login cookie across all Steam domains.
        for cookie in list(self.session.cookies):
            for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
                self.session.cookies.set(cookie.name, cookie.value, domain=domain, secure=cookie.secure)
        self.session_id = generate_session_id()
        for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
            self.session.cookies.set('Steam_Language', language, domain=domain)
            # 'birthtime' bypasses age gates on store pages.
            self.session.cookies.set('birthtime', '-3333', domain=domain)
            self.session.cookies.set('sessionid', self.session_id, domain=domain)
        self._finalize_login(resp)
        return self.session
    else:
        if resp.get('captcha_needed', False):
            self.captcha_gid = resp['captcha_gid']
            self.captcha_code = ''
            if resp.get('clear_password_field', False):
                self.password = ''
                raise CaptchaRequiredLoginIncorrect(resp['message'])
            else:
                raise CaptchaRequired(resp['message'])
        elif resp.get('emailauth_needed', False):
            self.steam_id = SteamID(resp['emailsteamid'])
            raise EmailCodeRequired(resp['message'])
        elif resp.get('requires_twofactor', False):
            raise TwoFactorCodeRequired(resp['message'])
        else:
            self.password = ''
            raise LoginIncorrect(resp['message'])
    # NOTE(review): unreachable — every failure branch above raises.
    return None
def directive(self, name, default=None):
    """Return the loaded directive with the specified name.

    Falls back to hug's globally registered directives, and finally to
    *default* when the name is unknown in both places.
    """
    local_directives = getattr(self, '_directives', {})
    global_fallback = hug.defaults.directives.get(name, default)
    return local_directives.get(name, global_fallback)
def attention_lm_ae_extended():
    """Experiment with the exp_factor params.

    Returns a copy of the long-sequence MoE base hparams with local
    expert-attention layers enabled.
    """
    hparams = attention_lm_moe_base_long_seq()
    # "e" selects the expert-attention layer type for all four layers.
    hparams.attention_layers = "eeee"
    hparams.attention_local = True
    # hparams.factored_logits=1  # Necessary when the number of expert grow bigger
    hparams.attention_moe_k = 2
    hparams.attention_exp_factor = 4
    # hparams.attention_exp_inputdim = 128
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    return hparams
def out_of_china(lng, lat):
    """Return True when the coordinate lies outside China's bounding box.

    Coordinates outside China must not be offset by the coordinate
    transform, so callers use this as a guard.

    :param lng: longitude in degrees
    :param lat: latitude in degrees
    :return: True if outside the bounding box, False otherwise
    """
    in_lng_range = 72.004 <= lng <= 137.8347
    in_lat_range = 0.8293 <= lat <= 55.8271
    return not (in_lng_range and in_lat_range)
def grid_reload_from_ids(oargrid_jobids):
    """Reload all running or pending jobs of Grid'5000 from their ids.

    Args:
        oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the
            jobs on each site

    Returns:
        The list of python-grid5000 jobs retrieved
    """
    client = get_api_client()
    return [client.sites[site].jobs[job_id] for site, job_id in oargrid_jobids]
def add_attribute_model(self,
                        name,  # type: str
                        attr,  # type: AttributeModel
                        writeable_func=None,  # type: Optional[Callable]
                        ):
    # type: (...) -> AttributeModel
    """Register a pre-existing AttributeModel to be added to the Block.

    Delegates to the field registry, associating the attribute with this
    controller's part.
    """
    return self._field_registry.add_attribute_model(name, attr, writeable_func, self._part)
def run(self, series, exponent=None):
    '''Run the Hurst calculation (delegates to self.calculateHurst).

    :type series: List
    :type exponent: int
    :rtype: float (implicitly None when the calculation fails)
    '''
    try:
        return self.calculateHurst(series, exponent)
    except Exception as e:
        # NOTE(review): broad catch prints the error and implicitly
        # returns None, despite the documented float return type —
        # confirm this best-effort behavior is intended.
        print(" Error: %s" % e)
def to_native(self, value, context=None):
    """Schematics deserializer override.

    We return a us.states.State object so the abbreviation
    or long name can be trivially accessed. Additionally,
    some geo type ops are available.

    TIP: The us.states library expects unicode so we
    must ensure value is encoded properly. (Python 2: relies on the
    `unicode` builtin.)

    :return: us.states.State
    :raises ConversionError: when the value cannot be resolved to a state
    """
    if isinstance(value, us.states.State):
        # Already deserialized; pass through unchanged.
        return value
    try:
        state = us.states.lookup(unicode(value))
        if not state:
            # lookup() missed; raise TypeError so the handler below turns
            # the miss into a ConversionError.
            raise TypeError
        return state
    except TypeError:
        raise ConversionError(self.messages['convert'])
def inspect(orm_class, attribute_name):
    """Inspect a mapped attribute of an ORM class, with memoization.

    :param attribute_name: name of the mapped attribute to inspect.
    :returns: list of 2-tuples containing information about the inspected
        attribute (first element: mapped entity attribute kind; second
        attribute: mapped entity attribute)

    NOTE(review): the double-underscore attribute access relies on name
    mangling, so this must be defined inside OrmAttributeInspector.
    """
    key = (orm_class, attribute_name)
    # Results are memoized per (class, attribute) in a class-level cache.
    elems = OrmAttributeInspector.__cache.get(key)
    if elems is None:
        elems = OrmAttributeInspector.__inspect(key)
        OrmAttributeInspector.__cache[key] = elems
    return elems
def Convert(self, metadata, value, token=None):
    """Converts a single ArtifactFilesDownloaderResult.

    Delegates to BatchConvert with a one-element batch and yields each
    converted result.
    """
    for r in self.BatchConvert([(metadata, value)], token=token):
        yield r
def fetch(self):
    """Fetch a AuthorizedConnectAppInstance

    :returns: Fetched AuthorizedConnectAppInstance
    :rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppInstance
    """
    params = values.of({})
    payload = self._version.fetch('GET', self._uri, params=params, )
    # Rehydrate the instance using the identifiers captured when this
    # context was constructed.
    return AuthorizedConnectAppInstance(self._version, payload, account_sid=self._solution['account_sid'], connect_app_sid=self._solution['connect_app_sid'], )
def _credssp_processor(self, context):
    """Coroutine implementing the CredSSP authentication state machine.

    Primed with an initial HTTP response via ``(yield)``; each subsequent
    ``yield`` sends a request (with a CredSSP header attached) out to the
    caller and receives the server's HTTP response back. The exchange is:
    TLS handshake, NTLM negotiate/challenge/authenticate wrapped in
    TSRequest structs, server public-key verification, then delegation of
    the credentials (TSPasswordCreds) over the TLS channel.

    :param context: security context providing initialize_security_context()
        and wrap_message() (NTLM GSSAPI-style — assumed from usage; confirm).
    :return: nothing useful; raises Exception on any rejected step.
    """
    # Prime the coroutine: the first send() delivers the 401 response.
    http_response = (yield)
    credssp_context = self._get_credssp_header(http_response)
    if credssp_context is None:
        raise Exception('The remote host did not respond with a \'www-authenticate\' header containing ' '\'CredSSP\' as an available authentication mechanism')
    # 1. First, secure the channel with a TLS Handshake
    if not credssp_context:
        self.tls_connection = SSL.Connection(self.tls_context)
        self.tls_connection.set_connect_state()
        while True:
            try:
                self.tls_connection.do_handshake()
            except SSL.WantReadError:
                # Handshake needs more data: ship our pending TLS bytes to
                # the server in a CredSSP header, feed its reply back in.
                http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
                credssp_context = self._get_credssp_header(http_response)
                if credssp_context is None or not credssp_context:
                    raise Exception('The remote host rejected the CredSSP TLS handshake')
                self.tls_connection.bio_write(credssp_context)
            else:
                break
    # add logging to display the negotiated cipher (move to a function)
    # NOTE(review): reaches into pyOpenSSL internals (_ssl) via the raw
    # cryptography FFI binding — fragile across pyOpenSSL versions.
    openssl_lib = _util.binding.lib
    ffi = _util.binding.ffi
    cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl)
    cipher_name = ffi.string(openssl_lib.SSL_CIPHER_get_name(cipher))
    log.debug("Negotiated TLS Cipher: %s", cipher_name)
    # 2. Send an TSRequest containing an NTLM Negotiate Request
    context_generator = context.initialize_security_context()
    negotiate_token = context_generator.send(None)
    log.debug("NTLM Type 1: %s", AsHex(negotiate_token))
    ts_request = TSRequest()
    ts_request['negoTokens'] = negotiate_token
    # send() writes plaintext into the TLS engine; bio_read() pulls the
    # encrypted bytes out to be carried in the HTTP header.
    self.tls_connection.send(ts_request.getData())
    http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096))
    # Extract and decrypt the encoded TSRequest response struct from the Negotiate header
    authenticate_header = self._get_credssp_header(http_response)
    if not authenticate_header or authenticate_header is None:
        raise Exception("The remote host rejected the CredSSP negotiation token")
    self.tls_connection.bio_write(authenticate_header)
    # NTLM Challenge Response and Server Public Key Validation
    ts_request = TSRequest()
    ts_request.fromString(self.tls_connection.recv(8192))
    challenge_token = ts_request['negoTokens']
    log.debug("NTLM Type 2: %s", AsHex(challenge_token))
    server_cert = self.tls_connection.get_peer_certificate()
    # not using channel bindings
    # certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', ''))
    # # channel_binding_structure = gss_channel_bindings_struct()
    # # channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest
    public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert)
    # The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with
    # the final SPNEGO token. This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks
    # Build and encrypt the response to the server
    ts_request = TSRequest()
    type3 = context_generator.send(challenge_token)
    log.debug("NTLM Type 3: %s", AsHex(type3))
    ts_request['negoTokens'] = type3
    public_key_encrypted, signature = context.wrap_message(public_key)
    ts_request['pubKeyAuth'] = signature + public_key_encrypted
    self.tls_connection.send(ts_request.getData())
    enc_type3 = self.tls_connection.bio_read(8192)
    http_response = yield self._set_credssp_header(http_response.request, enc_type3)
    # TLS decrypt the response, then ASN decode and check the error code
    auth_response = self._get_credssp_header(http_response)
    if not auth_response or auth_response is None:
        raise Exception("The remote host rejected the challenge response")
    self.tls_connection.bio_write(auth_response)
    ts_request = TSRequest()
    ts_request.fromString(self.tls_connection.recv(8192))
    # TODO: determine how to validate server certificate here
    # a = ts_request['pubKeyAuth']
    # print ":".join("{:02x}".format(ord(c)) for c in a)
    # 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS
    tsp = TSPasswordCreds()
    tsp['domain'] = self.password_authenticator.get_domain()
    tsp['username'] = self.password_authenticator.get_username()
    tsp['password'] = self.password_authenticator.get_password()
    tsc = TSCredentials()
    # type 1 — presumably TSPasswordCreds per the CredSSP spec; confirm.
    tsc['type'] = 1
    tsc['credentials'] = tsp.getData()
    ts_request = TSRequest()
    encrypted, signature = context.wrap_message(tsc.getData())
    ts_request['authInfo'] = signature + encrypted
    self.tls_connection.send(ts_request.getData())
    token = self.tls_connection.bio_read(8192)
    # Re-attach the original body and send the final, encrypted request.
    http_response.request.body = self.body
    http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token)
    if http_response.status_code == 401:
        raise Exception('Authentication Failed')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.