signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def cmd_velocity(self, args):
    '''velocity x-ms y-ms z-ms'''
    # Validate the argument count up front and bail out with usage help;
    # the original re-checked len(args) == 3 redundantly after this guard.
    if len(args) != 3:
        print("Usage: velocity x y z (m/s)")
        return
    x_mps = float(args[0])
    y_mps = float(args[1])
    z_mps = float(args[2])
    # Send SET_POSITION_TARGET_LOCAL_NED with a type_mask that enables only
    # the velocity fields. NED convention: positive z points down, so the
    # user-supplied "up" velocity is negated.
    self.master.mav.set_position_target_local_ned_send(
        0,                                     # time_boot_ms (not used)
        0, 0,                                  # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED,   # frame
        0b0000111111000111,                    # type_mask (only speeds enabled)
        0, 0, 0,                               # x, y, z positions (not used)
        x_mps, y_mps, -z_mps,                  # x, y, z velocity in m/s
        0, 0, 0,                               # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)                                  # yaw, yaw_rate (not used)
def detector_50_Cent(text):
    """Determine whether 50 Cent is a topic."""
    # Substring probes associated with the artist; more than two hits
    # counts as a positive detection.
    keywords = (
        "50 Cent", "rap", "hip hop", "Curtis James Jackson III",
        "Curtis Jackson", "Eminem", "Dre", "Get Rich or Die Tryin'",
        "G-Unit", "Street King Immortal", "In da Club", "Interscope",
    )
    hits = 0
    for keyword in keywords:
        if keyword in text:
            hits += 1
    return ("50 Cent", float(hits > 2))
def ping(bot, mask, target, args):
    """ping/pong

    %%ping"""
    # Reply to the requester with a NOTICE containing their own nick.
    bot.send('NOTICE %(nick)s :PONG %(nick)s!' % {'nick': mask.nick})
def convert_iris(directory, output_directory, output_filename='iris.hdf5'):
    """Convert the Iris dataset to HDF5.

    Converts the Iris dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.Iris`. This method assumes the existence
    of the file `iris.data` in `directory`.

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'iris.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    # Map the byte-string class labels to integer targets.
    label_map = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
    raw = numpy.loadtxt(
        os.path.join(directory, 'iris.data'),
        converters={4: lambda lbl: label_map[lbl]},
        delimiter=',')
    features = raw[:, :-1].astype('float32')
    targets = raw[:, -1].astype('uint8').reshape((-1, 1))
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    fill_hdf5_file(
        h5file,
        (('all', 'features', features), ('all', 'targets', targets)))
    # Label the dimensions so downstream consumers know the axis semantics.
    for axis, label in enumerate(('batch', 'feature')):
        h5file['features'].dims[axis].label = label
    for axis, label in enumerate(('batch', 'index')):
        h5file['targets'].dims[axis].label = label
    h5file.flush()
    h5file.close()
    return (output_path,)
def ParseFromHumanReadable(self, string):
    """Parse a human readable string of a byte string.

    Args:
        string: The string to parse.

    Raises:
        DecodeError: If the string can not be parsed.
    """
    # Empty input means "no value"; keep the original early-out contract.
    if not string:
        return None
    match = self.REGEX.match(string.strip().lower())
    if match is None:
        raise DecodeError("Unknown specification for ByteSize %s" % string)
    multiplier = self.DIVIDERS.get(match.group(2))
    if not multiplier:
        raise DecodeError("Invalid multiplier %s" % match.group(2))
    # The value may be represented as a float, but if not don't lose accuracy.
    number = match.group(1)
    number = float(number) if "." in number else int(number)
    self._value = int(number * multiplier)
def format(self, record):
    """Format a message from a record object.

    Wraps the record, injects the per-level (and secondary) color escape
    codes, delegates actual formatting to the base Formatter, then appends
    a reset code and colorizes any 'desc|data' split in the message.
    """
    record = ColoredRecord(record)
    record.log_color = self.color(self.log_colors, record.levelname)
    # Set secondary log colors
    if self.secondary_log_colors:
        for name, log_colors in self.secondary_log_colors.items():
            color = self.color(log_colors, record.levelname)
            setattr(record, name + '_log_color', color)
    # Format the message. Old-style super() call is avoided on Python <= 2.7
    # (logging.Formatter was an old-style class there).
    if sys.version_info > (2, 7):
        message = super(ColoredFormatter, self).format(record)
    else:
        message = logging.Formatter.format(self, record)
    # Add a reset code to the end of the message
    # (if it wasn't explicitly added in format str)
    if self.reset and not message.endswith(escape_codes['reset']):
        message += escape_codes['reset']
    # Messages shaped like "desc|data" get the data part highlighted green.
    if '|' in message:
        desc, data = message.split("|", 1)
        desc = desc + escape_codes['reset']
        data = escape_codes['green'] + data
        message = desc + '|' + data
    return message
def get_all_kernels(self, kernel_ids=None, owners=None):
    """Retrieve all the EC2 kernels available on your account.

    Constructs a filter to allow the processing to happen server side.

    :type kernel_ids: list
    :param kernel_ids: A list of strings with the image IDs wanted
    :type owners: list
    :param owners: A list of owner IDs
    :rtype: list
    :return: A list of :class:`boto.ec2.image.Image`
    """
    params = {}
    if kernel_ids:
        self.build_list_params(params, kernel_ids, 'ImageId')
    if owners:
        self.build_list_params(params, owners, 'Owner')
    # Restrict the DescribeImages call to kernel images server-side.
    self.build_filter_params(params, {'image-type': 'kernel'})
    return self.get_list('DescribeImages', params, [('item', Image)],
                         verb='POST')
def update(self, milliseconds):
    """Updates all of the objects in our world."""
    # Re-sort before updating so objects are processed in the intended order.
    self.__sort_up()
    for world_object in self.__up_objects:
        world_object.update(milliseconds)
def get_channelstate_for(
        chain_state: ChainState,
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
) -> Optional[NettingChannelState]:
    """Return the NettingChannelState if it exists, None otherwise."""
    token_network = get_token_network_by_token_address(
        chain_state,
        payment_network_id,
        token_address,
    )
    channel_state = None
    if token_network:
        # Resolve every channel this node shares with the partner address.
        channels = [
            token_network.channelidentifiers_to_channels[channel_id]
            for channel_id in token_network.partneraddresses_to_channelidentifiers[partner_address]
        ]
        # NOTE(review): called with [CHANNEL_STATE_UNUSABLE] — presumably
        # filter_channels_by_status EXCLUDES channels in the listed states
        # rather than selecting them; confirm against its definition.
        states = filter_channels_by_status(
            channels,
            [CHANNEL_STATE_UNUSABLE],
        )
        # If multiple channel states are found, return the last one.
        if states:
            channel_state = states[-1]
    return channel_state
def _encode_datetime(name, value, dummy0, dummy1):
    """Encode datetime.datetime."""
    # Normalize timezone-aware datetimes to UTC before converting.
    offset = value.utcoffset()
    if offset is not None:
        value = value - offset
    # Milliseconds since the epoch, keeping sub-second precision.
    millis = int(calendar.timegm(value.timetuple()) * 1000
                 + value.microsecond / 1000)
    return b"\x09" + name + _PACK_LONG(millis)
def __create_nlinks(self, data, inds=None, boundary_penalties_fcn=None):
    """Compute nlinks grid from data shape information. For boundary penalties
    are data (intensities) values are used.

    inds: Default is None. Used for multiscale GC. This are indexes of
        multiscale pixels. Next example shows one superpixel with index 2.
        inds = [
            [1 2 2],
            [3 2 2],
            [4 5 6]]

    boundary_penalties_fcn: is function with one argument - axis. It can
        be used for setting penalty weights between neighbooring pixels.
    """
    # use the gerneral graph algorithm
    # first, we construct the grid graph
    start = time.time()
    if inds is None:
        inds = np.arange(data.size).reshape(data.shape)
    # if not self.segparams['use_boundary_penalties'] and \
    #     boundary_penalties_fcn is None:
    if boundary_penalties_fcn is None:  # This is faster for some specific format
        # Pair every voxel index with its neighbour along each axis; the
        # two-column arrays are the edge lists for x, y and z directions.
        edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]
        edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]
        edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]
    else:
        logger.info("use_boundary_penalties")
        bpw = self.segparams["boundary_penalties_weight"]
        # A third column carries the weighted boundary penalty per edge.
        bpa = boundary_penalties_fcn(2)
        # id1 = inds[:, :, :-1].ravel()
        edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel(),
                     # cc * np.ones(id1.shape)
                     bpw * bpa[:, :, 1:].ravel(), ]
        bpa = boundary_penalties_fcn(1)
        # id1 = inds[:, 1:, :].ravel()
        edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel(),
                     # cc * np.ones(id1.shape)]
                     bpw * bpa[:, 1:, :].ravel(), ]
        bpa = boundary_penalties_fcn(0)
        # id1 = inds[1:, :, :].ravel()
        edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel(),
                     # cc * np.ones(id1.shape)]
                     bpw * bpa[1:, :, :].ravel(), ]
    # import pdb; pdb.set_trace()
    edges = np.vstack([edgx, edgy, edgz]).astype(np.int32)
    # edges - list of edge index pairs that are adjacent to each other
    elapsed = time.time() - start
    self.stats["_create_nlinks time"] = elapsed
    logger.info("__create nlinks time " + str(elapsed))
    return edges
def confirm(prompt=None, resp=False):
    """Prompts user for confirmation.

    :param prompt: String to display to user.
    :param resp: Default response value.
    :return: Boolean response from user, or default value.
    """
    if prompt is None:
        prompt = 'Confirm'
    # Show the default answer first, e.g. "Confirm [y]|n: ".
    if resp:
        prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
    else:
        prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
    while True:
        # FIX: original used Python 2 `raw_input` and a `print` statement,
        # which are NameError/SyntaxError on Python 3.
        ans = input(prompt)
        if not ans:
            # Empty answer accepts the default.
            return resp
        if ans not in ['y', 'Y', 'n', 'N']:
            print('please enter y or n.')
            continue
        return ans in ('y', 'Y')
def shuffle(enable):
    """Change shuffle mode of current player."""
    message = command(protobuf.CommandInfo_pb2.ChangeShuffleMode)
    inner = message.inner()
    # 3 enables shuffling, 1 disables it (values per protobuf enum — confirm).
    inner.options.shuffleMode = 3 if enable else 1
    return message
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''.. versionadded:: 2019.2.0

    Ensure a subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.
    :param virtual_network:
        Name of the existing virtual network containing the subnet.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    snet = __salt__['azurearm_network.subnet_get'](
        name, virtual_network, resource_group,
        azurearm_log_level='info', **connection_auth)
    if 'error' in snet:
        # Subnet is already gone; nothing to do.
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret
    if __opts__['test']:
        # Dry-run mode: report what would change without deleting.
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': snet, 'new': {}}
        return ret
    deleted = __salt__['azurearm_network.subnet_delete'](
        name, virtual_network, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {'old': snet, 'new': {}}
        return ret
    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def views_show_many(self, ids=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/views#list-views-by-id"
    api_path = "/api/v2/views/show_many.json"
    api_query = {}
    # Merge any caller-supplied query params, removing them from kwargs so
    # they are not passed to self.call() twice. (Original used the
    # non-idiomatic `"query" in kwargs.keys()` plus a manual `del`.)
    if "query" in kwargs:
        api_query.update(kwargs.pop("query"))
    if ids:
        api_query.update({"ids": ids})
    return self.call(api_path, query=api_query, **kwargs)
async def i2c_read_data(self, command):
    """This method retrieves the last value read for an i2c device identified
    by address. This is a polling implementation; i2c_read_request and
    i2c_read_request_reply may be a better alternative.

    :param command: {"method": "i2c_read_data", "params": [I2C_ADDRESS]}
    :returns: {"method": "i2c_read_data_reply", "params": i2c_data}
    """
    device_address = int(command[0])
    data = await self.core.i2c_read_data(device_address)
    # Relay the result back over the websocket as a JSON reply.
    await self.websocket.send(
        json.dumps({"method": "i2c_read_data_reply", "params": data}))
def get_logins(self, user_id, start_date=None):
    """Gets the login history for a user; default start_date is 30 days ago.

    :param int user_id: User id to get
    :param string start_date: "%m/%d/%Y %H:%M:%s" formatted string.
    :returns: list https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer_Access_Authentication/

    Example::
        get_logins(123, '04/08/2018 0:0:0')
    """
    if start_date is None:
        # Default to midnight thirty days ago.
        thirty_days_ago = datetime.datetime.today() - datetime.timedelta(days=30)
        start_date = thirty_days_ago.strftime("%m/%d/%Y 0:0:0")
    date_filter = {
        'loginAttempts': {
            'createDate': {
                'operation': 'greaterThanDate',
                'options': [{'name': 'date', 'value': [start_date]}],
            }
        }
    }
    return self.user_service.getLoginAttempts(id=user_id, filter=date_filter)
def _git_config(cwd, user, password, output_encoding=None):
    '''Helper to retrieve git config options'''
    contextkey = 'git.config.' + cwd
    # Cache the resolved config path per working directory.
    if contextkey not in __context__:
        git_dir = rev_parse(cwd, opts=['--git-dir'], user=user,
                            password=password, ignore_retcode=True,
                            output_encoding=output_encoding)
        if os.path.isabs(git_dir):
            config_path = os.path.join(git_dir, 'config')
        else:
            # Relative --git-dir is resolved against the working directory.
            config_path = os.path.join(cwd, git_dir, 'config')
        __context__[contextkey] = config_path
    return __context__[contextkey]
def _graphql_query_waittime ( self , query_hash : str , current_time : float , untracked_queries : bool = False ) -> int :
"""Calculate time needed to wait before GraphQL query can be executed .""" | sliding_window = 660
if query_hash not in self . _graphql_query_timestamps :
self . _graphql_query_timestamps [ query_hash ] = [ ]
self . _graphql_query_timestamps [ query_hash ] = list ( filter ( lambda t : t > current_time - 60 * 60 , self . _graphql_query_timestamps [ query_hash ] ) )
reqs_in_sliding_window = list ( filter ( lambda t : t > current_time - sliding_window , self . _graphql_query_timestamps [ query_hash ] ) )
count_per_sliding_window = self . _graphql_request_count_per_sliding_window ( query_hash )
if len ( reqs_in_sliding_window ) < count_per_sliding_window and not untracked_queries :
return max ( 0 , self . _graphql_earliest_next_request_time - current_time )
next_request_time = min ( reqs_in_sliding_window ) + sliding_window + 6
if untracked_queries :
self . _graphql_earliest_next_request_time = next_request_time
return round ( max ( next_request_time , self . _graphql_earliest_next_request_time ) - current_time ) |
def long_form_one_format(jupytext_format, metadata=None, update=None):
    """Parse 'sfx.py:percent' into {'suffix': 'sfx', 'extension': 'py', 'format_name': 'percent'}"""
    # Already a dict: merge the update and validate.
    if isinstance(jupytext_format, dict):
        if update:
            jupytext_format.update(update)
        return validate_one_format(jupytext_format)
    if not jupytext_format:
        return {}
    # Resolve well-known aliases to their file extensions.
    common_name_to_ext = {'notebook': 'ipynb', 'rmarkdown': 'Rmd',
                          'markdown': 'md', 'c++': 'cpp'}
    lowered = jupytext_format.lower()
    if lowered in common_name_to_ext:
        jupytext_format = common_name_to_ext[lowered]
    fmt = {}
    # Split off an optional 'prefix/' (index 0 excluded, as in rfind > 0).
    if jupytext_format.rfind('/') > 0:
        fmt['prefix'], jupytext_format = jupytext_format.rsplit('/', 1)
    # Split off an optional ':format_name'.
    if ':' in jupytext_format:
        ext, fmt['format_name'] = jupytext_format.rsplit(':', 1)
    else:
        ext = jupytext_format
    # Split off an optional '.sfx' suffix preceding the real extension.
    if ext.rfind('.') > 0:
        fmt['suffix'], ext = os.path.splitext(ext)
    if not ext.startswith('.'):
        ext = '.' + ext
    if ext == '.auto':
        ext = auto_ext_from_metadata(metadata) if metadata is not None else '.auto'
        if not ext:
            raise JupytextFormatError(
                "No language information in this notebook. Please replace 'auto' with "
                "an actual script extension.")
    fmt['extension'] = ext
    if update:
        fmt.update(update)
    return validate_one_format(fmt)
def raw(self) -> str:
    """Return signed raw format string of the Membership instance

    :return:
    """
    # Field order in the rendered document: version, currency, issuer,
    # membership block stamp, membership type, user id, certification stamp.
    return """Version: {0}
Type: Membership
Currency: {1}
Issuer: {2}
Block: {3}
Membership: {4}
UserID: {5}
CertTS: {6}
""".format(self.version, self.currency, self.issuer, self.membership_ts,
           self.membership_type, self.uid, self.identity_ts)
def mediation_analysis(data=None, x=None, m=None, y=None, covar=None,
                       alpha=0.05, n_boot=500, seed=None, return_dist=False):
    """Mediation analysis using a bias-correct non-parametric bootstrap method.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe.
    x : str
        Column name in data containing the predictor variable.
        The predictor variable must be continuous.
    m : str or list of str
        Column name(s) in data containing the mediator variable(s).
        The mediator(s) can be continuous or binary (e.g. 0 or 1).
        This function supports multiple parallel mediators.
    y : str
        Column name in data containing the outcome variable.
        The outcome variable must be continuous.
    covar : None, str, or list
        Covariate(s). If not None, the specified covariate(s) will be
        included in all regressions.
    alpha : float
        Significance threshold. Used to determine the confidence interval,
        CI = [alpha / 2; 1 - alpha / 2]
    n_boot : int
        Number of bootstrap iterations for confidence intervals and p-values
        estimation. The greater, the slower.
    seed : int or None
        Random state seed.
    return_dist : bool
        If True, the function also returns the indirect bootstrapped beta
        samples (size = n_boot). Can be plotted for instance using
        :py:func:`seaborn.distplot()` or :py:func:`seaborn.kdeplot()`.

    Returns
    -------
    stats : pd.DataFrame
        Mediation summary::

            'path'      : regression model
            'coef'      : regression estimates
            'se'        : standard error
            'CI[2.5%]'  : lower confidence interval
            'CI[97.5%]' : upper confidence interval
            'pval'      : two-sided p-values
            'sig'       : statistical significance

    Notes
    -----
    Mediation analysis tests whether the effect of an independent variable X
    on a dependent variable Y (X -> Y) is at least partly explained by a
    chain of effects through an intervening mediator variable M
    (X -> M -> Y) (Fiedler et al. 2011).

    The **indirect effect** (average causal mediation effect, ACME) of X on
    Y through mediator M quantifies the estimated difference in Y resulting
    from a one-unit change in X through a sequence of causal steps in which
    X affects M, which in turn affects Y. It is considered significant if
    the specified confidence interval does not include 0. The path 'X --> Y'
    is the sum of both the indirect and direct effect (total effect).

    A linear regression is used if the mediator variable is continuous and a
    logistic regression if it is dichotomous (binary). Parallel multiple
    mediators are supported. This function will only work well if the
    outcome variable is continuous; it does not support binary or ordinal
    outcome variables. For more advanced mediation models, please refer to
    the `lavaan` or `mediation` R packages, or the PROCESS macro for SPSS.

    The two-sided p-value of the indirect effect is computed using the
    bootstrap distribution, as in the mediation R package. It should be
    interpreted with caution since it is a) not constructed conditioned on a
    true null hypothesis (see Hayes and Rockwood 2017) and b) varies with
    the number of bootstrap samples and the random seed.

    Note that rows with NaN are automatically removed.

    References
    ----------
    .. [1] Baron, R. M. & Kenny, D. A. The moderator-mediator variable
           distinction in social psychological research. J. Pers. Soc.
           Psychol. 51, 1173-1182 (1986).
    .. [2] Fiedler, K., Schott, M. & Meiser, T. What mediation analysis can
           (not) do. J. Exp. Soc. Psychol. 47, 1231-1236 (2011).
    .. [3] Hayes, A. F. & Rockwood, N. J. Regression-based statistical
           mediation and moderation analysis in clinical research. Behav.
           Res. Ther. 98, 39-57 (2017).
    .. [4] https://cran.r-project.org/web/packages/mediation/mediation.pdf
    .. [5] http://lavaan.ugent.be/tutorial/mediation.html

    Examples
    --------
    Simple mediation analysis::

        >>> from pingouin import mediation_analysis, read_dataset
        >>> df = read_dataset('mediation')
        >>> mediation_analysis(data=df, x='X', m='M', y='Y', alpha=0.05,
        ...                    seed=42)

    Return the indirect bootstrapped beta coefficients::

        >>> stats, dist = mediation_analysis(data=df, x='X', m='M', y='Y',
        ...                                  return_dist=True)
        >>> print(dist.shape)
        (500,)

    Binary mediator, covariates and multiple parallel mediators are also
    supported, e.g.::

        >>> mediation_analysis(data=df, x='X', m=['M', 'Mbin'], y='Y',
        ...                    covar=['Ybin'], seed=42)
    """
    # Sanity check
    # FIX: the original message wrongly said 'y must be a string.' for x.
    assert isinstance(x, str), 'x must be a string.'
    assert isinstance(y, str), 'y must be a string.'
    assert isinstance(m, (list, str)), 'Mediator(s) must be a list or string.'
    assert isinstance(covar, (type(None), str, list))
    if isinstance(m, str):
        m = [m]
    n_mediator = len(m)
    assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame.'
    # Check for duplicates
    assert n_mediator == len(set(m)), 'Cannot have duplicates mediators.'
    if isinstance(covar, str):
        covar = [covar]
    if isinstance(covar, list):
        assert len(covar) == len(set(covar)), 'Cannot have duplicates covar.'
        assert set(m).isdisjoint(covar), 'Mediator cannot be in covar.'
    # Check that columns are in dataframe
    columns = _fl([x, m, y, covar])
    keys = data.columns
    assert all([c in keys for c in columns]), 'Column(s) are not in DataFrame.'
    # Check that columns are numeric
    err_msg = "Columns must be numeric or boolean."
    assert all([data[c].dtype.kind in 'bfi' for c in columns]), err_msg
    # Drop rows with NaN values
    data = data[columns].dropna()
    n = data.shape[0]
    assert n > 5, 'DataFrame must have at least 5 samples (rows).'
    # Check if mediator is binary
    mtype = 'logistic' if all(data[m].nunique() == 2) else 'linear'
    # Name of CI columns
    ll_name = 'CI[%.1f%%]' % (100 * alpha / 2)
    ul_name = 'CI[%.1f%%]' % (100 * (1 - alpha / 2))
    # Compute regressions
    cols = ['names', 'coef', 'se', 'pval', ll_name, ul_name]
    # For speed, we pass np.array instead of pandas DataFrame
    X_val = data[_fl([x, covar])].values    # X + covar as predictors
    XM_val = data[_fl([x, m, covar])].values  # X + M + covar as predictors
    M_val = data[m].values                  # M as target (no covariates)
    y_val = data[y].values                  # y as target (no covariates)
    # M(j) ~ X + covar
    sxm = {}
    for idx, j in enumerate(m):
        if mtype == 'linear':
            sxm[j] = linear_regression(X_val, M_val[:, idx], alpha=alpha).loc[[1], cols]
        else:
            sxm[j] = logistic_regression(X_val, M_val[:, idx], alpha=alpha).loc[[1], cols]
        sxm[j].loc[1, 'names'] = '%s ~ X' % j
    sxm = pd.concat(sxm, ignore_index=True)
    # Y ~ M + covar
    smy = linear_regression(data[_fl([m, covar])], y_val, alpha=alpha).loc[1:n_mediator, cols]
    # Average Total Effects (Y ~ X + covar)
    sxy = linear_regression(X_val, y_val, alpha=alpha).loc[[1], cols]
    # Average Direct Effects (Y ~ X + M + covar)
    direct = linear_regression(XM_val, y_val, alpha=alpha).loc[[1], cols]
    # Rename paths
    smy['names'] = smy['names'].apply(lambda x: 'Y ~ %s' % x)
    direct.loc[1, 'names'] = 'Direct'
    sxy.loc[1, 'names'] = 'Total'
    # Concatenate and create sig column
    stats = pd.concat((sxm, smy, sxy, direct), ignore_index=True)
    stats['sig'] = np.where(stats['pval'] < alpha, 'Yes', 'No')
    # Bootstrap confidence intervals
    rng = np.random.RandomState(seed)
    idx = rng.choice(np.arange(n), replace=True, size=(n_boot, n))
    ab_estimates = np.zeros(shape=(n_boot, n_mediator))
    for i in range(n_boot):
        ab_estimates[i, :] = _point_estimate(X_val, XM_val, M_val, y_val,
                                             idx[i, :], n_mediator, mtype)
    ab = _point_estimate(X_val, XM_val, M_val, y_val, np.arange(n),
                         n_mediator, mtype)
    indirect = {'names': m, 'coef': ab,
                'se': ab_estimates.std(ddof=1, axis=0),
                'pval': [], ll_name: [], ul_name: [], 'sig': []}
    for j in range(n_mediator):
        ci_j = _bca(ab_estimates[:, j], indirect['coef'][j],
                    alpha=alpha, n_boot=n_boot)
        indirect[ll_name].append(min(ci_j))
        indirect[ul_name].append(max(ci_j))
        # Bootstrapped p-value of indirect effect
        # Note that this is less accurate than a permutation test because the
        # bootstrap distribution is not conditioned on a true null hypothesis.
        # For more details see Hayes and Rockwood 2017
        indirect['pval'].append(_pval_from_bootci(ab_estimates[:, j],
                                                  indirect['coef'][j]))
        indirect['sig'].append('Yes' if indirect['pval'][j] < alpha else 'No')
    # Create output dataframe
    indirect = pd.DataFrame.from_dict(indirect)
    if n_mediator == 1:
        indirect['names'] = 'Indirect'
    else:
        indirect['names'] = indirect['names'].apply(lambda x: 'Indirect %s' % x)
    # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported replacement with identical semantics here.
    stats = pd.concat([stats, indirect], ignore_index=True)
    stats = stats.rename(columns={'names': 'path'})
    # Round
    col_to_round = ['coef', 'se', ll_name, ul_name]
    stats[col_to_round] = stats[col_to_round].round(4)
    if return_dist:
        return stats, np.squeeze(ab_estimates)
    else:
        return stats
def swap_word_order(source):
    """Swap the order of the words in 'source' bitstring"""
    assert len(source) % 4 == 0
    # Unpack as native 32-bit unsigned words, reverse, and repack.
    layout = "I" * (len(source) // 4)
    words = struct.unpack(layout, source)
    return struct.pack(layout, *words[::-1])
def verify_light_chains(self, threshold=0.9):
    '''Clusters the light chains to identify potentially spurious
    (non-lineage) pairings. Following clustering, all pairs in the largest
    light chain cluster are assumed to be correctly paired; their <verified>
    attribute is set to True. For pairs not in the largest light chain
    cluster, <verified> is set to False.

    Inputs (optional)
        threshold: CD-HIT clustering threshold. Default is 0.9.
    '''
    light_seqs = [pair.light for pair in self.lights]
    clusts = cluster(light_seqs, threshold=threshold)
    # The biggest cluster is assumed to contain the correctly paired chains.
    largest = max(clusts, key=lambda c: c.size)
    verified_ids = largest.ids
    for pair in self.lights:
        pair.verified = pair.name in verified_ids
def apply(self, tokens):
    """Applies the named entity recognizer to the given list of tokens,
    where each token is a [word, tag] list."""
    # Note: we could also scan for patterns, e.g.,
    # "my|his|her name is|was *" => NNP-PERS.
    i = 0
    while i < len(tokens):
        w = tokens[i][0].lower()
        # Single-word entities matched by regular expression get the tag.
        if RE_ENTITY1.match(w) or RE_ENTITY2.match(w) or RE_ENTITY3.match(w):
            tokens[i][1] = self.tag
        if w in self:
            for e in self[w]:  # Look ahead to see if successive words match the named entity.
                # If the entity's last element is a command code (self._cmd),
                # it becomes an uppercase tag suffix (e.g. "-PERS") instead
                # of a word to match.
                e, tag = (e[:-1], "-" + e[-1].upper()) if e[-1] in self._cmd else (e, "")
                b = True
                # The loop variable rebinds `e` to each word of the entity;
                # after the loop, `j` indexes the last compared word.
                for j, e in enumerate(e):
                    if i + j >= len(tokens) or tokens[i + j][0].lower() != e:
                        b = False;
                        break
                if b:
                    # Retag the whole matched span, then skip past it.
                    for token in tokens[i:i + j + 1]:
                        token[1] = token[1] if token[1].startswith(self.tag) else self.tag
                        token[1] += tag
                    i += j
                    break
        i += 1
    return tokens
def field_value(key, label, color, padding):
    """Print a specific field's stats."""
    # Without color support the escape codes vanish, so shrink the padding.
    if not clr.has_colors and padding > 0:
        padding = 7
    prefix = "" if color in ("bright gray", "dark gray") else "bright "
    field = clr.stringc(key, "{0}{1}".format(prefix, color))
    field_label = clr.stringc(label, color)
    return "{0:>{1}} {2}".format(field, padding, field_label)
def as_widget(self, widget=None, attrs=None, only_initial=False):
    """Renders the field."""
    if not widget:
        widget = self.field.widget
    if DJANGO_VERSION > (1, 10):
        # so that we can refer to the field when building the rendering context
        widget._field = self.field
    # Make sure that NgWidgetMixin is not already part of the widget's bases
    # so it doesn't get added twice.
    if not isinstance(widget, NgWidgetMixin):
        # Re-class the widget instance in place so the mixin's hooks take
        # effect without constructing a new widget object.
        widget.__class__ = type(widget.__class__.__name__, (NgWidgetMixin, widget.__class__), {})
    return super(NgBoundField, self).as_widget(widget, attrs, only_initial)
def LoadFromString(yaml_doc, product_yaml_key, required_client_values,
                   optional_product_values):
    """Loads the data necessary for instantiating a client from file storage.

    In addition to the required_client_values argument, the yaml file must
    supply the keys used to create OAuth2 credentials. It may also optionally
    set proxy configurations.

    Args:
      yaml_doc: the yaml document whose keys should be used.
      product_yaml_key: The key to read in the yaml as a string.
      required_client_values: A tuple of strings representing values which must
        be in the yaml file for a supported API. If one of these keys is not in
        the yaml file, an error will be raised.
      optional_product_values: A tuple of strings representing optional values
        which may be in the yaml file.

    Returns:
      A dictionary map of the keys in the yaml file to their values. This will
      not contain the keys used for OAuth2 client creation and instead will
      have a GoogleOAuth2Client object stored in the 'oauth2_client' field.

    Raises:
      A GoogleAdsValueError if the given yaml file does not contain the
      information necessary to instantiate a client object - either a
      required_client_values key was missing or an OAuth2 key was missing.
    """
    data = yaml.safe_load(yaml_doc) or {}
    if 'dfp' in data:
        # BUG FIX: the original message concatenated its two fragments
        # without a separating space ('...string with"ad_manager"...').
        raise googleads.errors.GoogleAdsValueError(
            'Please replace the "dfp" key in the configuration YAML string '
            'with "ad_manager" to fix this issue.')
    logging_config = data.get(_LOGGING_KEY)
    if logging_config:
        logging.config.dictConfig(logging_config)
    try:
        product_data = data[product_yaml_key]
    except KeyError:
        raise googleads.errors.GoogleAdsValueError(
            'The "%s" configuration is missing' % (product_yaml_key,))
    if not isinstance(product_data, dict):
        raise googleads.errors.GoogleAdsValueError(
            'The "%s" configuration is empty or invalid' % (product_yaml_key,))
    IncludeUtilitiesInUserAgent(data.get(_UTILITY_REGISTER_YAML_KEY, True))
    original_keys = list(product_data.keys())
    client_kwargs = {}
    try:
        # Pop each required value out of the product section so that the
        # leftover keys can be validated (and warned about) below.
        for key in required_client_values:
            client_kwargs[key] = product_data[key]
            del product_data[key]
    except KeyError:
        raise googleads.errors.GoogleAdsValueError(
            'Some of the required values are missing. Required '
            'values are: %s, actual values are %s'
            % (required_client_values, original_keys))
    proxy_config_data = data.get(_PROXY_CONFIG_KEY, {})
    proxy_config = _ExtractProxyConfig(product_yaml_key, proxy_config_data)
    client_kwargs['proxy_config'] = proxy_config
    client_kwargs['oauth2_client'] = _ExtractOAuth2Client(
        product_yaml_key, product_data, proxy_config)
    client_kwargs[ENABLE_COMPRESSION_KEY] = data.get(
        ENABLE_COMPRESSION_KEY, False)
    client_kwargs[CUSTOM_HEADERS_KEY] = data.get(CUSTOM_HEADERS_KEY, None)
    if SOAP_IMPLEMENTATION_KEY in data:
        client_kwargs[SOAP_IMPLEMENTATION_KEY] = data[SOAP_IMPLEMENTATION_KEY]
    for value in optional_product_values:
        if value in product_data:
            client_kwargs[value] = product_data[value]
            del product_data[value]
    if product_data:
        # Anything left over is unrecognized; warn rather than fail so that
        # forward-compatible configurations keep working.
        warnings.warn(
            'Could not recognize the following keys: %s. '
            'They were ignored.' % (product_data,), stacklevel=3)
    return client_kwargs
def _parse_float_vec ( vec ) :
"""Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats .""" | dtype = np . dtype ( '>u4,>u4' )
vec1 = vec . view ( dtype = dtype )
xport1 = vec1 [ 'f0' ]
xport2 = vec1 [ 'f1' ]
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0 , 1 , 2 , or 3
# places . This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude .
shift = np . zeros ( len ( vec ) , dtype = np . uint8 )
shift [ np . where ( xport1 & 0x00200000 ) ] = 1
shift [ np . where ( xport1 & 0x00400000 ) ] = 2
shift [ np . where ( xport1 & 0x00800000 ) ] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately , ored with the bits
# from the first half that would have been shifted in if we
# could shift a double . All we are worried about are the low
# order 3 bits of the first half since we ' re only shifting by
# 1 , 2 , or 3.
ieee1 >>= shift
ieee2 = ( xport2 >> shift ) | ( ( xport1 & 0x00000007 ) << ( 29 + ( 3 - shift ) ) )
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023 . Or this into the first half of the
# ieee number . The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point . ( had to add > > 24 because C treats &
# 0x7f as 0x7f00000 and Python doesn ' t )
ieee1 |= ( ( ( ( ( ( xport1 >> 24 ) & 0x7f ) - 65 ) << 2 ) + shift + 1023 ) << 20 ) | ( xport1 & 0x80000000 )
ieee = np . empty ( ( len ( ieee1 ) , ) , dtype = '>u4,>u4' )
ieee [ 'f0' ] = ieee1
ieee [ 'f1' ] = ieee2
ieee = ieee . view ( dtype = '>f8' )
ieee = ieee . astype ( 'f8' )
return ieee |
def filter_time_frame(start, delta):
    """Filter :class:`.Line` objects by their connection time.

    :param start: a time expression (see -s argument on --help for its format)
        to filter log lines that are before this time.
    :type start: string
    :param delta: a relative time expression (see -s argument on --help for
        its format) to limit the amount of time log lines will be considered.
    :type delta: string
    :returns: a function that filters by the time a request is made.
    :rtype: function
    """
    # BUG FIX: the original used ``is``/``is not`` to compare against '',
    # which tests object identity, only works by accident of CPython string
    # interning, and raises SyntaxWarning on modern interpreters.
    start_value = start
    delta_value = delta
    end_value = None
    if start_value != '':
        start_value = _date_str_to_datetime(start_value)
    if delta_value != '':
        delta_value = _delta_str_to_timedelta(delta_value)
    if start_value != '' and delta_value != '':
        end_value = start_value + delta_value

    def filter_func(log_line):
        # No lower bound configured: accept everything.
        if start_value == '':
            return True
        elif start_value > log_line.accept_date:
            return False
        # No upper bound configured: anything at/after start passes.
        if end_value is None:
            return True
        elif end_value < log_line.accept_date:
            return False
        return True

    return filter_func
def main(name, options):
    """Create a VM from command-line options, optionally backed by a template.

    :param name: The name of the VM to create.
    :type name: str
    :param options: Parsed command-line options carrying connection details
        and VM parameters; values missing from the command line may be
        inherited from the template named by ``options.template``.
    """
    # Connection settings may come from the command line or the config
    # file, but all three are mandatory one way or the other.
    credentials = {}
    for setting in ("server", "username", "password"):
        value = config._config_value("general", setting, getattr(options, setting))
        if value is None:
            raise ValueError("%s must be supplied on command line"
                             " or in configuration file." % setting)
        credentials[setting] = value
    vm_template = None
    if options.template is not None:
        try:
            vm_template = template.load_template(options.template)
        except TemplateNotFoundError:
            print("ERROR: Template \"%s\" could not be found." % options.template)
            sys.exit(1)
    expected_opts = ["compute_resource", "datastore", "disksize", "nics",
                     "memory", "num_cpus", "guest_id", "host"]
    vm_opts = {}
    for opt in expected_opts:
        vm_opts[opt] = getattr(options, opt)
        if vm_opts[opt] is None:
            # Not on the command line; fall back to the template, which
            # must both exist and define the attribute.
            if vm_template is None:
                raise ValueError("%s not specified on the command line and"
                                 " you have not specified any template to"
                                 " inherit the value from." % opt)
            try:
                vm_opts[opt] = vm_template[opt]
            except AttributeError:
                raise ValueError("%s not specified on the command line and"
                                 " no value is provided in the specified"
                                 " template." % opt)
    client = Client(server=credentials["server"],
                    username=credentials["username"],
                    password=credentials["password"])
    create_vm(client, name, vm_opts["compute_resource"], vm_opts["datastore"],
              vm_opts["disksize"], vm_opts["nics"], vm_opts["memory"],
              vm_opts["num_cpus"], vm_opts["guest_id"], host=vm_opts["host"])
    client.logout()
def get(self):
    """API endpoint to get the related blocks for a transaction.

    Return:
        A ``list`` of ``block_id``s that contain the given transaction. The
        list may be filtered when provided a status query parameter:
        "valid", "invalid", "undecided".
    """
    parser = reqparse.RequestParser()
    parser.add_argument('transaction_id', type=str, required=True)
    request_args = parser.parse_args(strict=True)
    pool = current_app.config['bigchain_pool']
    # Borrow a bigchain connection from the pool only for the lookup.
    with pool() as bigchain:
        return bigchain.get_block_containing_tx(request_args['transaction_id'])
def CheckHash(self, responses):
    """Adds the block hash to the file tracker responsible for this vfs URN."""
    index = responses.request_data["index"]
    if index not in self.state.pending_files:
        # Blobhash for a file we already failed to read and logged below;
        # bail out here to avoid logging duplicates.
        return
    tracker = self.state.pending_files[index]
    hash_response = responses.First()
    if not (responses.success and hash_response):
        urn = tracker["stat_entry"].pathspec.AFF4Path(self.client_urn)
        self.Log("Failed to read %s: %s", urn, responses.status)
        self._FileFetchFailed(index, responses.request.request.name)
        return
    tracker.setdefault("hash_list", []).append(hash_response)
    self.state.blob_hashes_pending += 1
    # Flush once enough hashes are queued to make a file-store call worthwhile.
    if self.state.blob_hashes_pending > self.MIN_CALL_TO_FILE_STORE:
        self.FetchFileContent()
def geo_haystack(self, name, bucket_size):
    """Create a Haystack index. See:
    http://www.mongodb.org/display/DOCS/Geospatial+Haystack+Indexing

    :param name: Name of the indexed column
    :param bucket_size: Size of the haystack buckets (see mongo docs)
    :return: this builder, for chaining
    """
    component = (name, 'geoHaystack')
    # Remember the bucket size for when the index is actually created.
    self.__bucket_size = bucket_size
    self.components.append(component)
    return self
def unit(session):
    """Run the unit test suite.

    Python 2.7 sessions are exercised once per supported Django release;
    other interpreters run with the default dependencies only.
    See https://www.djangoproject.com/download/ for supported versions.
    """
    django_deps_27 = [
        ('django==1.8.19',),
        ('django >= 1.11.0, < 2.0.0dev',),
    ]
    if session.virtualenv.interpreter == '2.7':
        for django_dep in django_deps_27:
            default(session, django_dep=django_dep)
    else:
        default(session)
def _literal_handling(self, cursor):
    """Parse all literals associated with this cursor.

    Literal handling is usually useful only for initialization values.
    We cannot take the shortcut of simply joining the token spellings,
    because some literals (strings, numeric suffixes, macros) need
    per-token cleaning first.

    Returns the single cleaned value when only one token survives,
    otherwise a list of values (collapsed to one string when the cursor
    is itself a string literal built from several macros).
    """
    tokens = list(cursor.get_tokens())
    log.debug('literal has %d tokens.[ %s ]', len(tokens),
              str([str(t.spelling) for t in tokens]))
    log.debug('cursor.type:%s', cursor.type.kind.name)
    final_value = []
    for i, token in enumerate(tokens):
        value = token.spelling
        log.debug('token:%s tk.kd:%11s tk.cursor.kd:%15s cursor.kd:%15s',
                  token.spelling, token.kind.name, token.cursor.kind.name,
                  cursor.kind.name)
        # Punctuation is normally not part of the init value, but only in
        # the specific cases of a trailing ';' or the punctuation that
        # belongs to an initializer-list expression.
        if (token.kind == TokenKind.PUNCTUATION
                and (token.cursor.kind == CursorKind.INVALID_FILE
                     or token.cursor.kind == CursorKind.INIT_LIST_EXPR)):
            log.debug('IGNORE token %s', value)
            continue
        if token.kind == TokenKind.COMMENT:
            log.debug('Ignore comment %s', value)
            continue
        if token.location not in cursor.extent:
            # Most probably a bug in clang/python-clang: on a #define with
            # no value, a token is taken from the next line, e.g.
            #   #define A
            #   extern int i;
            # yields "extern" as the last token of macro "A".
            log.debug('FIXME BUG: token.location not in cursor.extent %s', value)
            continue
        # Clean up language-specific prefixes/suffixes on POD literals.
        if token.cursor.kind == CursorKind.INTEGER_LITERAL:
            # Strip integer type suffixes (L/U, l/u) from constants.
            value = value.replace('L', '').replace('U', '')
            value = value.replace('l', '').replace('u', '')
            if value[:2] == '0x' or value[:2] == '0X':
                value = '0x%s' % value[2:]
            else:
                value = int(value)
        elif token.cursor.kind == CursorKind.FLOATING_LITERAL:
            # Strip float type suffixes (f/F) from constants.
            value = value.replace('f', '').replace('F', '')
            value = float(value)
        elif (token.cursor.kind == CursorKind.CHARACTER_LITERAL
              or token.cursor.kind == CursorKind.STRING_LITERAL):
            value = self._clean_string_literal(token.cursor, value)
        elif token.cursor.kind == CursorKind.MACRO_INSTANTIATION:
            # Substitute the macro's registered (already cleaned) body.
            value = self.get_registered(value).body
        elif token.cursor.kind == CursorKind.MACRO_DEFINITION:
            if i == 0:
                # The macro's own name; keep the raw spelling.
                pass
            elif token.kind == TokenKind.LITERAL:
                value = self._clean_string_literal(token.cursor, value)
            elif token.kind == TokenKind.IDENTIFIER:
                # Possibly another macro used inside this one; expand it.
                value = self.get_registered(value).body
        final_value.append(value)
    if len(final_value) == 1:
        return final_value[0]
    # A string macro built from several macros collapses to one string.
    if isinstance(final_value, list) and cursor.kind == CursorKind.STRING_LITERAL:
        final_value = ''.join(final_value)
    return final_value
def autohash_decorate(cls,                          # type: Type[T]
                      include=None,                 # type: Union[str, Tuple[str]]
                      exclude=None,                 # type: Union[str, Tuple[str]]
                      only_constructor_args=False,  # type: bool
                      only_public_fields=False,     # type: bool
                      ):
    # type: (...) -> Type[T]
    """Generate the methods that make instances of ``cls`` hashable, applied
    manually rather than through the ``@autohash`` decorator.

    :param cls: the class on which to execute. Note that it won't be wrapped.
    :param include: a tuple of explicit attribute names to include (None means all).
    :param exclude: a tuple of explicit attribute names to exclude. In such case,
        include should be None.
    :param only_constructor_args: if False (default), all fields take part in
        the hash, even ones defined in the constructor or dynamically. If True,
        only constructor arguments do. Note that this behaviour is the opposite
        from @autodict.
    :param only_public_fields: only used when only_constructor_args is False.
        If False (default) all fields are hashed; if True, class-private fields
        are left out of the hash. Note that this behaviour is the opposite from
        @autodict.
    :return: the same class, modified in place.
    """
    # Refuse to combine with other known decorators that would conflict.
    _check_known_decorators(cls, '@autohash')
    # Perform the class modification.
    _execute_autohash_on_class(cls, include=include, exclude=exclude,
                               only_constructor_args=only_constructor_args,
                               only_public_fields=only_public_fields)
    return cls
def set_offchain_secret(
        state: MediatorTransferState,
        channelidentifiers_to_channels: ChannelMap,
        secret: Secret,
        secrethash: SecretHash,
) -> List[Event]:
    """Set the secret to all mediated transfers."""
    state.secret = secret
    # Register the secret on both sides (payer first, then payee) of every
    # mediated pair whose channel is still known.
    for pair in state.transfers_pair:
        for transfer in (pair.payer_transfer, pair.payee_transfer):
            channel_state = channelidentifiers_to_channels.get(
                transfer.balance_proof.channel_identifier,
            )
            if channel_state:
                channel.register_offchain_secret(channel_state, secret, secrethash)
    # The secret should never be revealed if `waiting_transfer` is not None.
    # For this to happen this node must have received a transfer, which it did
    # *not* mediate, and nevertheless the secret was revealed.
    # This can only be possible if the initiator reveals the secret without the
    # target's secret request, or if the node which sent the `waiting_transfer`
    # has sent another transfer which reached the target (meaning someone along
    # the path will lose tokens).
    if state.waiting_transfer:
        waiting_channel = channelidentifiers_to_channels.get(
            state.waiting_transfer.transfer.balance_proof.channel_identifier,
        )
        if waiting_channel:
            channel.register_offchain_secret(waiting_channel, secret, secrethash)
        unexpected_reveal = EventUnexpectedSecretReveal(
            secrethash=secrethash,
            reason='The mediator has a waiting transfer.',
        )
        return [unexpected_reveal]
    return list()
def _get_batch_gender(items):
    """Retrieve gender for a batch of items if consistent.

    Better not to specify for mixed populations; CNVkit will work it out.
    https://github.com/bcbio/bcbio-nextgen/commit/1a0e217c8a4d3cee10fa890fb3cfd4db5034281d#r26279752
    """
    genders = {population.get_gender(data) for data in items}
    # Only report a gender when the whole batch agrees and it is known;
    # otherwise return None and let the caller decide.
    if len(genders) == 1:
        gender = next(iter(genders))
        if gender != "unknown":
            return gender
def readBoolean(self):
    """Read C{Boolean}.

    @raise ValueError: Error reading Boolean.
    @rtype: C{bool}
    @return: A Boolean value, C{True} if the byte is nonzero,
        C{False} otherwise.
    """
    byte = self.stream.read(1)
    if byte == '\x01':
        return True
    if byte == '\x00':
        return False
    # Anything other than the two canonical encodings is malformed input.
    raise ValueError("Error reading boolean")
def isSelfVerificationEnabled(self):
    """Return whether the user that submitted a result for this analysis
    must also be able to verify the result.

    :returns: true or false
    """
    setup_default = self.bika_setup.getSelfVerificationEnabled()
    override = self.getSelfVerification()
    # -1 means "inherit the setup-wide default"; otherwise 1 enables it.
    if override == -1:
        return setup_default
    return override == 1
def load_toml_validator_config(filename):
    """Returns a ValidatorConfig created by loading a TOML file from the
    filesystem.  A missing file yields an empty (default) config; an
    unreadable or malformed file raises LocalConfigurationError.
    """
    if not os.path.exists(filename):
        LOGGER.info(
            "Skipping validator config loading from non-existent config file:"
            " %s", filename)
        return ValidatorConfig()
    LOGGER.info("Loading validator information from config: %s", filename)
    try:
        with open(filename) as fd:
            raw_config = fd.read()
    except IOError as e:
        raise LocalConfigurationError(
            "Unable to load validator configuration file: {}".format(str(e)))
    toml_config = toml.loads(raw_config)
    # Reject unexpected top-level keys so configuration typos fail loudly.
    valid_keys = {
        'bind', 'endpoint', 'peering', 'seeds', 'peers',
        'network_public_key', 'network_private_key', 'scheduler',
        'permissions', 'roles', 'opentsdb_url', 'opentsdb_db',
        'opentsdb_username', 'opentsdb_password',
        'minimum_peer_connectivity', 'maximum_peer_connectivity',
        'state_pruning_block_depth', 'fork_cache_keep_time',
        'component_thread_pool_workers', 'network_thread_pool_workers',
        'signature_thread_pool_workers',
    }
    invalid_keys = set(toml_config.keys()).difference(valid_keys)
    if invalid_keys:
        raise LocalConfigurationError(
            "Invalid keys in validator config: "
            "{}".format(", ".join(sorted(list(invalid_keys)))))
    # Each "bind" entry looks like "<role>:<address>"; strip off the role.
    bind_network = None
    bind_component = None
    bind_consensus = None
    for bind in toml_config.get("bind", []):
        if "network" in bind:
            bind_network = bind[bind.find(":") + 1:]
        if "component" in bind:
            bind_component = bind[bind.find(":") + 1:]
        if "consensus" in bind:
            bind_consensus = bind[bind.find(":") + 1:]
    network_public_key = None
    network_private_key = None
    if toml_config.get("network_public_key") is not None:
        network_public_key = toml_config.get("network_public_key").encode()
    if toml_config.get("network_private_key") is not None:
        network_private_key = toml_config.get("network_private_key").encode()
    return ValidatorConfig(
        bind_network=bind_network,
        bind_component=bind_component,
        bind_consensus=bind_consensus,
        endpoint=toml_config.get("endpoint", None),
        peering=toml_config.get("peering", None),
        seeds=toml_config.get("seeds", None),
        peers=toml_config.get("peers", None),
        network_public_key=network_public_key,
        network_private_key=network_private_key,
        scheduler=toml_config.get("scheduler", None),
        permissions=parse_permissions(toml_config.get("permissions", None)),
        roles=toml_config.get("roles", None),
        opentsdb_url=toml_config.get("opentsdb_url", None),
        opentsdb_db=toml_config.get("opentsdb_db", None),
        opentsdb_username=toml_config.get("opentsdb_username", None),
        opentsdb_password=toml_config.get("opentsdb_password", None),
        minimum_peer_connectivity=toml_config.get(
            "minimum_peer_connectivity", None),
        maximum_peer_connectivity=toml_config.get(
            "maximum_peer_connectivity", None),
        state_pruning_block_depth=toml_config.get(
            "state_pruning_block_depth", None),
        fork_cache_keep_time=toml_config.get("fork_cache_keep_time", None),
        component_thread_pool_workers=toml_config.get(
            "component_thread_pool_workers", None),
        network_thread_pool_workers=toml_config.get(
            "network_thread_pool_workers", None),
        signature_thread_pool_workers=toml_config.get(
            "signature_thread_pool_workers", None))
def recursive_division(self, cells, min_size, width, height, x=0, y=0, depth=0):
    """Recursive division room generation:

    1. Split the room randomly
       1a. Dodge towards the larger half if the split lands in a doorway
    2. Place a doorway randomly
    3. Recurse into each half
    """
    assert isinstance(cells, list)
    assert isinstance(min_size, (int, float))
    assert isinstance(width, (int, float))
    assert isinstance(height, (int, float))
    assert isinstance(x, (int, float))
    assert isinstance(y, (int, float))
    assert isinstance(depth, int)
    if width <= min_size or height <= min_size:
        return
    # Divide across the longer dimension (random choice on a tie).
    if width < height:
        axis = VERTICAL
    elif height < width:
        axis = HORIZONTAL
    else:
        axis = randint(0, 1)
    if axis == HORIZONTAL:
        cut_size, gap_size = width, height
    else:
        cut_size, gap_size = height, width
    if cut_size - min_size < min_size:
        return  # too small to cut
    if gap_size - min_size < min_size:
        return  # too small for a doorway
    # Random division line and doorway position.
    cut = randint(min_size, cut_size - min_size)
    gap = randint(min_size, gap_size - min_size)
    if not (cut > 0 and gap > 0):
        return  # reached a zero-sized cell

    def dodge_doors(cut):
        """Nudge the cut away from existing doorways; None when impossible."""
        assert isinstance(cut, (int, float))
        empty = False
        # Inspect the wall tiles at both ends of the proposed division.
        if axis == HORIZONTAL:
            door = cells[x + gap_size][y + cut]
            empty = empty or not door or not door.tile
            door = cells[x][y + cut]
            empty = empty or not door or not door.tile
        else:
            door = cells[x + cut][y + gap_size]
            empty = empty or not door or not door.tile
            door = cells[x + cut][y]
            empty = empty or not door or not door.tile
        if empty:
            # A doorway is in the way: shift towards the larger half and retry.
            if gap + (min_size / 2) > (gap_size / 2) - (min_size / 2):
                cut -= 1
            else:
                cut += 1
            if cut < min_size or cut > cut_size - min_size:
                return None  # reached minimum size
            return dodge_doors(cut)
        return cut

    # Skip the doors check the first time around -- there are none yet.
    if depth > 0:
        cut = dodge_doors(cut)
        if cut is None:
            return None  # no viable cut found
    depth += 1
    # Lay the new wall, leaving a gap for the doorway.
    # Wall tiles are copied from cell (0, 0).
    for i in xrange(0, gap_size):
        if abs(gap - i) > 0:
            if axis == HORIZONTAL:
                cells[x + i][y + cut].tile = cells[0][0].tile
            else:
                cells[x + cut][y + i].tile = cells[0][0].tile
    # Recurse into both halves.
    nx, ny = x, y
    w, h = [cut, height] if (axis == HORIZONTAL) else [width, cut]
    self.recursive_division(cells, min_size, w, h, nx, ny, depth)
    nx, ny = [x + cut, y] if (axis != HORIZONTAL) else [x, y + cut]
    w, h = ([cut_size - cut, height] if (axis == HORIZONTAL)
            else [width, cut_size - cut])
    self.recursive_division(cells, min_size, w, h, nx, ny, depth)
def copy_plan(modeladmin, request, queryset):
    """Admin command for duplicating plans preserving quotas and pricings."""
    for plan in queryset:
        clone = deepcopy(plan)
        # Clearing the pk and forcing an insert makes the ORM create a
        # brand-new row; the copy starts disabled and non-default.
        clone.id = None
        clone.available = False
        clone.default = False
        clone.created = None
        clone.save(force_insert=True)
        # Re-point each related pricing and quota row at the clone and
        # insert it as a new record (pricings first, matching save order).
        related_rows = list(plan.planpricing_set.all()) + list(plan.planquota_set.all())
        for related in related_rows:
            related.id = None
            related.plan = clone
            related.save(force_insert=True)
def split_hostname_from_port(cls, hostname):
    """Given a ``hostname:port`` string, return a ``(hostname, port)`` tuple.

    The port is returned as an int, or None when the string carries no
    single port suffix (no colon, or more than one colon).
    """
    host, sep, port_text = hostname.partition(":")
    # Only a single "host:port" shape yields a port; extra colons mean the
    # remainder is not a plain port and is ignored, matching split(":", 2).
    port = int(port_text) if sep and ":" not in port_text else None
    return host, port
def _unpickle_collection ( self , collection ) :
"""Unpickles all members of the specified dictionary .""" | for mkey in collection :
if isinstance ( collection [ mkey ] , list ) :
for item in collection [ mkey ] :
item . unpickle ( self )
else :
collection [ mkey ] . unpickle ( self ) |
def get_module(self, lpBaseOfDll):
    """@type  lpBaseOfDll: int
    @param lpBaseOfDll: Base address of the DLL to look for.

    @rtype:  L{Module}
    @return: Module object with the given base address.

    @raise KeyError: No module is loaded at that base address.
    """
    # Make sure the module snapshot is populated before looking up.
    self.__initialize_snapshot()
    try:
        return self.__moduleDict[lpBaseOfDll]
    except KeyError:
        msg = "Unknown DLL base address %s" % HexDump.address(lpBaseOfDll)
        raise KeyError(msg)
def parse(self):
    """Parse the options."""
    # Run the parser.
    options, arguments = self.parser.parse_known_args(self.arguments)
    self.opt = options
    self.arg = arguments
    self.check()
    # Enable --all if no particular stat or group was selected.
    options.all = not any(
        getattr(options, stat.dest) or getattr(options, group.dest)
        for group in self.sample_stats.stats
        for stat in group.stats)
    # Time period handling.
    if options.since is None and options.until is None:
        options.since, options.until, period = did.base.Date.period(arguments)
    else:
        options.since = did.base.Date(options.since or "1993-01-01")
        options.until = did.base.Date(options.until or "today")
        # Make the 'until' limit inclusive.
        options.until.date += delta(days=1)
        period = "given date range"
    # Validate the date range.
    if not options.since.date < options.until.date:
        raise RuntimeError(
            "Invalid date range ({0} to {1})".format(
                options.since, options.until.date - delta(days=1)))
    header = "Status report for {0} ({1} to {2}).".format(
        period, options.since, options.until.date - delta(days=1))
    # Finito.
    log.debug("Gathered options:")
    log.debug('options = {0}'.format(options))
    return options, header
def get_subject_with_local_validation(jwt_bu64, cert_obj):
    """Validate the JWT and return the subject it contains.

    - The JWT is validated by checking that it was signed with a CN certificate.
    - The returned subject can be trusted for authz and authn operations.
    - Possible validation errors include:
      - A trusted (TLS/SSL) connection could not be made to the CN holding the
        signing certificate.
      - The JWT could not be decoded.
      - The JWT signature was invalid.
      - The JWT claim set contains invalid "Not Before" or "Expiration Time"
        claims.

    Args:
      jwt_bu64: bytes
        The JWT encoded using a URL safe flavor of Base64.
      cert_obj: cryptography.Certificate
        Public certificate used for signing the JWT (typically the CN cert).

    Returns:
      - On successful validation, the subject contained in the JWT.
      - If validation fails for any reason, errors are logged and None is
        returned.
    """
    try:
        jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    if 'sub' in jwt_dict:
        return jwt_dict['sub']
    # Valid token but no subject claim: log it and fall through to None.
    log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
def string_value(node):
    """Compute the string-value of a node (XPath semantics)."""
    if node.nodeType in (node.DOCUMENT_NODE, node.ELEMENT_NODE):
        # Concatenate the data of every descendant text node.
        parts = [descendant.data
                 for descendant in axes['descendant'](node)
                 if descendant.nodeType == descendant.TEXT_NODE]
        return u''.join(parts)
    if node.nodeType == node.ATTRIBUTE_NODE:
        return node.value
    if node.nodeType in (node.PROCESSING_INSTRUCTION_NODE,
                         node.COMMENT_NODE,
                         node.TEXT_NODE):
        return node.data
def has_bad_headers(self):
    """Checks for bad headers i.e. newlines in subject, sender or recipients.
    RFC5322 allows multiline CRLF with trailing whitespace (FWS) in headers.
    """
    for header in [self.sender, self.reply_to] + self.recipients:
        if _has_newline(header):
            return True
    # A folded subject is tolerated only when every continuation line is
    # non-empty, starts with folding whitespace, and hides no stray newline.
    if self.subject and _has_newline(self.subject):
        for linenum, line in enumerate(self.subject.split('\r\n')):
            if not line:
                return True
            if linenum > 0 and line[0] not in '\t ':
                return True
            if _has_newline(line):
                return True
            if not line.strip():
                return True
    return False
def subspace_detect(detectors, stream, threshold, trig_int, moveout=0, min_trig=1, parallel=True, num_cores=None):
    """Conduct subspace detection with chosen detectors.

    :type detectors: list
    :param detectors:
        list of :class:`eqcorrscan.core.subspace.Detector` to be used for detection.
    :type stream: obspy.core.stream.Stream
    :param stream: Stream to detect within.
    :type threshold: float
    :param threshold:
        Threshold between 0 and 1 for detection, see :func:`Detector.detect`
    :type trig_int: float
    :param trig_int: Minimum trigger interval in seconds.
    :type moveout: float
    :param moveout:
        Maximum allowable moveout window for non-multiplexed, network detection.
    :type min_trig: int
    :param min_trig:
        Minimum number of stations exceeding threshold for non-multiplexed,
        network detection. See note in :func:`Detector.detect`.
    :type parallel: bool
    :param parallel: Whether to run detectors in parallel in groups.
    :type num_cores: int
    :param num_cores:
        How many cpu cores to use if parallel==True. If set to None (default),
        will use all available cores.
    :rtype: list
    :return:
        List of :class:`eqcorrscan.core.match_filter.Detection` detections.

    .. Note::
        This will loop through your detectors using their detect method.
        Detectors sharing processing parameters are run against data that is
        processed only once per unique parameter set.
    """
    from multiprocessing import Pool, cpu_count
    # Collect the unique pre-processing parameter sets so the raw data is
    # filtered/resampled only once per set.
    parameters = []
    detections = []
    for detector in detectors:
        parameter = (detector.lowcut, detector.highcut, detector.filt_order, detector.sampling_rate, detector.multiplex, detector.stachans)
        if parameter not in parameters:
            parameters.append(parameter)
    for parameter_set in parameters:
        parameter_detectors = []
        for detector in detectors:
            det_par = (detector.lowcut, detector.highcut, detector.filt_order, detector.sampling_rate, detector.multiplex, detector.stachans)
            if det_par == parameter_set:
                parameter_detectors.append(detector)
        # BUG FIX: the original rebound the processed data to ``stream``,
        # clobbering the input Stream; with more than one parameter set every
        # later iteration re-processed already-filtered data (and ``stream``
        # was no longer a Stream at all).  Use a separate name instead.
        processed_stream, stachans = _subspace_process(streams=[stream.copy()], lowcut=parameter_set[0], highcut=parameter_set[1], filt_order=parameter_set[2], sampling_rate=parameter_set[3], multiplex=parameter_set[4], stachans=parameter_set[5], parallel=True, align=False, shift_len=None, reject=False)
        if not parallel:
            for detector in parameter_detectors:
                detections += _detect(detector=detector, st=processed_stream[0], threshold=threshold, trig_int=trig_int, moveout=moveout, min_trig=min_trig, process=False, extract_detections=False, debug=0)
        else:
            ncores = num_cores if num_cores else cpu_count()
            pool = Pool(processes=ncores)
            results = [pool.apply_async(_detect, args=(detector, processed_stream[0], threshold, trig_int, moveout, min_trig, False, False, 0)) for detector in parameter_detectors]
            pool.close()
            try:
                _detections = [p.get() for p in results]
            except KeyboardInterrupt as e:  # pragma: no cover
                pool.terminate()
                raise e
            pool.join()
            # _detect may return a single Detection or a list of them.
            for d in _detections:
                if isinstance(d, list):
                    detections += d
                else:
                    detections.append(d)
    return detections
def _raw(self, msg):
    """Send *msg* to the device verbatim, flushing when auto-flush is enabled.

    :param msg: arbitrary code to be sent
    :type msg: bytes
    """
    self.device.write(msg)
    if not self.auto_flush:
        return
    self.flush()
def find_occurrences(self, resource=None, pymodule=None):
    """Generate `Occurrence` instances for textual matches that survive
    every registered filter."""
    tools = _OccurrenceToolsCreator(self.project, resource=resource, pymodule=pymodule, docs=self.docs)
    for offset in self._textual_finder.find_offsets(tools.source_code):
        candidate = Occurrence(tools, offset)
        # The first filter with a definite (non-None) verdict decides:
        # truthy -> yield, falsy -> reject this candidate.
        for predicate in self.filters:
            verdict = predicate(candidate)
            if verdict is None:
                continue
            if verdict:
                yield candidate
            break
def initializePage(self):
    """Rebuild the structure tree from the scaffold's current structure,
    suppressing signals and repaints while repopulating."""
    tree = self.uiStructureTREE
    tree.blockSignals(True)
    tree.setUpdatesEnabled(False)
    tree.clear()
    structure = self.scaffold().structure()
    self._structure = structure
    for entry in structure:
        XScaffoldElementItem(tree, entry)
    tree.blockSignals(False)
    tree.setUpdatesEnabled(True)
def _find_contpix(wl, fluxes, ivars, target_frac):
    """Find continuum pixels in spectra, meeting a target fraction.

    Parameters
    ----------
    wl : numpy ndarray
        rest-frame wavelength vector
    fluxes : numpy ndarray
        pixel intensities
    ivars : numpy ndarray
        inverse variances, parallel to fluxes
    target_frac : float
        the fraction of pixels in spectrum desired to be continuum

    Returns
    -------
    contmask : boolean numpy ndarray
        True corresponds to continuum pixels
    """
    print("Target frac: %s" % (target_frac))
    # Pixels whose inverse variance is pinned at SMALL with zero spread
    # across spectra carry no information and are excluded from the count.
    bad1 = np.median(ivars, axis=0) == SMALL
    bad2 = np.var(ivars, axis=0) == 0
    bad = np.logical_and(bad1, bad2)
    npixels = len(wl) - sum(bad)
    f_cut = 0.0001
    stepsize = 0.0001
    sig_cut = 0.0001
    contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
    frac = sum(contmask) / float(npixels) if npixels > 0 else 0
    # Loosen the cuts until the desired continuum fraction is reached.
    # BUG FIX: guard on npixels > 0 — with no usable pixels the fraction can
    # never grow and the original loop never terminated.
    while frac < target_frac and npixels > 0:
        f_cut += stepsize
        sig_cut += stepsize
        contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
        frac = sum(contmask) / float(npixels)
    # BUG FIX: the original compared ``frac`` (a fraction <= 1) against
    # ``0.10 * npixels`` (a pixel count), so this warning almost never fired.
    if frac > 0.10:
        print("Warning: Over 10% of pixels identified as continuum.")
    print("%s out of %s pixels identified as continuum" % (sum(contmask), npixels))
    print("Cuts: f_cut %s, sig_cut %s" % (f_cut, sig_cut))
    return contmask
def References(self):
    """Get all references (lazily computed and cached on first access).

    Returns:
        dict:
            Key (UInt256): input PrevHash
            Value (TransactionOutput): object.
    """
    if self.__references is None:
        refs = {}
        # group by the input prevhash so each referenced transaction is
        # fetched once per run of adjacent inputs.
        # NOTE(review): itertools.groupby only groups *consecutive* equal
        # keys — this assumes self.inputs is ordered by PrevHash.  Non-
        # adjacent duplicates just trigger extra GetTransaction lookups;
        # the result stays correct since refs is keyed per input.
        for hash, group in groupby(self.inputs, lambda x: x.PrevHash):
            tx, height = GetBlockchain().GetTransaction(hash.ToBytes())
            # unknown transactions are silently skipped
            if tx is not None:
                for input in group:
                    refs[input] = tx.outputs[input.PrevIndex]
        self.__references = refs
    return self.__references
def distance(self, other_or_start=None, end=None, features=False):
    """Check the distance between this and another interval.

    Parameters
    ----------
    other_or_start : Interval or int
        either an integer or an Interval with a start attribute indicating
        the start of the interval
    end : int
        if `other_or_start` is an integer, this must be an integer
        indicating the end of the interval
    features : bool
        if True, the features, such as CDS, intron, etc. that this feature
        overlaps are returned.

    Returns 0 when the intervals touch or overlap, otherwise the size of
    the gap between them.
    """
    # NOTE(review): ``features`` is documented but never used in this body —
    # confirm whether feature reporting was meant to be implemented here.
    if end is None:
        # other_or_start is an Interval; only same-chromosome comparisons
        # are meaningful.
        assert other_or_start.chrom == self.chrom
    other_start, other_end = get_start_end(other_or_start, end)
    if other_start > self.end:
        return other_start - self.end
    if self.start > other_end:
        return self.start - other_end
    return 0
def get_connection_details(session, vcenter_resource_model, resource_context):
    """Retrieve the vCenter connection details from the resource model attributes.

    :param CloudShellAPISession session: API session, used to decrypt the password
    :param VMwarevCenterResourceModel vcenter_resource_model: vCenter resource model
    :param ResourceContextDetails resource_context: the context of the command
        (supplies the vCenter address)
    :return: VCenterConnectionDetails carrying url, user and decrypted password
    """
    # CLEANUP: removed the original's dead no-op statements
    # ``session = session`` and ``resource_context = resource_context``.
    # get vCenter connection details from vCenter resource
    user = vcenter_resource_model.user
    vcenter_url = resource_context.address
    password = session.DecryptPassword(vcenter_resource_model.password).Value
    return VCenterConnectionDetails(vcenter_url, user, password)
def is_legal(self, layers=None):
    '''Judge whether the given layers (default: self.layers) form a legal graph.'''
    if layers is None:
        layers = self.layers
    for layer in layers:
        # Original semantics: only layers whose is_delete flag is exactly
        # False are validated.
        if layer.is_delete is not False:
            continue
        if len(layer.input) != layer.input_size:
            return False
        if len(layer.output) < layer.output_size:
            return False
    # layer_num <= max_layer_num
    if self.layer_num(layers) > self.max_layer_num:
        return False
    # reject graphs with loops or unreachable layers
    return self.is_topology(layers) is not False
def gamma(ranks_list1, ranks_list2):
    '''Goodman and Kruskal's gamma correlation coefficient.

    :param ranks_list1: a list of ranks (integers)
    :param ranks_list2: a second list of ranks (integers) of equal length
        with corresponding entries
    :return: [num_tied_x, num_tied_y, num_tied_xy, gamma] where gamma is the
        rank correlation ignoring ties, or the string 'n/a' when every pair
        is tied (gamma is undefined).
    '''
    num_concordant_pairs = 0
    num_discordant_pairs = 0
    num_tied_x = 0
    num_tied_y = 0
    num_tied_xy = 0
    num_items = len(ranks_list1)
    # Examine every unordered pair (i, j), i < j.
    for i in range(num_items):
        rank_1 = ranks_list1[i]
        rank_2 = ranks_list2[i]
        for j in range(i + 1, num_items):
            diff1 = ranks_list1[j] - rank_1
            diff2 = ranks_list2[j] - rank_2
            if (diff1 > 0 and diff2 > 0) or (diff1 < 0 and diff2 < 0):
                num_concordant_pairs += 1
            elif (diff1 > 0 and diff2 < 0) or (diff1 < 0 and diff2 > 0):
                num_discordant_pairs += 1
            elif diff1 == 0 and diff2 == 0:
                num_tied_xy += 1
            elif diff1 == 0:
                num_tied_x += 1
            elif diff2 == 0:
                num_tied_y += 1
    # FIX: catch only the division-by-zero case instead of a bare except,
    # which silently hid unrelated errors.
    try:
        gamma_corr_coeff = float(num_concordant_pairs - num_discordant_pairs) / float(num_concordant_pairs + num_discordant_pairs)
    except ZeroDivisionError:
        # no untied pairs at all: gamma is undefined
        gamma_corr_coeff = 'n/a'
    return [num_tied_x, num_tied_y, num_tied_xy, gamma_corr_coeff]
def _filter_hooks ( self , * hook_kinds ) :
"""Filter a list of hooks , keeping only applicable ones .""" | hooks = sum ( ( self . hooks . get ( kind , [ ] ) for kind in hook_kinds ) , [ ] )
return sorted ( hook for hook in hooks if hook . applies_to ( self . transition , self . current_state ) ) |
def getAddr(self, ifname):
    """Get the inet addr for an interface.

    @param ifname: interface name
    @type ifname: string
    """
    # macOS has no SIOCGIFADDR ioctl path; shell out to ifconfig there.
    if sys.platform != 'darwin':
        return self._getaddr(ifname, self.SIOCGIFADDR)
    return ifconfig_inet(ifname).get('address')
def init_sentry(self):
    """Initialize sentry.io error logging for this session.

    No-op when ``self.use_sentry`` is falsy.
    """
    if not self.use_sentry:
        return
    sentry_config = self.keychain.get_service("sentry")
    tag_data = {
        "repo": self.repo_name,
        "branch": self.repo_branch,
        "commit": self.repo_commit,
        "cci version": cumulusci.__version__,
    }
    # user-configured tags override the defaults above
    tag_data.update(self.config.get("sentry_tags", {}))
    environment = self.config.get("sentry_environment", "CumulusCI CLI")
    self.sentry = raven.Client(
        dsn=sentry_config.dsn,
        environment=environment,
        tags=tag_data,
        processors=("raven.processors.SanitizePasswordsProcessor",),
    )
def get_all_migrations(path, databases=None):
    """Return a dictionary of database => [(number, full_path)] representing
    all migrations contained in ``path``.

    :param path: root migrations directory.
    :param databases: optional collection of database aliases to keep; all
        databases are returned when falsy.
    :raises MigrationError: when a migration file name does not begin with
        a number.
    """
    # database: [(number, full_path)]
    possible_migrations = defaultdict(list)
    try:
        in_directory = sorted(get_file_list(path))
    except OSError:
        import traceback
        # CONSISTENCY FIX: use the print() function like the rest of this
        # file (the original used a Python-2-only print statement); a single
        # parenthesized argument behaves identically on Python 2.
        print("An error occurred while reading migrations from %r:" % path)
        traceback.print_exc()
        return {}
    # Iterate through our results and discover which migrations are
    # actually runnable
    for full_path in in_directory:
        child_path, script = os.path.split(full_path)
        name, ext = os.path.splitext(script)
        # the database component is the default alias for files in the root
        # directory, else the name of the containing subdirectory
        if path == child_path:
            db = DEFAULT_DB_ALIAS
        else:
            db = os.path.split(child_path)[-1]
        # filter by database if set
        if databases and db not in databases:
            continue
        match = MIGRATION_NAME_RE.match(name)
        if match is None:
            raise MigrationError("Invalid migration file prefix %r " "(must begin with a number)" % name)
        number = int(match.group(1))
        if ext in (".sql", ".py"):
            possible_migrations[db].append((number, full_path))
    return possible_migrations
def get_user_by_email(server_context, email):
    """Get the user with the provided email; raise ValueError if not found.

    :param server_context: A LabKey server context. See utils.create_server_context.
    :param email: email address to look up.
    :return: the matching user record (dict).
    """
    url = server_context.build_url(user_controller, 'getUsers.api')
    payload = dict(includeDeactivatedAccounts=True)
    result = server_context.make_request(url, payload)
    if result is None or result['users'] is None:
        raise ValueError("No Users in container" + email)
    found = next((u for u in result['users'] if u['email'] == email), None)
    if found is None:
        raise ValueError("User not found: " + email)
    return found
def create(database, tlmdict=None):
    """Create a new database for the given Telemetry Dictionary and return
    a connection to it.

    :param database: database name/path handed to ``connect``.
    :param tlmdict: telemetry dictionary; defaults to ``tlm.getDefaultDict()``.
    :return: open database connection with one table per packet definition.
    """
    if tlmdict is None:
        tlmdict = tlm.getDefaultDict()
    dbconn = connect(database)
    # IDIOM FIX: only the definitions are used, so iterate .values()
    # instead of .items() with an unused key.
    for defn in tlmdict.values():
        createTable(dbconn, defn)
    return dbconn
def from_pb(cls, database_pb, instance, pool=None):
    """Create an instance of this class from a protobuf.

    :type database_pb:
        :class:`google.spanner.v2.spanner_instance_admin_pb2.Instance`
    :param database_pb: An instance protobuf object.
    :type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
    :param instance: The instance that owns the database.
    :type pool: concrete subclass of
        :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
    :param pool: (Optional) session pool to be used by database.
    :rtype: :class:`Database`
    :returns: The database parsed from the protobuf response.
    :raises ValueError:
        if the instance name does not match the expected format, or if the
        parsed project ID does not match the project ID on the instance's
        client, or if the parsed instance ID does not match the instance's ID.
    """
    match = _DATABASE_NAME_RE.match(database_pb.name)
    if match is None:
        raise ValueError("Database protobuf name was not in the expected format.", database_pb.name)
    if match.group("project") != instance._client.project:
        raise ValueError("Project ID on database does not match the project ID on the instance's client")
    if match.group("instance_id") != instance.instance_id:
        raise ValueError("Instance ID on database does not match the Instance ID on the instance")
    return cls(match.group("database_id"), instance, pool=pool)
def subscribe(self, frame):
    """Handle the SUBSCRIBE command: adds this connection to destination."""
    ack_mode = frame.headers.get('ack')
    # client-acknowledged subscriptions are treated as reliable
    self.engine.connection.reliable_subscriber = ack_mode and ack_mode.lower() == 'client'
    destination = frame.headers.get('destination')
    if not destination:
        raise ProtocolError('Missing destination for SUBSCRIBE command.')
    if destination.startswith('/queue/'):
        manager = self.engine.queue_manager
    else:
        manager = self.engine.topic_manager
    manager.subscribe(self.engine.connection, destination)
def wait_for(self, condition, interval=DEFAULT_WAIT_INTERVAL, timeout=DEFAULT_WAIT_TIMEOUT):
    """Wait until *condition* holds, re-checking it every *interval* seconds.

    Returns the (truthy) condition result; raises ``WaitTimeoutError`` once
    *timeout* seconds have elapsed without success.  The condition is always
    evaluated at least once.
    """
    deadline = time.time() + timeout
    while True:
        result = condition()
        if result:
            return result
        if time.time() > deadline:
            raise WaitTimeoutError("wait_for timed out")
        time.sleep(interval)
def _process_value(value):
    """Convert *value* into a human-readable diff string.

    Branch order is significant: falsy values and the literal strings
    "None"/"0" are special-cased *before* the UID lookup (see the "0"
    comment below).
    """
    if not value:
        value = _("Not set")
    # XXX: bad data, e.g. in AS Method field
    elif value == "None":
        value = _("Not set")
    # 0 is detected as the portal UID
    elif value == "0":
        pass
    elif api.is_uid(value):
        value = _get_title_or_id_from_uid(value)
    elif isinstance(value, (dict)):
        # sorted items give a stable, diff-friendly rendering
        value = json.dumps(sorted(value.items()), indent=1)
    elif isinstance(value, (list, tuple)):
        # recursively render each element, then join
        value = sorted(map(_process_value, value))
        value = "; ".join(value)
    elif isinstance(value, unicode):
        # Python 2: normalise unicode to a UTF-8 byte string
        value = api.safe_unicode(value).encode("utf8")
    return str(value)
def url_params_previous_page(self):
    """Build the query parameters that select the previous page.

    :rtype: dict[str, str]
    :raises: whatever ``assert_has_previous_page`` raises when there is
        no previous page to go to.
    """
    self.assert_has_previous_page()
    params = {self.PARAM_OLDER_ID: str(self.older_id)}
    # appends the page-size parameter only when a non-default count is set
    self._add_count_to_params_if_needed(params)
    return params
def bz2_opener(path, pattern='', verbose=False):
    """Opener that yields a filehandle for a single bz2-compressed file.

    :param str path: Local path or URL.
    :param str pattern: Regular expression the basename must match
        (empty string disables the filter).
    :return: Filehandle(s).
    """
    from_url = is_url(path)
    source = path if from_url else os.path.abspath(path)
    filename = os.path.basename(path)
    if pattern and not re.match(pattern, filename):
        logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(path), pattern))
        return
    try:
        if from_url:
            filehandle = bz2.open(io.BytesIO(urlopen(path).read()))
        else:
            filehandle = bz2.open(path)
        # read one byte to force bz2 header validation, then rewind
        filehandle.read(1)
        filehandle.seek(0)
        logger.verbose('Processing file: {}'.format(source))
        yield filehandle
    except (OSError, IOError):
        raise BZ2ValidationError
def get_all_distribution_names(url=None):
    """Return all distribution names known by an index.

    :param url: The URL of the index; defaults to ``DEFAULT_INDEX``.
    :return: A list of all known distribution names.
    """
    if url is None:
        url = DEFAULT_INDEX
    # short network timeout so a dead index fails fast
    return ServerProxy(url, timeout=3.0).list_packages()
def unregisterWalkthrough(self, walkthrough):
    """Unregister the given walkthrough from the application walkthrough list.

    :param walkthrough: <XWalkthrough> instance, or its name as str/unicode
        (resolved via ``findWalkthrough``).  Unknown walkthroughs are ignored.
    """
    # IDIOM FIX: isinstance instead of ``type(...) in (str, unicode)``
    # (also accepts str/unicode subclasses).
    if isinstance(walkthrough, (str, unicode)):
        walkthrough = self.findWalkthrough(walkthrough)
    try:
        self._walkthroughs.remove(walkthrough)
    except ValueError:
        # not registered — nothing to do
        pass
def enum_device_interfaces(h_info, guid):
    """Generator yielding an SP_DEVICE_INTERFACE_DATA for each device
    interface of *h_info* matching *guid*.

    :param h_info: device information set handle.
    :param guid: interface class GUID to enumerate.

    Note: the same structure object is yielded every iteration and refilled
    in place by SetupDiEnumDeviceInterfaces — copy it if you need to keep it.
    """
    dev_interface_data = SP_DEVICE_INTERFACE_DATA()
    # SetupDi convention: the structure size field must be set before use —
    # TODO confirm against the SetupDiEnumDeviceInterfaces docs.
    dev_interface_data.cb_size = sizeof(dev_interface_data)
    device_index = 0
    # the API returns FALSE once device_index runs past the last interface
    while SetupDiEnumDeviceInterfaces(h_info, None, byref(guid), device_index, byref(dev_interface_data)):
        yield dev_interface_data
        device_index += 1
    del dev_interface_data
def delete_view(self, request, object_id, extra_context=None):
    """Overrides the default to enable redirecting to the directory view
    after deletion of a folder.

    We need to fetch the object and find out who the parent is before
    super, because super will delete the object and make it impossible
    to find out the parent folder to redirect to.
    """
    parent_folder = None
    try:
        obj = self.queryset(request).get(pk=unquote(object_id))
        parent_folder = obj.parent
    except self.model.DoesNotExist:
        obj = None
    r = super(FolderAdmin, self).delete_view(request=request, object_id=object_id, extra_context=extra_context)
    # Rewrite the redirect target only when super() redirected to one of
    # its default post-delete locations; otherwise pass the response through.
    url = r.get("Location", None)
    if url in ["../../../../", "../../"] or url == self._get_post_url(obj):
        if parent_folder:
            url = reverse('admin:filer-directory_listing', kwargs={'folder_id': parent_folder.id})
        else:
            url = reverse('admin:filer-directory_listing-root')
        # preserve popup/select-folder query parameters across the redirect
        url = "%s%s%s" % (url, popup_param(request), selectfolder_param(request, "&"))
        return HttpResponseRedirect(url)
    return r
def profile_slope(self, kwargs_lens_list, lens_model_internal_bool=None, num_points=10):
    """Compute the logarithmic power-law slope of a profile at the
    Einstein radius via a finite difference of the deflection angle.

    :param kwargs_lens_list: lens model keyword argument list
    :param lens_model_internal_bool: bool list, indicate which part of the
        model to consider (defaults to all components)
    :param num_points: number of estimates around the Einstein radius
    :return: power-law slope gamma
    """
    theta_E = self.effective_einstein_radius(kwargs_lens_list)
    # evaluation is centred on the first lens component's centre
    x0 = kwargs_lens_list[0]['center_x']
    y0 = kwargs_lens_list[0]['center_y']
    x, y = util.points_on_circle(theta_E, num_points)
    dr = 0.01  # radial finite-difference step
    x_dr, y_dr = util.points_on_circle(theta_E + dr, num_points)
    if lens_model_internal_bool is None:
        lens_model_internal_bool = [True] * len(kwargs_lens_list)
    # deflection magnitude on the Einstein ring ...
    alpha_E_x_i, alpha_E_y_i = self._lensModel.alpha(x0 + x, y0 + y, kwargs_lens_list, k=lens_model_internal_bool)
    alpha_E_r = np.sqrt(alpha_E_x_i ** 2 + alpha_E_y_i ** 2)
    # ... and slightly outside it
    alpha_E_dr_x_i, alpha_E_dr_y_i = self._lensModel.alpha(x0 + x_dr, y0 + y_dr, kwargs_lens_list, k=lens_model_internal_bool)
    alpha_E_dr = np.sqrt(alpha_E_dr_x_i ** 2 + alpha_E_dr_y_i ** 2)
    # numerical log-log slope of the deflection, averaged over the ring
    slope = np.mean(np.log(alpha_E_dr / alpha_E_r) / np.log((theta_E + dr) / theta_E))
    gamma = -slope + 2
    return gamma
def addr_spec(self):
    """The addr_spec (username@domain) portion of the address, quoted
    according to RFC 5322 rules, but with no Content Transfer Encoding.
    """
    # Quote the local part only when it contains characters that cannot
    # appear in a dot-atom.
    if set(self.username) & parser.DOT_ATOM_ENDS:
        local_part = parser.quote_string(self.username)
    else:
        local_part = self.username
    if self.domain:
        return local_part + '@' + self.domain
    if not local_part:
        return '<>'
    return local_part
def monkeypatch():
    """Monkeypatch built-in numpy array-creation functions to call those
    provided by nparray instead.

    Mutates the global ``np`` module in place; the original functions are
    not saved, so there is no un-patch helper here.
    """
    np.array = array
    np.arange = arange
    np.linspace = linspace
    np.logspace = logspace
    np.geomspace = geomspace
    np.full = full
    np.full_like = full_like
    np.zeros = zeros
    np.zeros_like = zeros_like
    np.ones = ones
    np.ones_like = ones_like
    np.eye = eye
def p_const_ref(self, p):
    '''const_ref : IDENTIFIER'''
    # NOTE: the docstring above is the PLY grammar production for this rule
    # — PLY parses it to build the parser; do not edit it as documentation.
    # Action: wrap the matched identifier in a ConstReference AST node,
    # preserving the source line number.
    p[0] = ast.ConstReference(p[1], lineno=p.lineno(1))
def removeduplicates(self, entries=None):
    '''Recursively walk children and remove duplicate entries, keeping the
    first instance seen and re-pointing later occurrences at it.

    :param entries: mapping of str(child) -> canonical child shared across
        the recursion; created automatically on the top-level call.
    :return: a list of removed (duplicate) entries
    '''
    removed = []
    # IDIOM FIX: compare to None with ``is`` rather than ``==``.
    if entries is None:
        entries = {}
    new_children = []
    for child in self.children:
        key = str(child)
        canonical = entries.get(key, None)
        if canonical:
            # duplicate: reuse the canonical instance, record this one
            new_children.append(canonical)
            removed.append(child)
        else:
            # first occurrence: recurse into it, then register it
            dups = child.removeduplicates(entries)
            if dups:
                removed.extend(dups)
            entries[key] = child
            new_children.append(child)
    self.children = new_children
    return removed
def get_max_ptrm_check(ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x, t_Arai, x_Arai):
    """Sort through the included pTRM checks and find the largest check
    difference, the sum of the total diffs, and the percentage of the
    largest check relative to the original measurement at that step.

    input: ptrm_checks_included_temps, ptrm_checks_all_temps, ptrm_x,
    t_Arai, x_Arai.
    output: diffs, max_diff, sum_diffs, check_percent, sum_abs_diffs
    (all NaN with an empty diffs list when no temps are included).
    """
    if not ptrm_checks_included_temps:
        return [], float('nan'), float('nan'), float('nan'), float('nan')
    diffs = []
    abs_diffs = []
    check_percents = []
    # NOTE(review): these two lists are populated but never returned or
    # otherwise used — confirm whether they can be dropped.
    x_Arai_compare = []
    ptrm_compare = []
    all_temps = list(ptrm_checks_all_temps)
    for temp in ptrm_checks_included_temps:
        # pTRM check value at this temperature step
        check_value = ptrm_x[all_temps.index(temp)]
        ptrm_compare.append(check_value)
        # original Arai measurement at the same step
        original_value = x_Arai[t_Arai.index(temp)]
        x_Arai_compare.append(original_value)
        delta = original_value - check_value
        diffs.append(delta)
        abs_diffs.append(abs(delta))
        if original_value == 0:
            check_percents.append(0)
        else:
            check_percents.append((old_div(abs(delta), original_value)) * 100)
    return diffs, max(abs_diffs), abs(sum(diffs)), max(check_percents), sum(abs_diffs)
def get(self, name, *subkey):
    """Retrieve a data item, loading it if it is not present.

    :param name: item name.
    :param subkey: optional subkey components; when given, the lookup is
        delegated to ``get_subkey`` with the components as a tuple.
    """
    # BUG FIX: ``subkey`` is a *args tuple and can never equal the list
    # ``[]``, so the original's get_atomic branch was unreachable and
    # no-subkey calls were routed to get_subkey with an empty tuple.
    if not subkey:
        return self.get_atomic(name)
    return self.get_subkey(name, subkey)
def generate(self, minlen, maxlen):
    """Lazily generate every word of length minlen..maxlen over
    ``self.charset``, each suffixed with ``self.delimiter``.

    Words are produced via itertools.product, so nothing is stored in
    memory.

    :raises ValueError: when the length bounds are invalid.
    """
    # FIX: raise with a descriptive message instead of a bare ValueError().
    if minlen < 1 or maxlen < minlen:
        raise ValueError("require 1 <= minlen <= maxlen, got minlen=%r maxlen=%r" % (minlen, maxlen))
    for length in range(minlen, maxlen + 1):
        # string product generator for the current length
        for combo in product(self.charset, repeat=length):
            yield ''.join(combo) + self.delimiter
def get_task(self, name, include_helpers=True):
    """Get the task identified by *name*, or raise TaskNotFound when there
    is no such task.

    :param name: name of helper/task to get
    :param include_helpers: if True, also look for helpers
    :return: task or helper identified by name
    """
    if not include_helpers and name in self._helper_names:
        raise TaskNotFound(name)
    missing = object()
    task = getattr(self._tasks, name, missing)
    if task is missing:
        raise TaskNotFound(name)
    return task
def _topological_sort(self):
    """Kahn's algorithm for Topological Sorting.

    - Finds cycles in the graph (left behind in
      ``self._circular_dependencies``)
    - Computes dependency weight

    Side effects only: populates ``self._circular_dependencies`` and
    ``self._sorted_nodes``; returns nothing.
    """
    sorted_graph = []
    node_map = self._graph.get_nodes()
    nodes = [NodeVisitor(node_map[node]) for node in node_map]
    def get_pointers_for_edge_nodes(visitor_decorated_node):
        # Map a node's edge ids back to their NodeVisitor wrappers.
        edges = []
        edge_ids = visitor_decorated_node.get_node().get_edges()
        for node in nodes:
            if node.get_id() in edge_ids:
                edges.append(node)
        return edges
    # node is initially weighted with the number of immediate dependencies
    for node in nodes:
        for edge in get_pointers_for_edge_nodes(node):
            edge.increment()
    # Start with a list of nodes who have no dependents
    resolved = [node for node in nodes if node.get_weight() == 0]
    while resolved:
        node = resolved.pop()
        sorted_graph.append(node)
        # releasing a node unblocks its edges; any edge whose weight hits
        # zero becomes resolvable
        for edge in get_pointers_for_edge_nodes(node):
            edge.decrement()
            if edge.get_weight() == 0:
                resolved.append(edge)
    # nodes whose weight never reached zero sit on a cycle
    self._circular_dependencies = [node.get_node() for node in nodes if node.get_weight() > 0]
    self._sorted_nodes = list(reversed([node.get_node() for node in sorted_graph]))
def _validate_type ( self , item , name ) :
"""Validate the item against ` allowed _ types ` .""" | if item is None : # don ' t validate None items , since they ' ll be caught by the portion
# of the validator responsible for handling ` required ` ness
return
if not isinstance ( item , self . allowed_types ) :
item_class_name = item . __class__ . __name__
raise ArgumentError ( name , "Expected one of %s, but got `%s`" % ( self . allowed_types , item_class_name ) ) |
def asizeof(*objs, **opts):
    '''Return the combined size in bytes of all objects passed as positional arguments.

    The available options and defaults are the following.

    *align=8*       -- size alignment
    *all=False*     -- all current objects
    *clip=80*       -- clip ``repr()`` strings
    *code=False*    -- incl. (byte)code size
    *derive=False*  -- derive from super type
    *ignored=True*  -- ignore certain types
    *infer=False*   -- try to infer types
    *limit=100*     -- recursion limit
    *stats=0.0*     -- print statistics

    Set *align* to a power of 2 to align sizes.  Any value less than 2
    avoids size alignment.

    All current module, global and stack objects are sized if *all* is
    True and if no positional arguments are supplied.

    A positive *clip* value truncates all repr() strings to at most *clip*
    characters.

    The (byte)code size of callable objects like functions, methods,
    classes, etc. is included only if *code* is True.

    If *derive* is True, new types are handled like an existing (super)
    type provided there is one and only one of those.

    By default certain base types like object, super, etc. are ignored.
    Set *ignored* to False to include those.

    If *infer* is True, new types are inferred from attributes (only
    implemented for dict types on callable attributes as get, has_key,
    items, keys and values).

    Set *limit* to a positive value to accumulate the sizes of the
    referents of each object, recursively up to the limit.  Using
    *limit=0* returns the sum of the flat [4] sizes of the given objects.
    High *limit* values may cause runtime errors and miss objects for
    sizing.

    A positive value for *stats* prints up to 8 statistics, (1) a summary
    of the number of objects sized and seen, (2) a simple profile of the
    sized objects by type and (3+) up to 6 tables showing the static,
    dynamic, derived, ignored, inferred and dict types used, found resp.
    installed.  The fractional part of the *stats* value (x100) is the
    cutoff percentage for simple profiles.

    [4] See the documentation of this module for the definition of flat size.
    '''
    # split the positional objects from the option dict understood by the
    # shared module-level sizer
    t, p = _objs_opts(objs, **opts)
    if t:
        _asizer.reset(**p)
        s = _asizer.asizeof(*t)
        _asizer.print_stats(objs=t, opts=opts)
        # show opts as_kwdstr
        _asizer._clear()
    else:
        # nothing to size
        s = 0
    return s
def make_energy_funnel_data(self, cores=1):
    """Compare models created during the minimisation to the best model.

    Returns
    -------
    energy_rmsd_gen : [(float, float, int)]
        A list of triples containing the BUFF score, RMSD to the
        top model and generation of a model generated during the
        minimisation.
    """
    if not self.parameter_log:
        raise AttributeError('No parameter log data to make funnel, have you ran the ' 'optimiser?')
    model_cls = self._params['specification']
    # tag every logged (parameters, score) pair with its generation index
    gen_tagged = []
    for gen, models in enumerate(self.parameter_log):
        for model in models:
            gen_tagged.append((model[0], model[1], gen))
    # sort by score; the first entry is the best model overall
    sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
    top_result = sorted_pps[0]
    top_result_model = model_cls(*top_result[0])
    # NOTE(review): the parallel branch sizes its pool from
    # self._params['processors'], not from ``cores`` — ``cores`` only picks
    # serial vs parallel. Confirm this is intentional.
    if (cores == 1) or (sys.platform == 'win32'):
        energy_rmsd_gen = map(self.funnel_rebuild, [(x, top_result_model, self._params['specification']) for x in sorted_pps[1:]])
    else:
        with futures.ProcessPoolExecutor(max_workers=self._params['processors']) as executor:
            energy_rmsd_gen = executor.map(self.funnel_rebuild, [(x, top_result_model, self._params['specification']) for x in sorted_pps[1:]])
    return list(energy_rmsd_gen)
def send(self, data):
    """Open the transport subprocess, feed it *data*, and yield response chunks."""
    try:
        child = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as exc:
        # the command could not be started at all
        raise URLError("Calling %r failed (%s)!" % (' '.join(self.cmd), exc))
    stdout, stderr = child.communicate(data)
    if child.returncode:
        # non-zero exit: surface stderr in the error
        raise URLError("Calling %r failed with RC=%d!\n%s" % (' '.join(self.cmd), child.returncode, stderr,))
    yield stdout
def create_a10_device_instance(self, context, a10_device_instance):
    """Attempt to create an instance using the neutron context.

    Launches the backing vThunder device first, then persists a DB record
    combining the vThunder config, the API payload and the created
    instance's fields.
    """
    LOG.debug("A10DeviceInstancePlugin.create(): a10_device_instance=%s", a10_device_instance)
    config = a10_config.A10Config()
    vthunder_defaults = config.get_vthunder_config()
    imgr = instance_manager.InstanceManager.from_config(config, context)
    # strip unspecified attributes from the API payload
    dev_instance = common_resources.remove_attributes_not_specified(a10_device_instance.get(resources.RESOURCE))
    # Create the instance with specified defaults.
    vthunder_config = vthunder_defaults.copy()
    vthunder_config.update(_convert(dev_instance, _API, _VTHUNDER_CONFIG))
    instance = imgr.create_device_instance(vthunder_config, dev_instance.get("name"))
    # merge config, API payload and created-instance fields into one DB row;
    # later updates win on key collisions
    db_record = {}
    db_record.update(_convert(vthunder_config, _VTHUNDER_CONFIG, _DB))
    db_record.update(_convert(dev_instance, _API, _DB))
    db_record.update(_convert(instance, _INSTANCE, _DB))
    # If success, return the created DB record
    # Else, raise an exception because that's what we would do anyway
    db_instance = super(A10DeviceInstancePlugin, self).create_a10_device_instance(context, {resources.RESOURCE: db_record})
    return _make_api_dict(db_instance)
def increase_last(self, k):
    """Increase the counter of the last matched rule by ``k``.

    No-op when no rule has been matched yet.
    """
    last = self._last_rule
    if last is None:
        return
    last.increase_last(k)
def allKeys(self):
    """Return every key defined for this settings instance.

    :return: [<str>, ..]
    """
    custom = self._customFormat
    # Delegate to the custom format when one is set; otherwise fall back
    # to the base class implementation.
    return custom.allKeys() if custom else super(XSettings, self).allKeys()
def _parse_action(self, action, current_time):
    """Parse a player action and record it in the matching event log.

    Dispatches on ``action.action_type``:
    - 'research': appends {technology, timestamp} to ``self._research``
      under the acting player's id.
    - 'build': appends {building, timestamp, coordinates} to
      ``self._build`` under the acting player's id.
    - 'queue': appends one {unit, timestamp} entry per queued unit to
      ``self._queue``.

    TODO: handle cancels
    """
    # NOTE(review): the 'research' branch timestamps with action.timestamp,
    # while 'build'/'queue' use current_time — confirm this asymmetry is
    # intended.
    if action.action_type == 'research':
        name = mgz.const.TECHNOLOGIES[action.data.technology_type]
        self._research[action.data.player_id].append({'technology': name, 'timestamp': _timestamp_to_time(action.timestamp)})
    elif action.action_type == 'build':
        # Build events also keep the map coordinates of the placement.
        self._build[action.data.player_id].append({'building': mgz.const.UNITS[action.data.building_type], 'timestamp': _timestamp_to_time(current_time), 'coordinates': {'x': action.data.x, 'y': action.data.y}})
    elif action.action_type == 'queue':
        # One entry per unit queued; int() guards against a non-integer count.
        for _ in range(0, int(action.data.number)):
            self._queue.append({'unit': mgz.const.UNITS[action.data.unit_type], 'timestamp': _timestamp_to_time(current_time)})
def cdn_set_conf(self, cname, originConf, environment, token):
    """Update an existing origin configuration in the CDN.

    The ``token`` must equal the one returned by ``cdn_get_conf`` for the
    current version of the configuration file; otherwise the set action
    is not valid.
    """
    service = self.client.service
    return service.cdn_set_conf(cname, originConf, environment, token)
def feed_forward_builder(name, hidden_dim, activation, trainable=True):
    """Get position-wise feed-forward layer builder.

    :param name: Prefix of names for internal layers.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param activation: Activation for feed-forward layer.
    :param trainable: Whether the layer is trainable.
    :return: A callable that applies a ``FeedForward`` layer to its input.
    """
    def build(x):
        layer = FeedForward(
            units=hidden_dim,
            activation=activation,
            trainable=trainable,
            name=name,
        )
        return layer(x)
    return build
def salm2map(salm, s, lmax, Ntheta, Nphi):
    """Convert mode weights of spin-weighted function to values on a grid

    Parameters
    ----------
    salm : array_like, complex, shape (..., (lmax+1)**2)
        Input array representing mode weights of the spin-weighted function.  This array may be
        multi-dimensional, where initial dimensions may represent different times, for example, or
        separate functions on the sphere.  The final dimension should give the values of the mode
        weights, in the order described below in the 'Notes' section.
    s : int or array, int, shape (...)
        Spin weight of the function.  If `salm` is multidimensional and this is an array, its
        dimensions must match the first dimensions of `salm`, and the different values are the spin
        weights of the different functions represented by those dimensions.  Otherwise, if `salm` is
        multidimensional and `s` is a single integer, all functions are assumed to have the same
        spin weight.
    lmax : int
        The largest `ell` value present in the input array.
    Ntheta : int
        Number of points in the output grid along the polar angle.
    Nphi : int
        Number of points in the output grid along the azimuthal angle.

    Returns
    -------
    map : ndarray, complex, shape (..., Ntheta, Nphi)
        Values of the spin-weighted function on grid points of the sphere.  This array is shaped
        like the input `salm` array, but has one extra dimension.  The final two dimensions describe
        the values of the function on the sphere.

    See also
    --------
    spinsfast.map2salm : Roughly the inverse of this function.

    Notes
    -----
    The input `salm` data should be given in increasing order of `ell` value, always starting with
    (ell, m) = (0, 0) even if `s` is nonzero, proceeding to (1, -1), (1, 0), (1, 1), etc.
    Explicitly, the ordering should match this:

        [f_lm(ell, m) for ell in range(lmax+1) for m in range(-ell, ell+1)]

    The input is converted to a contiguous complex numpy array if necessary.

    The output data are presented on this grid of spherical coordinates:

        np.array([[f(theta, phi)
                   for phi in np.linspace(0.0, 2*np.pi, num=2*lmax+1, endpoint=False)]
                  for theta in np.linspace(0.0, np.pi, num=2*lmax+1, endpoint=True)])

    Note that `map2salm` and `salm2map` are not true inverses of each other for several reasons.
    First, modes with `ell < |s|` should always be zero; they are simply assumed to be zero on input
    to `salm2map`.  It is also possible to define a `map` function that violates this assumption --
    for example, having a nonzero average value over the sphere, if the function has nonzero spin
    `s`, this is impossible.  Also, it is possible to define a map of a function with so much
    angular dependence that it cannot be captured with the given `lmax` value.  For example, a
    discontinuous function will never be perfectly resolved.

    Example
    -------
    >>> s = -2
    >>> lmax = 8
    >>> Ntheta = Nphi = 2 * lmax + 1
    >>> modes = np.zeros(spinsfast.N_lm(lmax), dtype=np.complex128)
    >>> modes[spinsfast.lm_ind(2, 2, 8)] = 1.0
    >>> values = spinsfast.salm2map(modes, s, lmax, Ntheta, Nphi)

    """
    import numpy as np
    # Validate the grid and bandlimit before touching the data.
    if Ntheta < 2 or Nphi < 1:
        raise ValueError("Input values of Ntheta={0} and Nphi={1} ".format(Ntheta, Nphi)
                         + "are not allowed; they must be greater than 1 and 0, respectively.")
    if lmax < 1:
        raise ValueError("Input value of lmax={0} ".format(lmax)
                         + "is not allowed; it must be greater than 0 and should be greater "
                         + "than |s|={0}.".format(abs(s)))
    salm = np.ascontiguousarray(salm, dtype=np.complex128)
    if salm.shape[-1] < N_lm(lmax):
        raise ValueError("The input `salm` array of shape {0} is too small for the stated `lmax` of {1}. ".format(salm.shape, lmax)
                         + "Perhaps you forgot to include the (zero) modes with ell<|s|.")
    # `grid` avoids shadowing the builtin `map`; it holds the output values.
    grid = np.empty(salm.shape[:-1] + (Ntheta, Nphi), dtype=np.complex128)
    if salm.ndim > 1:
        s = np.ascontiguousarray(s, dtype=np.intc)
        # Broadcast a scalar (or mismatched) spin weight across all leading
        # dimensions of `salm`.  `np.product` was removed in NumPy 2.0; use
        # the supported `np.prod` spelling.
        if s.ndim != salm.ndim - 1 or np.prod(s.shape) != np.prod(salm.shape[:-1]):
            s = s * np.ones(salm.shape[:-1], dtype=np.intc)
        _multi_salm2map(salm, grid, s, lmax, Ntheta, Nphi)
    else:
        _salm2map(salm, grid, s, lmax, Ntheta, Nphi)
    return grid
def list(self, **params):
    """Retrieve all lead unqualified reasons

    Returns all lead unqualified reasons available to the user according to the parameters provided

    :calls: ``get /lead_unqualified_reasons``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attriubte-style access, which represent collection of LeadUnqualifiedReasons.
    :rtype: list
    """
    _status, _headers, reasons = self.http_client.get("/lead_unqualified_reasons", params=params)
    return reasons
def recurrence(self, recurrence):
    """See `recurrence`."""
    # Validate before storing; invalid values keep the previous state.
    if is_valid_recurrence(recurrence):
        self._recurrence = recurrence
    else:
        raise KeyError("'%s' is not a valid recurrence value" % recurrence)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.