signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def __run_blast_select_loop(input_file, popens, fields):
    '''Run the select(2) loop to handle blast I/O to the given set of Popen
    objects.

    Yields records back that have been read from blast processes.

    NOTE(review): this uses Python 2 syntax (``except OSError, e``) and str
    buffers for pipe data — confirm it is never run under Python 3.
    '''
    def make_nonblocking(f):
        # Switch the pipe to non-blocking mode so os.read/os.write never stall.
        fl = fcntl.fcntl(f.fileno(), fcntl.F_GETFL)
        fl |= os.O_NONBLOCK
        fcntl.fcntl(f.fileno(), fcntl.F_SETFL, fl)
    # Readable fds, writable fds, and per-process buffer state; a process's
    # stdin and stdout fds deliberately share one state dict.
    rfds = set()
    wfds = set()
    fd_map = {}
    for p in popens:
        make_nonblocking(p.stdout)
        rfds.add(p.stdout.fileno())
        fd_map[p.stdout.fileno()] = {'popen': p, 'query_buffer': '', 'result_buffer': ''}
        make_nonblocking(p.stdin)
        wfds.add(p.stdin.fileno())
        fd_map[p.stdin.fileno()] = fd_map[p.stdout.fileno()]
    while len(rfds) + len(wfds) > 0:
        # XXX: Should we be tracking excepted file descriptors as well?
        rl, wl, _ = select.select(rfds, wfds, [])
        # For each of our readable blast processes, read response
        # records and emit them
        for fd in rl:
            rs = fd_map[fd]['result_buffer']
            rbuf = os.read(fd, select.PIPE_BUF)
            # The blast process has finished emitting records. Stop
            # attempting to read from or write to it. If we have
            # excess data in our result_buffer, c'est la vie.
            if rbuf == '':
                p = fd_map[fd]['popen']
                rfds.remove(p.stdout.fileno())
                p.stdout.close()
                if not p.stdin.closed:
                    wfds.remove(p.stdin.fileno())
                    p.stdin.close()
                continue
            rs += rbuf
            # Emit every complete record currently sitting in the buffer.
            while True:
                rec, rs = __read_single_query_result(rs, fields)
                if rec is None:
                    break
                yield rec
            fd_map[fd]['result_buffer'] = rs
        # For each of our writable blast processes, grab a new query
        # sequence and send it off to them.
        for fd in wl:
            qs = fd_map[fd]['query_buffer']
            if not qs:
                ql = __read_single_fasta_query_lines(input_file)
                # No more input records available. Close the pipe to
                # signal this to the blast process.
                if ql is None:
                    p = fd_map[fd]['popen']
                    wfds.remove(p.stdin.fileno())
                    p.stdin.close()
                    continue
                qs = ''.join(ql)
            # XXX: For some reason, despite select(2) indicating that
            # this file descriptor is writable, writes can fail
            # with EWOULDBLOCK. Handle this gracefully.
            try:
                written = os.write(fd, qs)
                qs = qs[written:]
            except OSError, e:
                assert e.errno == errno.EWOULDBLOCK
            fd_map[fd]['query_buffer'] = qs
def get(self, ids, **kwargs):
    """Look up environments vip by their ids.

    :param ids: List containing identifiers of environments vip.
    :param kwargs: Optional query modifiers forwarded to the API
        (include, exclude, fields, kind).
    :return: Dict containing environments vip.
    """
    target = build_uri_with_ids('api/v3/environment-vip/%s/', ids)
    prepared = self.prepare_url(target, kwargs)
    return super(ApiEnvironmentVip, self).get(prepared)
def _stack_values_to_string(self, stack_values):
    """Render each stack value as a string and join them with " .. ".

    Symbolic values are prefixed with "SYMBOLIC - "; concrete values are
    rendered with repr(). The original code branched on
    ``len(self.solver.eval_upto(stack_value, 2)) == 2`` but both branches
    produced the identical repr, so the redundant (and potentially costly)
    solver query has been dropped.

    :param stack_values: A list of values
    :return: The converted string
    """
    strings = []
    for stack_value in stack_values:
        if self.solver.symbolic(stack_value):
            strings.append("SYMBOLIC - %s" % repr(stack_value))
        else:
            strings.append(repr(stack_value))
    return " .. ".join(strings)
def has_abiext(self, ext, single_file=True):
    """Return the absolute path of the ABINIT file with extension ``ext``.

    Both Fortran files and netcdf files are supported: a file whose name ends
    in ``ext + ".nc"`` also matches. Returns an empty string if no file is
    present.

    Raises:
        ValueError: if ``single_file`` is True and several files match, which
            implies this method is not compatible with multiple datasets.
    """
    # Every extension except "abo" is stored with a leading underscore.
    if ext != "abo" and not ext.startswith('_'):
        ext = '_' + ext

    matches = []
    for filepath in self.list_filepaths():
        # For the time being, DDB files in nc format are ignored.
        if ext == "_DDB" and filepath.endswith(".nc"):
            continue
        # Ignore BSE text files e.g. GW_NLF_MDF.
        if ext == "_MDF" and not filepath.endswith(".nc"):
            continue
        # Ignore DDK.nc files (temporary workaround for v8.8.2, whose DFPT
        # code produces a DDK.nc file that conflicts with the AbiPy
        # convention).
        if ext == "_DDK" and filepath.endswith(".nc"):
            continue
        if filepath.endswith(ext) or filepath.endswith(ext + ".nc"):
            matches.append(filepath)

    # Fallback for 1WF-style files whose names break the extension convention.
    if not matches:
        matches = [f for f in self.list_filepaths() if fnmatch(f, "*%s*" % ext)]

    if not matches:
        return ""
    if len(matches) > 1 and single_file:
        # ABINIT users must learn that multiple datasets are bad!
        raise ValueError("Found multiple files with the same extensions:\n %s\n" % matches
                         + "Please avoid using multiple datasets!")
    return matches[0] if single_file else matches
def to_file_object(self, name, out_dir):
    """Dump this list to a pickle file and return a File reference to it.

    Parameters
    ----------
    name : str
        An identifier of this file. Needs to be unique.
    out_dir : path
        Directory in which to place the file.

    Returns
    -------
    file : AhopeFile
    """
    make_analysis_dir(out_dir)
    handle = File('ALL', name, self.get_times_covered_by_files(),
                  extension='.pkl', directory=out_dir)
    self.dump(handle.storage_path)
    return handle
def convert_weights_and_inputs(node, **kwargs):
    """Helper function to convert weights and inputs.

    For graph inputs, only a value-info placeholder is produced; for weights,
    the tensor is also appended to the model initializer list.
    """
    name, _, _ = get_inputs(node, kwargs)
    if kwargs["is_input"] is not False:
        # Plain graph input: a value-info placeholder is all that is needed.
        placeholder = onnx.helper.make_tensor_value_info(
            name, kwargs["in_type"], kwargs["in_shape"])
        return [placeholder]
    np_arr = kwargs["weights"][name]
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
    dims = np.shape(np_arr)
    value_info = onnx.helper.make_tensor_value_info(name, data_type, dims)
    # Register the raw weight values with the graph initializer.
    kwargs["initializer"].append(
        onnx.helper.make_tensor(
            name=name,
            data_type=data_type,
            dims=dims,
            vals=np_arr.flatten().tolist(),
            raw=False,
        )
    )
    return [value_info]
def to_curve_spline(obj):
    '''to_curve_spline(obj) yields obj if obj is a curve spline and otherwise attempts to
    coerce obj into a curve spline, raising an error if it cannot.

    obj may be a curve spline, a (coordinates, options) tuple, or a coordinate
    matrix / sequence of curve splines whose coordinates are concatenated.
    '''
    if is_curve_spline(obj):
        return obj
    elif is_tuple(obj) and len(obj) == 2:
        (crds, opts) = obj
    else:
        (crds, opts) = (obj, {})
    if pimms.is_matrix(crds) or is_curve_spline(crds):
        crds = [crds]
    spls = [c for c in crds if is_curve_spline(c)]
    opts = dict(opts)
    # Inherit weights only when every element is a spline that carries them.
    if 'weights' not in opts and len(spls) == len(crds):
        if all(c.weights is not None for c in crds):
            opts['weights'] = np.concatenate([c.weights for c in crds])
    # Use the most conservative (lowest) spline order found.
    if 'order' not in opts and len(spls) > 0:
        opts['order'] = np.min([c.order for c in spls])
    # Inherit smoothing only when all splines agree on it.
    if 'smoothing' not in opts and len(spls) > 0:
        sm = set([c.smoothing for c in spls])
        if len(sm) == 1:
            opts['smoothing'] = list(sm)[0]
        else:
            opts['smoothing'] = None
    # BUGFIX: each element x (not the list crds) must be tested for being a
    # curve spline; the original called is_curve_spline(crds), which is always
    # False here, so spline coordinates were never extracted via .crds.
    crds = [x.crds if is_curve_spline(x) else np.asarray(x) for x in crds]
    crds = [x if x.shape[0] == 2 else x.T for x in crds]
    crds = np.hstack(crds)
    return curve_spline(crds, **opts)
def maps_get_rules_output_rules_rulename(self, **kwargs):
    """Auto Generated Code.

    Builds the maps_get_rules/output/rules/rulename XML payload, fills the
    rulename text from kwargs, and hands the tree to the configured callback.
    """
    config = ET.Element("maps_get_rules")
    output = ET.SubElement(config, "output")
    rules = ET.SubElement(output, "rules")
    rulename = ET.SubElement(rules, "rulename")
    rulename.text = kwargs.pop('rulename')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def check_length_of_shape_or_intercept_names(name_list, num_alts, constrained_param, list_title):
    """Ensure the number of parameter names matches the number of parameters
    that will be estimated; raise ValueError otherwise.

    Parameters
    ----------
    name_list : list of strings.
        Each element should be the name of a parameter that is to be estimated.
    num_alts : int.
        Total number of alternatives in the universal choice set for this
        dataset.
    constrained_param : {0, 1, True, False}
        Whether (1 or True) or not (0 or False) one of the parameters being
        estimated will be constrained (e.g. one of the intercepts).
    list_title : str.
        Type of parameters whose names are being checked, e.g.
        'intercept_params' or 'shape_params'.

    Returns
    -------
    None.
    """
    expected_length = num_alts - constrained_param
    if len(name_list) == expected_length:
        return None
    lines = ["{} is of the wrong length:".format(list_title),
             "len({}) == {}".format(list_title, len(name_list)),
             "The correct length is: {}".format(expected_length)]
    raise ValueError("\n".join(lines))
def remove_by(keys, original):
    """Remove items from ``original`` (in place) whose corresponding entry in
    ``keys`` is falsy.

    ``keys`` is a parallel list of flags: ``original[i]`` is kept only if
    ``keys[i]`` is truthy; items beyond ``len(keys)`` are always kept.  The
    original implementation called ``list.remove`` once per dropped item
    (O(n^2) and value-based); this rebuilds the list in a single pass with the
    same observable result.
    """
    kept = [original[index] for index, needed in enumerate(keys) if needed]
    # Items past the end of `keys` were never candidates for removal.
    kept.extend(original[len(keys):])
    original[:] = kept
def refund(self, idempotency_key=None, **params):
    """Issue a refund for this resource. Return a deferred."""
    headers = populate_headers(idempotency_key)
    refund_url = self.instance_url() + '/refund'
    deferred = self.request('post', refund_url, params, headers)
    deferred.addCallback(self.refresh_from)
    return deferred.addCallback(lambda _: self)
def sample(problem, N, calc_second_order=True, seed=None):
    """Generates model inputs using Saltelli's extension of the Sobol sequence.

    Returns a NumPy matrix containing the model inputs using Saltelli's
    sampling scheme. Saltelli's scheme extends the Sobol sequence in a way to
    reduce the error rates in the resulting sensitivity index calculations.
    If calc_second_order is False, the resulting matrix has N * (D + 2) rows,
    where D is the number of parameters. If calc_second_order is True, the
    resulting matrix has N * (2D + 2) rows. These model inputs are intended to
    be used with :func:`SALib.analyze.sobol.analyze`.

    Parameters
    ----------
    problem : dict
        The problem definition
    N : int
        The number of samples to generate
    calc_second_order : bool
        Calculate second-order sensitivities (default True)
    seed : int, optional
        Seed for numpy's random generator.
    """
    # BUGFIX: compare against None so that seed=0 is honored; the previous
    # "if seed:" silently ignored a zero seed.
    if seed is not None:
        np.random.seed(seed)
    D = problem['num_vars']
    groups = problem.get('groups')
    if not groups:
        Dg = problem['num_vars']
    else:
        Dg = len(set(groups))
        G, group_names = compute_groups_matrix(groups)
    # How many values of the Sobol sequence to skip
    skip_values = 1000
    # Create base sequence - could be any type of sampling
    base_sequence = sobol_sequence.sample(N + skip_values, 2 * D)
    if calc_second_order:
        saltelli_sequence = np.zeros([(2 * Dg + 2) * N, D])
    else:
        saltelli_sequence = np.zeros([(Dg + 2) * N, D])
    index = 0
    for i in range(skip_values, N + skip_values):
        # Copy matrix "A"
        for j in range(D):
            saltelli_sequence[index, j] = base_sequence[i, j]
        index += 1
        # Cross-sample elements of "B" into "A"
        for k in range(Dg):
            for j in range(D):
                if (not groups and j == k) or (groups and group_names[k] == groups[j]):
                    saltelli_sequence[index, j] = base_sequence[i, j + D]
                else:
                    saltelli_sequence[index, j] = base_sequence[i, j]
            index += 1
        # Cross-sample elements of "A" into "B"
        # Only needed if you're doing second-order indices (true by default)
        if calc_second_order:
            for k in range(Dg):
                for j in range(D):
                    if (not groups and j == k) or (groups and group_names[k] == groups[j]):
                        saltelli_sequence[index, j] = base_sequence[i, j]
                    else:
                        saltelli_sequence[index, j] = base_sequence[i, j + D]
                index += 1
        # Copy matrix "B"
        for j in range(D):
            saltelli_sequence[index, j] = base_sequence[i, j + D]
        index += 1
    if not problem.get('dists'):
        # scaling values out of 0-1 range with uniform distributions
        scale_samples(saltelli_sequence, problem['bounds'])
        return saltelli_sequence
    else:
        # scaling values to other distributions based on inverse CDFs
        scaled_saltelli = nonuniform_scale_samples(saltelli_sequence, problem['bounds'], problem['dists'])
        return scaled_saltelli
def parse_oxi_states(self, data):
    """Parse oxidation states from data dictionary.

    Returns a mapping from atom-type symbol (both as given and with any
    trailing oxidation-state suffix stripped) to its oxidation number, or
    None if the data is missing or malformed.
    """
    try:
        symbols = data["_atom_type_symbol"]
        oxi_states = {}
        for i, symbol in enumerate(symbols):
            oxi_states[symbol] = str2float(data["_atom_type_oxidation_number"][i])
        # Also key by the symbol with any trailing oxidation state stripped,
        # in case the label does not contain an oxidation state.
        for i, symbol in enumerate(symbols):
            bare = re.sub(r"\d?[\+,\-]?$", "", symbol)
            oxi_states[bare] = str2float(data["_atom_type_oxidation_number"][i])
    except (ValueError, KeyError):
        oxi_states = None
    return oxi_states
def rest_of_string(self, offset=0):
    """A copy of the current position till the end of the source string."""
    if not self.has_space(offset=offset):
        return ''
    return self.string[self.pos + offset:]
def data_fetch(self, url, task):
    '''A fake fetcher for dataurl'''
    self.on_fetch('data', task)
    content = dataurl.decode(url)
    result = {
        'orig_url': url,
        'content': content,
        'headers': {},
        'status_code': 200,
        'url': url,
        'cookies': {},
        'time': 0,
        'save': task.get('fetch', {}).get('save'),
    }
    # Short payloads are logged verbatim; long ones are truncated.
    if len(content) < 70:
        logger.info("[200] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
    else:
        logger.info("[200] %s:%s data:,%s...[content:%d] 0s",
                    task.get('project'), task.get('taskid'),
                    content[:70], len(content))
    return result
def output(self, kind, line):
    "*line* should be bytes"
    # Assemble the colored, timestamped record and emit it in one write.
    parts = [
        self._cyan,
        b't=%07d' % (time.time() - self._t0),
        self._reset,
        self._kind_prefixes[kind],
        self.markers[kind],
        line,
        self._reset,
    ]
    self.destination.write(b''.join(parts))
    self.destination.flush()
def fit(self):
    """Do the fitting. Does least square fitting. If you want to use custom
    fitting, you must override this.

    Raises:
        EOSError: if optimal parameters cannot be found.
    """
    # Residuals between the observed energies and the model evaluated at the
    # current parameter vector; leastsq minimizes their squared sum.
    def residuals(pars, x, y):
        return y - self._func(x, pars)

    self._params = self._initial_guess()
    self.eos_params, ierr = leastsq(residuals, self._params,
                                    args=(self.volumes, self.energies))
    # e0, b0, b1, v0
    self._params = self.eos_params
    if ierr not in [1, 2, 3, 4]:
        raise EOSError("Optimal parameters not found")
def load(filename=None, url=r"https://raw.githubusercontent.com/googlei18n/emoji4unicode/master/data/emoji4unicode.xml", loader_class=None):
    u"""load google's `emoji4unicode` project's xml file. must call this method first to use `e4u` library. this method never work twice if you want to reload, use `e4u.reload()` insted."""
    if has_loaded():
        # Loading is a one-shot operation; use e4u.reload() to force it again.
        return
    reload(filename, url, loader_class)
def validate_input_source_config_source_candidate_candidate(self, **kwargs):
    """Auto Generated Code.

    Builds the validate/input/source/config-source/candidate/candidate XML
    payload and hands the tree to the configured callback.
    """
    config = ET.Element("validate")
    input_el = ET.SubElement(config, "input")
    source = ET.SubElement(input_el, "source")
    config_source = ET.SubElement(source, "config-source")
    outer_candidate = ET.SubElement(config_source, "candidate")
    ET.SubElement(outer_candidate, "candidate")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def cos_well(f=Ellipsis, width=np.pi/2, offset=0, scale=1):
    '''cos_well() yields a potential function g(x) that calculates 0.5*(1 - cos(x)) for
    -pi/2 <= x <= pi/2 and is 1 outside of that range.

    The full formula of the cosine well, including optional arguments, is:
      scale/2 * (1 - cos((x - offset) / (width/pi)))

    The following optional arguments may be given:
      * width (default: pi) specifies that the frequency of the cos-curve should be pi/width; the
        width is the distance between the points on the cos-curve with the value of 1.
      * offset (default: 0) specifies the offset of the minimum value of the cosine curve on the
        x-axis.
      * scale (default: 1) specifies the height of the cosine well.

    NOTE(review): the signature's default is width=np.pi/2 while the text above
    says pi — confirm which is intended.
    '''
    # Coerce f into a potential function (Ellipsis selects the identity).
    f = to_potential(f)
    # Angular frequency of the cosine term.
    freq = np.pi/width*2
    # The well occupies [offset - width/2, offset + width/2]; outside that
    # interval the piecewise form takes the constant `scale`.
    (xmn, xmx) = (offset - width/2, offset + width/2)
    F = piecewise(scale, ((xmn, xmx), scale/2 * (1 - cos(freq*(identity - offset)))))
    if is_const_potential(f):
        # Constant input: evaluate the well once at the constant value.
        return const_potential(F.value(f.c))
    elif is_identity_potential(f):
        # Identity input: the well itself is the answer.
        return F
    else:
        return compose(F, f)
def get(self, url, headers=None, kwargs=None):
    """Make a GET request.

    To make a GET request pass ``url``.

    :param url: ``str``
    :param headers: ``dict``
    :param kwargs: ``dict``
    """
    request_args = dict(method='get', url=url, headers=headers, kwargs=kwargs)
    return self._request(**request_args)
def extraSelections(self, qpart, block, columnIndex):
    """List of QTextEdit.ExtraSelection's which highlight brackets."""
    text = block.text()

    def bracket_in_code(index):
        # True if the character at `index` is a bracket inside code.
        return text[index] in self._ALL_BRACKETS and qpart.isCode(block, index)

    # Prefer the bracket at the cursor, then the one just before it.
    if columnIndex < len(text) and bracket_in_code(columnIndex):
        return self._highlightBracket(text[columnIndex], qpart, block, columnIndex)
    if columnIndex > 0 and bracket_in_code(columnIndex - 1):
        return self._highlightBracket(text[columnIndex - 1], qpart, block, columnIndex - 1)
    self.currentMatchedBrackets = None
    return []
def call_graphviz_dot(src, fmt):
    """Call dot command, and provide helpful error message if we
    cannot find it."""
    try:
        return dot(src, T=fmt)
    except OSError as e:  # pragma: nocover
        # errno 2 == ENOENT: the dot executable itself is missing.
        if e.errno == 2:
            cli.error("""
    cannot find 'dot'

    pydeps calls dot (from graphviz) to create svg diagrams,
    please make sure that the dot executable is available
    on your path.
    """)
        raise
def _set_xfp(self, v, load=False):
    """Setter method for xfp, mapped from YANG variable
    /brocade_interface_ext_rpc/get_media_detail/output/interface/xfp (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_xfp is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_xfp()
    directly.
    """
    # Unwrap values that carry their own user-type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in a YANGDynClass bound to this node's generated metadata.
        t = YANGDynClass(v, base=xfp.xfp, is_container='container', presence=False, yang_name="xfp", rest_name="xfp", parent=self, choice=(u'interface-identifier', u'xfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated error description for this container.
        raise ValueError({'error-string': """xfp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=xfp.xfp, is_container='container', presence=False, yang_name="xfp", rest_name="xfp", parent=self, choice=(u'interface-identifier', u'xfp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""", })
    self.__xfp = t
    # Notify the object that a member changed, if it supports notification.
    if hasattr(self, '_set'):
        self._set()
def _pystmark_call(self, method, *args, **kwargs):
    '''Wraps a call to the pystmark Simple API, adding configured
    settings.'''
    merged_kwargs = self._apply_config(**kwargs)
    return method(*args, **merged_kwargs)
def login(self, username, password=None, email=None, registry=None, reauth=False, **kwargs):
    """Login to a Docker registry server.

    :param username: User name for login.
    :type username: unicode | str
    :param password: Login password; may be ``None`` if blank.
    :type password: unicode | str
    :param email: Optional; email address for login.
    :type email: unicode | str
    :param registry: Optional registry URL to log in to. Uses the Docker index by default.
    :type registry: unicode | str
    :param reauth: Re-authenticate, even if the login has been successful before.
    :type reauth: bool
    :param kwargs: Additional kwargs to :meth:`docker.client.Client.login`.
    :return: ``True`` if the login has succeeded, or if it has not been
        necessary as it succeeded before; ``False`` otherwise.
    :rtype: bool
    """
    response = super(DockerClientWrapper, self).login(
        username, password, email, registry, reauth=reauth, **kwargs)
    # A fresh login reports Status; a cached one just echoes the username.
    succeeded = response.get('Status') == 'Login Succeeded'
    return succeeded or response.get('username') == username
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
    loop_result = tf.while_loop(
        self._continue_search, self._search_step,
        loop_vars=[state], shape_invariants=[state_shapes],
        parallel_iterations=1, back_prop=False)
    final_state = loop_result[0]

    alive_seq = final_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = final_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = final_state[_StateKeys.FINISHED_SEQ]
    finished_scores = final_state[_StateKeys.FINISHED_SCORES]
    finished_flags = final_state[_StateKeys.FINISHED_FLAGS]

    # Corner case: a batch item may have no finished sequences at all; fall
    # back to its alive sequences/scores in that case.
    any_finished = tf.reduce_any(finished_flags, 1)
    finished_seq = tf.where(any_finished, finished_seq, alive_seq)
    finished_scores = tf.where(any_finished, finished_scores, alive_log_probs)
    return finished_seq, finished_scores
def node(self, nodeid):
    """Create a new node with the specified name.

    Validates `nodeid` and returns the Node implementation object created
    for it, wired to a fresh Hub.
    """
    _assert_valid_nodeid(nodeid)
    return Node(hub=Hub(nodeid=nodeid))
def get_layer_names(self):
    """:return: Names of all the layers kept by Keras."""
    return [layer.name for layer in self.model.layers]
def convert(self, mode):
    """Convert the current image to the given *mode*. See :class:`Image`
    for a list of available modes.

    The conversion is done in place: self.mode, self.channels and
    self.fill_value are updated and nothing is returned.

    :param mode: one of "L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA".
    :raises ValueError: if *mode* is unknown or the conversion is not
        implemented.
    """
    if mode == self.mode:
        return
    if mode not in ["L", "LA", "RGB", "RGBA", "YCbCr", "YCbCrA", "P", "PA"]:
        raise ValueError("Mode %s not recognized." % (mode))
    # An empty image can simply adopt the new mode; there is no pixel data.
    if self.is_empty():
        self.mode = mode
        return
    if mode == self.mode + "A":
        # Same mode plus alpha: append a fully-opaque alpha channel.
        self.channels.append(np.ma.ones(self.channels[0].shape))
        if self.fill_value is not None:
            self.fill_value += [1]
        self.mode = mode
    elif mode + "A" == self.mode:
        # Same mode minus alpha: drop the trailing alpha channel.
        self.channels = self.channels[:-1]
        if self.fill_value is not None:
            self.fill_value = self.fill_value[:-1]
        self.mode = mode
    elif mode.endswith("A") and not self.mode.endswith("A"):
        # Target has alpha but source does not: add alpha first, then convert.
        self.convert(self.mode + "A")
        self.convert(mode)
    elif self.mode.endswith("A") and not mode.endswith("A"):
        # Source has alpha but target does not: drop alpha first, then convert.
        self.convert(self.mode[:-1])
        self.convert(mode)
    else:
        # Direct conversions between color spaces (alpha handled above).
        cases = {"RGB": {"YCbCr": self._rgb2ycbcr, "L": self._rgb2l, "P": self._to_p}, "RGBA": {"YCbCrA": self._rgb2ycbcr, "LA": self._rgb2l, "PA": self._to_p}, "YCbCr": {"RGB": self._ycbcr2rgb, "L": self._ycbcr2l, "P": self._to_p}, "YCbCrA": {"RGBA": self._ycbcr2rgb, "LA": self._ycbcr2l, "PA": self._to_p}, "L": {"RGB": self._l2rgb, "YCbCr": self._l2ycbcr, "P": self._to_p}, "LA": {"RGBA": self._l2rgb, "YCbCrA": self._l2ycbcr, "PA": self._to_p}, "P": {"RGB": self._from_p, "YCbCr": self._from_p, "L": self._from_p}, "PA": {"RGBA": self._from_p, "YCbCrA": self._from_p, "LA": self._from_p}}
        try:
            cases[self.mode][mode](mode)
        except KeyError:
            raise ValueError("Conversion from %s to %s not implemented !" % (self.mode, mode))
def from_dict(cls, d):
    """Returns a COHP object from a dict representation of the COHP."""
    def spin_arrays(mapping):
        # Keys are serialized Spin values; restore them and wrap data arrays.
        return {Spin(int(key)): np.array(val) for key, val in mapping.items()}

    icohp = spin_arrays(d["ICOHP"]) if "ICOHP" in d else None
    return Cohp(d["efermi"], d["energies"], spin_arrays(d["COHP"]),
                icohp=icohp, are_coops=d["are_coops"])
def download_badge(test_stats,  # type: TestStats
                   dest_folder='reports/junit'  # type: str
                   ):
    """Downloads the badge corresponding to the provided success percentage,
    from https://img.shields.io, into ``dest_folder``.

    :param test_stats:
    :param dest_folder:
    :return:
    """
    if not path.exists(dest_folder):
        # exist_ok=True is not python 2 compliant, hence the explicit check.
        makedirs(dest_folder)

    # Map the success percentage onto a shields.io colour band.
    pct = test_stats.success_percentage
    if pct < 50:
        color = 'red'
    elif pct < 75:
        color = 'orange'
    elif pct < 90:
        color = 'green'
    else:
        color = 'brightgreen'

    left_txt = "tests"
    # right_txt = "%s%%" % test_stats.success_percentage
    right_txt = "%s/%s" % (test_stats.success, test_stats.runned)
    url = 'https://img.shields.io/badge/%s-%s-%s.svg' % (left_txt, quote_plus(right_txt), color)
    dest_file = path.join(dest_folder, 'junit-badge.svg')
    print('Generating junit badge from : ' + url)
    response = requests.get(url, stream=True)
    with open(dest_file, 'wb') as out_file:
        response.raw.decode_content = True
        shutil.copyfileobj(response.raw, out_file)
    del response
def enum_value(self):
    """Return the value of an enum constant (cached on first access)."""
    if not hasattr(self, '_enum_value'):
        assert self.kind == CursorKind.ENUM_CONSTANT_DECL
        # Figure out the underlying type of the enum to know whether it is a
        # signed or unsigned quantity, so the right libclang accessor is used.
        underlying_type = self.type
        if underlying_type.kind == TypeKind.ENUM:
            underlying_type = underlying_type.get_declaration().enum_type
        unsigned_kinds = (TypeKind.CHAR_U, TypeKind.UCHAR, TypeKind.CHAR16,
                          TypeKind.CHAR32, TypeKind.USHORT, TypeKind.UINT,
                          TypeKind.ULONG, TypeKind.ULONGLONG, TypeKind.UINT128)
        if underlying_type.kind in unsigned_kinds:
            self._enum_value = conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
        else:
            self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
    return self._enum_value
def LoadFromXml(self, node):
    """Method updates the object from the xml.

    Populates classId, dn/rn and the attribute dictionary from the given DOM
    element, then recurses into element children, appending a _GenericMO for
    each one to self.child.
    """
    import os
    self.classId = node.localName
    # Normalize the class id casing against the meta-class registry.
    metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId)
    if metaClassId:
        self.classId = metaClassId
    if node.hasAttribute(NamingPropertyId.DN):
        self.dn = node.getAttribute(NamingPropertyId.DN)
    if self.dn:
        # rn is the last path component of the dn.
        self.rn = os.path.basename(self.dn)
    # Write the attribute and value to dictionary properties, as it is.
    self.WriteToAttributes(node)
    # Run the LoadFromXml for each childNode recursively and populate child list too.
    if (node.hasChildNodes()):
        # childList = node._get_childNodes()
        # childCount = childList._get_length()
        childList = node.childNodes
        childCount = len(childList)
        for i in range(childCount):
            childNode = childList.item(i)
            # Skip text/comment nodes; only element children are objects.
            if (childNode.nodeType != Node.ELEMENT_NODE):
                continue
            c = _GenericMO()
            self.child.append(c)
            c.LoadFromXml(childNode)
def int2str(self, num):
    """Converts an integer into a string.

    :param num: A numeric value to be converted to another base as a
        string.
    :rtype: string
    :raise TypeError: when *num* isn't an integer
    :raise ValueError: when *num* isn't positive
    """
    if int(num) != num:
        raise TypeError('number must be an integer')
    if num < 0:
        raise ValueError('number must be positive')
    radix, alphabet = self.radix, self.alphabet
    # Fast path: defer to %-formatting for standard bases with the standard
    # alphabet prefix.
    if radix in (8, 10, 16) and alphabet[:radix].lower() == BASE85[:radix].lower():
        fmt = {8: '%o', 10: '%d', 16: '%x'}[radix]
        return (fmt % num).upper()
    digits = []
    while True:
        digits.append(alphabet[num % radix])
        if num < radix:
            break
        num //= radix
    return ''.join(reversed(digits))
def delete_object(self, obj, post_delete=False):
    """Delete an object with Discipline.

    Only argument is a Django object. Analogous to Editor.save_object.

    :param obj: the Django model instance to delete.
    :param post_delete: forwarded to the recursive deletions and to
        ``self._delete_object`` — presumably enables post-delete handling;
        TODO confirm against ``_delete_object``.
    """
    # Collect the reverse-relation accessor names whose objects would be
    # deleted by cascading.
    # NOTE(review): Model._meta.get_all_related_objects() was removed in
    # Django 1.10 — confirm which Django version this targets.
    links = [rel.get_accessor_name() for rel in obj._meta.get_all_related_objects()]
    # Recursively delete each related object first.
    for link in links:
        objects = getattr(obj, link).all()
        for o in objects:
            self.delete_object(o, post_delete)
    # Delete the actual object.
    self._delete_object(obj, post_delete)
def format_cert_name(env='', account='', region='', certificate=None):
    """Format the SSL certificate name into ARN for ELB.

    Args:
        env (str): Account environment name
        account (str): Account number for ARN
        region (str): AWS Region.
        certificate (str): Name of SSL certificate

    Returns:
        str: Fully qualified ARN for SSL certificate
        None: Certificate is not desired
    """
    cert_name = None
    if certificate:
        if certificate.startswith('arn'):
            # Caller already supplied a fully-qualified ARN.
            LOG.info("Full ARN provided...skipping lookup.")
            cert_name = certificate
        else:
            generated = generate_custom_cert_name(env, region, account, certificate)
            if generated:
                LOG.info("Found generated certificate %s from template", generated)
                cert_name = generated
            else:
                LOG.info("Using default certificate name logic")
                cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'
                             .format(account=account, name=certificate))
    LOG.debug('Certificate name: %s', cert_name)
    return cert_name
def total_num_violations(self):
    """Returns the total number of lines in the diff
    that are in violation."""
    total = 0
    for summary in self._diff_violations().values():
        total += len(summary.lines)
    return total
def interpolateall(table, fmt, **kwargs):
    """Convenience function to interpolate all values in all fields using
    the `fmt` string.

    The ``where`` keyword argument can be given with a callable or expression
    which is evaluated on each row and which should return True if the
    conversion should be applied on that row, else False.
    """
    def conv(v):
        return fmt % v
    return convertall(table, conv, **kwargs)
def cwd(self):
    """Change the FTP connection to the URL's parent directory and return
    the filename of the last path component."""
    # Re-encode the URL path, replacing characters the target encoding
    # cannot represent.
    # NOTE(review): `.encode()` yields bytes on Python 3, while the
    # str-based strip/split below assume str -- presumably Python 2 era
    # code; confirm target interpreter.
    path = self.urlparts[2].encode(self.filename_encoding, 'replace')
    dirname = path.strip('/')
    dirs = dirname.split('/')
    # The final component is the file itself, not a directory to enter.
    filename = dirs.pop()
    # Walk down from the server root one directory at a time.
    self.url_connection.cwd('/')
    for d in dirs:
        self.url_connection.cwd(d)
    return filename
def is_subdomain_zonefile_hash(fqn, zonefile_hash, db_path=None, zonefiles_dir=None):
    """Static method for getting all historic zone file hashes for a subdomain."""
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        return []
    # Fall back to the configured locations when paths are not supplied.
    db_path = opts['subdomaindb_path'] if db_path is None else db_path
    zonefiles_dir = opts['zonefiles'] if zonefiles_dir is None else zonefiles_dir
    db = SubdomainDB(db_path, zonefiles_dir)
    return db.is_subdomain_zonefile_hash(fqn, zonefile_hash)
def from_config(cls, config, weights=None, weights_loader=None):
    """Deserialize an instance from a dict returned by get_config().

    Parameters
    ----------
    config : dict
    weights : list of array, optional
        Network weights to restore
    weights_loader : callable, optional
        Function to call (no arguments) to load weights when needed

    Returns
    -------
    Class1NeuralNetwork
    """
    # Work on a copy so the caller's dict is not mutated by the pop below.
    settings = dict(config)
    hyperparameters = settings.pop('hyperparameters')
    instance = cls(**hyperparameters)
    instance.__dict__.update(settings)
    instance.network_weights = weights
    instance.network_weights_loader = weights_loader
    # Predictions are cached per-peptide-set; weak keys avoid leaks.
    instance.prediction_cache = weakref.WeakKeyDictionary()
    return instance
def trigger_all_callbacks(self, callbacks=None):
    """Trigger callbacks for all keys on all or a subset of subscribers.

    :param Iterable callbacks: list of callbacks or None for all subscribed
    :rtype: Iterable[tornado.concurrent.Future]
    """
    # Bug fix: the caller-supplied `callbacks` was previously discarded
    # (hard-coded `callbacks=None`), so subset triggering never worked.
    results = []
    for key in self:
        results.extend(self.trigger_callbacks(key, callbacks=callbacks))
    return results
def log_benchmark(fn, start, end):
    """Format a benchmark line for a function and how long it took in seconds.

    :param str fn: Function name
    :param float start: Function start time
    :param float end: Function end time
    :return str: the formatted benchmark line
    """
    # Docstring fix: this function returns the formatted line (it does not
    # log and does not return None, as the old docstring claimed).
    elapsed = round(end - start, 2)
    return "Benchmark - Function: {} , Time: {} seconds".format(fn, elapsed)
def _GetAttributes(self):
    """Retrieve the attributes, building and caching them on first access.

    Returns:
      list[NTFSAttribute]: attributes.
    """
    if self._attributes is not None:
        return self._attributes
    # Wrap each low-level pyfsntfs attribute in its mapped class, falling
    # back to the generic NTFSAttribute.
    self._attributes = []
    for fsntfs_attribute in self._fsntfs_file_entry.attributes:
        attribute_class = self._ATTRIBUTE_TYPE_CLASS_MAPPINGS.get(
            fsntfs_attribute.attribute_type, NTFSAttribute)
        self._attributes.append(attribute_class(fsntfs_attribute))
    return self._attributes
def list_drafts(self):
    """A filterable list view of layers, returning the draft version of each.

    Layers or tables whose most recent version has already been published
    are not returned here.
    """
    url = self.client.get_url('LAYER', 'GET', 'multidraft')
    return base.Query(self, url)
def deconv_rl(data, h, Niter=10):
    """Richardson-Lucy deconvolution of `data` with PSF `h`, using spatial
    convolutions (so `h` should be small).

    :param data: image to deconvolve; np.ndarray (CPU path) or OCLArray (GPU path)
    :param h: point spread function
    :param int Niter: number of Richardson-Lucy iterations
    :raises TypeError: if `data` is neither an ndarray nor an OCLArray
    """
    if isinstance(data, np.ndarray):
        return _deconv_rl_np(data, h, Niter)
    elif isinstance(data, OCLArray):
        return _deconv_rl_gpu_conv(data, h, Niter)
    else:
        # Bug fix: the message referenced the undefined name `arr_obj`,
        # raising NameError instead of the intended TypeError.
        raise TypeError("array argument (1) has bad type: %s" % type(data))
def copy(source_backend_names, bucket_names, static_bucket_name, s3_endpoint, s3_profile, s3_bucket_policy_file, rclone, output):
    """Copy files to S3.

    This command copies files to S3 and records the necessary database changes
    in a JSONL file.

    Multiple bucket names can be specified; in that case the bucket name can change
    based on the year a file was created in. The last bucket name will be the default,
    while any other bucket name must include a conditional indicating when to use it:

    -B '<2001:indico-pre-2001'
    -B '<2009:indico-<year>'
    -B 'indico-<year>-<month>'

    The static bucket name cannot contain any placeholders.
    The indico storage backend will get the same name as the bucket by default,
    but this can be overridden, e.g. `-B 'indico-<year>/s3-<year>'` would name
    the bucket 'indico-2018' but use a backend named 's3-2018'. It is your
    responsibility to ensure that placeholders match between the two names.

    S3 credentials should be specified in the usual places, i.e.
    `~/.aws/credentials` for regular S3 access and `~/.config/rclone/rclone.conf`
    when using rclone.
    """
    # Each entry is 'bucket/backend' or just 'bucket'; in the latter case the
    # backend defaults to the bucket name with any '<criteria>:' prefix removed.
    bucket_names = [tuple(x.split('/', 1)) if '/' in x else (x, x.split(':', 1)[-1]) for x in bucket_names]
    if ':' in bucket_names[-1][0]:
        raise click.UsageError('Last bucket name cannot contain criteria')
    if not all(':' in x[0] for x in bucket_names[:-1]):
        raise click.UsageError('All but the last bucket name need to contain criteria')
    matches = [(re.match(r'^(<|>|==|<=|>=)\s*(\d{4}):(.+)$', name), backend) for name, backend in bucket_names[:-1]]
    if not all(x[0] for x in matches):
        # NOTE(review): entries of `matches` are tuples, never None, so
        # `matches.index(None)` raises ValueError here instead of locating
        # the failing entry -- this error path looks broken; confirm.
        raise click.UsageError("Could not parse '{}'".format(bucket_names[matches.index(None)]))
    criteria = [(match.groups(), backend) for match, backend in matches]
    # Build and compile a function to get the bucket/backend name to avoid
    # processing the criteria for every single file (can be millions for large
    # instances)
    code = ['def get_bucket_name(dt):']
    if criteria:
        for i, ((op, value, bucket), backend) in enumerate(criteria):
            code.append(' {}if dt.year {} {}:'.format('el' if i else '', op, value))
            code.append(' bucket, backend = {!r}'.format((bucket, backend)))
        code.append(' else:')
        code.append(' bucket, backend = {!r}'.format(bucket_names[-1]))
    else:
        code.append(' bucket, backend = {!r}'.format(bucket_names[-1]))
    # Placeholder substitution happens per-file on the file's creation date.
    code.append(' bucket = bucket.replace("<year>", dt.strftime("%Y"))')
    code.append(' bucket = bucket.replace("<month>", dt.strftime("%m"))')
    code.append(' bucket = bucket.replace("<week>", dt.strftime("%W"))')
    code.append(' backend = backend.replace("<year>", dt.strftime("%Y"))')
    code.append(' backend = backend.replace("<month>", dt.strftime("%m"))')
    code.append(' backend = backend.replace("<week>", dt.strftime("%W"))')
    code.append(' return bucket, backend')
    d = {}
    # Python 2 exec statement: compile the generated dispatch function once.
    exec '\n'.join(code) in d
    if not source_backend_names:
        # Default to every non-S3 backend configured for the instance.
        source_backend_names = [x for x in config.STORAGE_BACKENDS if not isinstance(get_storage(x), S3StorageBase)]
    if rclone:
        # rclone only supports plain filesystem sources; warn before going on.
        invalid = [x for x in source_backend_names if not isinstance(get_storage(x), FileSystemStorage)]
        if invalid:
            click.secho('Found unsupported storage backends: {}'.format(', '.join(sorted(invalid))), fg='yellow')
            click.secho('The backends might not work together with `--rclone`', fg='yellow')
            click.confirm('Continue anyway?', abort=True)
    s3_bucket_policy = s3_bucket_policy_file.read() if s3_bucket_policy_file else None
    imp = S3Importer(d['get_bucket_name'], static_bucket_name, output, source_backend_names, rclone, s3_endpoint, s3_profile, s3_bucket_policy)
    with monkeypatch_registration_file_time():
        imp.run()
def handle_cmd(self, command, application):
    """Handle running a given dot command from a user.

    :type command: str
    :param command: The full dot command string, e.g. ``.edit``,
        or ``.profile prod``.
    :type application: AWSShell
    :param application: The application object.
    """
    parts = command.split()
    # Strip the leading '.' to obtain the handler name.
    cmd_name = parts[0][1:]
    handler_cls = self.HANDLER_CLASSES.get(cmd_name)
    if handler_cls is None:
        self._unknown_cmd(parts, application)
    else:
        # Note we expect the class to support no-arg instantiation.
        return handler_cls().run(parts, application)
def solvemdbi_rsm(ah, rho, b, axisK, dimN=2):
    r"""Solve a multiple diagonal block linear system with a scaled
    identity term by repeated application of the Sherman-Morrison
    equation. The computation is performed by explicitly constructing
    the inverse operator, leading to an :math:`O(K)` time cost and
    :math:`O(M^2)` memory cost, where :math:`M` is the dimension of
    the axis over which inner products are taken.

    The solution is obtained by independently solving a set of linear
    systems of the form (see :cite:`wohlberg-2016-efficient`)

    .. math::
       (\rho I + \mathbf{a}_0 \mathbf{a}_0^H + \mathbf{a}_1 \mathbf{a}_1^H +
       \; \ldots \; + \mathbf{a}_{K-1} \mathbf{a}_{K-1}^H) \; \mathbf{x} =
       \mathbf{b}

    where each :math:`\mathbf{a}_k` is an :math:`M`-vector.
    The sums, inner products, and matrix products in this equation are taken
    along the M and K axes of the corresponding multi-dimensional arrays;
    the solutions are independent over the other axes.

    Parameters
    ----------
    ah : array_like
      Linear system component :math:`\mathbf{a}^H`
    rho : float
      Linear system parameter :math:`\rho`
    b : array_like
      Linear system component :math:`\mathbf{b}`
    axisK : int
      Axis in input corresponding to index k in linear system
    dimN : int, optional (default 2)
      Number of spatial dimensions arranged as leading axes in input array.
      Axis M is taken to be at dimN+2.

    Returns
    -------
    x : ndarray
      Linear system solution :math:`\mathbf{x}`
    """
    # Axis holding the M-dimensional vectors over which inner products run.
    axisM = dimN + 2
    # Full slices for all axes preceding axisK.
    slcnc = (slice(None),) * axisK
    M = ah.shape[axisM]
    K = ah.shape[axisK]
    a = np.conj(ah)
    # Initialize with the inverse of the rho*I term, broadcast over the
    # spatial axes: Ainv = (1/rho) * I.
    Ainv = np.ones(ah.shape[0:dimN] + (1,) * 4) * np.reshape(np.eye(M, M) / rho, (1,) * (dimN + 2) + (M, M))
    for k in range(0, K):
        # Select rank-one component k, adding an axis for the outer-product
        # structure of the update.
        slck = slcnc + (slice(k, k + 1),) + (slice(None), np.newaxis,)
        # Sherman-Morrison update:
        # Ainv <- Ainv - (Ainv a a^H Ainv) / (1 + a^H Ainv a)
        Aia = inner(Ainv, np.swapaxes(a[slck], dimN + 2, dimN + 3), axis=dimN + 3)
        ahAia = 1.0 + inner(ah[slck], Aia, axis=dimN + 2)
        ahAi = inner(ah[slck], Ainv, axis=dimN + 2)
        AiaahAi = Aia * ahAi
        Ainv = Ainv - AiaahAi / ahAia
    # Apply the explicitly constructed inverse operator to b.
    return np.sum(Ainv * np.swapaxes(b[(slice(None),) * b.ndim + (np.newaxis,)], dimN + 2, dimN + 3), dimN + 3)
def wallet_work_get(self, wallet):
    """Returns a list of pairs of account and work from **wallet**

    .. enable_control required
    .. version 8.0 required

    :param wallet: Wallet to return work for
    :type wallet: str
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_work_get(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
    ... )
    {"xrb_11111hifc8npp": "432e5cf728c90f4f"}
    """
    payload = {"wallet": self._process_value(wallet, 'wallet')}
    response = self.call('wallet_work_get', payload)
    # An empty wallet yields no 'works' key; normalize to an empty dict.
    return response.get('works') or {}
def get_instance_type(self, port):
    """Determine the port's resource type based on device owner and vnic type.

    Returns one of the a_const resource constants, or None when the device
    owner is not recognized.
    """
    # Baremetal ports are identified by VNIC type, not by device owner.
    if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
        return a_const.BAREMETAL_RESOURCE
    # Direct device-owner to resource-type mapping.
    owner_to_type = {n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE, n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE, trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
    if port['device_owner'] in owner_to_type.keys():
        return owner_to_type[port['device_owner']]
    elif port['device_owner'].startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX):
        # Any compute-owned port (e.g. 'compute:nova') is treated as a VM port.
        return a_const.VM_RESOURCE
    return None
def angact_iso(x, params):
    """Calculate angle and action variables in an isochrone potential with
    parameters params = (M, b).

    Returns the concatenation of actions (J_r, L_z, L - |L_z|) and angles
    (theta_r, theta_phi, theta_z) mod 2*pi, or a 6-tuple of NaN for
    unbound orbits.
    """
    GM = Grav * params[0]
    E = H_iso(x, params)
    r, p, t, vr, vphi, vt = cart2spol(x)
    st = np.sin(t)
    # z-component and magnitude of the angular momentum.
    Lz = r * vphi * st
    L = np.sqrt(r * r * vt * vt + Lz * Lz / st / st)
    if (E > 0.):  # Unbound
        return (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
    # Radial action for the isochrone potential.
    Jr = GM / np.sqrt(-2 * E) - 0.5 * (L + np.sqrt(L * L + 4 * GM * params[1]))
    action = np.array([Jr, Lz, L - abs(Lz)])
    c = GM / (-2 * E) - params[1]
    e = np.sqrt(1 - L * L * (1 + params[1] / c) / GM / c)
    # Eccentric-anomaly-like phase of the radial oscillation.
    eta = np.arctan2(r * vr / np.sqrt(-2. * E), params[1] + c - np.sqrt(params[1] ** 2 + r * r))
    # Radial and azimuthal frequencies.
    OmR = np.power(-2 * E, 1.5) / GM
    Omp = 0.5 * OmR * (1 + L / np.sqrt(L * L + 4 * GM * params[1]))
    thetar = eta - e * c * np.sin(eta) / (c + params[1])
    if (abs(vt) > 1e-10):
        psi = np.arctan2(np.cos(t), -np.sin(t) * r * vt / L)
    else:
        psi = np.pi / 2.
    a = np.sqrt((1 + e) / (1 - e))
    ap = np.sqrt((1 + e + 2 * params[1] / c) / (1 - e + 2 * params[1] / c))
    # Branch-corrected half-angle arctangent used in the angle expansion.
    F = lambda x, y: np.pi / 2. - np.arctan(np.tan(np.pi / 2. - 0.5 * y) / x) if y > np.pi / 2. else -np.pi / 2. + np.arctan(np.tan(np.pi / 2. + 0.5 * y) / x) if y < -np.pi / 2. else np.arctan(x * np.tan(0.5 * y))
    thetaz = psi + Omp * thetar / OmR - F(a, eta) - F(ap, eta) / np.sqrt(1 + 4 * GM * params[1] / L / L)
    LR = Lz / L
    sinu = LR / np.sqrt(1. - LR ** 2) / np.tan(t)
    u = 0
    # Clamp against round-off before taking arcsin.
    if (sinu > 1.):
        u = np.pi / 2.
    elif (sinu < -1.):
        u = -np.pi / 2.
    else:
        u = np.arcsin(sinu)
    if (vt > 0.):
        u = np.pi - u
    thetap = p - u + np.sign(Lz) * thetaz
    angle = np.array([thetar, thetap, thetaz])
    return np.concatenate((action, angle % (2. * np.pi)))
def _irc_upper(self, in_string):
    """Convert us to our upper-case equivalent, given our casemapping std."""
    conv_string = self._translate(in_string)
    # NOTE(review): when an upper-case translation table is present, this
    # translates the ORIGINAL string, discarding the self._translate()
    # result computed above -- confirm this is intentional.
    if self._upper_trans is not None:
        conv_string = in_string.translate(self._upper_trans)
    return str.upper(conv_string)
def _handle_exception(self, row, exception):
    """Log an exception that occurred during transformation of a row.

    :param list|dict|() row: The source row.
    :param Exception exception: The exception.
    """
    # Emit the line number, the raw row, the message, and the traceback.
    messages = [
        'Error during processing of line {0:d}.'.format(self._source_reader.row_number),
        row,
        str(exception),
        traceback.format_exc(),
    ]
    for message in messages:
        self._log(message)
def flexifunction_buffer_function_ack_send(self, target_system, target_component, func_index, result, force_mavlink1=False):
    '''Flexifunction type and parameters for component at function index from
    buffer

    target_system     : System ID (uint8_t)
    target_component  : Component ID (uint8_t)
    func_index        : Function index (uint16_t)
    result            : result of acknowledge, 0=fail, 1=good (uint16_t)
    '''
    # Encode first, then hand the message to the generic send path.
    message = self.flexifunction_buffer_function_ack_encode(
        target_system, target_component, func_index, result)
    return self.send(message, force_mavlink1=force_mavlink1)
def _classic_4d_to_nifti(grouped_dicoms, output_file):
    """Convert a Siemens classic (non-mosaic) 4D series to a nifti.

    Some inspiration on which fields can be used was taken from
    http://slicer.org/doc/html/DICOMDiffusionVolumePlugin_8py_source.html

    Returns a dict with the nifti image/file and, for diffusion series,
    the bval/bvec data and file paths.
    """
    # Combine all timepoints into one flat list for orientation validation.
    all_dicoms = [i for sl in grouped_dicoms for i in sl]
    common.validate_orientation(all_dicoms)
    # Create the 4D data block.
    logger.info('Creating data block')
    full_block = _classic_get_full_block(grouped_dicoms)
    logger.info('Creating affine')
    # The affine is derived from the first timepoint only.
    affine, slice_increment = common.create_affine(grouped_dicoms[0])
    logger.info('Creating nifti')
    # Convert to nifti
    nii_image = nibabel.Nifti1Image(full_block, affine)
    # TR/TE are taken from the first slice of the first timepoint.
    common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime))
    logger.info('Saving nifti to disk')
    # Save to disk
    if output_file is not None:
        nii_image.to_filename(output_file)
    if _is_diffusion_imaging(grouped_dicoms[0][0]):
        logger.info('Creating bval en bvec')
        bval_file = None
        bvec_file = None
        if output_file is not None:
            # Derive bval/bvec paths next to the output file, stripping a
            # double extension such as '.nii.gz'.
            base_path = os.path.dirname(output_file)
            base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
            logger.info('Creating bval en bvec files')
            bval_file = '%s/%s.bval' % (base_path, base_name)
            bvec_file = '%s/%s.bvec' % (base_path, base_name)
        bval = _create_bvals(grouped_dicoms, bval_file)
        bvec = _create_bvecs(grouped_dicoms, bvec_file)
        return {'NII_FILE': output_file, 'BVAL_FILE': bval_file, 'BVEC_FILE': bvec_file, 'NII': nii_image, 'BVAL': bval, 'BVEC': bvec, 'MAX_SLICE_INCREMENT': slice_increment}
    return {'NII_FILE': output_file, 'NII': nii_image, 'MAX_SLICE_INCREMENT': slice_increment}
def directive_DCB(self, label, params):
    """label DCB value [, value...]

    Allocate a byte space in read only memory for the value or list of values.
    """
    # TODO make this read only
    # TODO check for byte size
    # Record the label at the current allocation position.
    self.labels[label] = self.space_pointer
    # Resolve an EQU symbol to its literal value when applicable.
    value = self.equates.get(params, params)
    self.memory[self.space_pointer] = self.convert_to_integer(value) & 0xFF
    self.space_pointer += 1
def set_parent(self, key_name, new_parent):
    """Set the parent of the key, rebaking derived state around the edit."""
    self.unbake()
    self.dct[key_name]['parent'] = new_parent
    self.bake()
def inherit_type(self, type_cls: Type[TInherit]) -> Union[TInherit, 'Publisher']:
    """enables the usage of method and attribute overloading for this
    publisher.

    :param type_cls: type whose methods and attributes should be made
        available on this publisher.
    :return: self (typed as the inherited type, for chaining).
    """
    # Stored for later attribute/method lookup delegation.
    self._inherited_type = type_cls
    return self
def format_t_into_dhms_format(timestamp):
    """Convert an amount of seconds into days, hours, minutes and seconds.

    :param timestamp: seconds
    :type timestamp: int
    :return: 'Ad Bh Cm Ds'
    :rtype: str

    >>> format_t_into_dhms_format(456189)
    '5d 6h 43m 9s'

    >>> format_t_into_dhms_format(3600)
    '0d 1h 0m 0s'
    """
    # Successive divmod peels off minutes, hours, then days.
    minutes, seconds = divmod(timestamp, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%sd %sh %sm %ss' % (days, hours, minutes, seconds)
def get_current_revision(database_url: str, version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str:
    """Ask the database what its current Alembic revision is.

    Arguments:
        database_url: SQLAlchemy URL for the database
        version_table: table name for Alembic versions

    Returns:
        the current revision identifier (None for an unversioned database)
    """
    engine = create_engine(database_url)
    # Use a context manager so the connection is released even on error;
    # the previous version leaked the connection.
    with engine.connect() as conn:
        opts = {'version_table': version_table}
        mig_context = MigrationContext.configure(conn, opts=opts)
        return mig_context.get_current_revision()
def getImage(path, dockerfile, tag):
    '''Check if an image with a given tag exists. If not, build an image
    using the given dockerfile in the given path, tagging it with the tag.
    No extra side effects. Handles and reraises BuildError, TypeError, and
    APIError exceptions.
    '''
    image = getImageByTag(tag)
    if image:
        return image
    # Build an Image using the dockerfile in the path
    try:
        return client.images.build(path=path, dockerfile=dockerfile, tag=tag)
    except BuildError:
        eprint("Failed to build docker image")
        raise
    except TypeError:
        eprint("You must give a path to the build environemnt.")
        raise
    except APIError:
        eprint("Unhandled error while building image", tag)
        raise
def start(self):
    """The main method that starts the service. This is blocking."""
    # One-time setup hooks run before the HTTP server exists.
    self._initial_setup()
    self.on_service_start()
    self.app = self.make_tornado_app()
    enable_pretty_logging()
    self.app.listen(self.port, address=self.host)
    self._start_periodic_tasks()
    # starts the event handlers
    self._initialize_event_handlers()
    self._start_event_handlers()
    try:
        self.io_loop.start()
    except RuntimeError:
        # TODO: find a way to check if the io_loop is running before trying
        # to start it -- this method to check if the loop is running is ugly.
        # A RuntimeError here means the loop was already running.
        pass
def fromJD(jd, utcoffset):
    """Build a Datetime object given a Julian day number and a UTC offset."""
    # Normalize the offset to a Time instance.
    offset = utcoffset if isinstance(utcoffset, Time) else Time(utcoffset)
    localJD = jd + offset.value / 24.0
    date = Date(round(localJD))
    time = Time((localJD + 0.5 - date.jdn) * 24)
    return Datetime(date, time, offset)
def _cast_types(self, values, cast_type, column):
    """Cast values to specified type

    Parameters
    ----------
    values : ndarray
    cast_type : string or np.dtype
        dtype to cast values to
    column : string
        column name - used only for error reporting

    Returns
    -------
    converted : ndarray
    """
    if is_categorical_dtype(cast_type):
        # A CategoricalDtype with explicit categories needs no string pass.
        known_cats = (isinstance(cast_type, CategoricalDtype) and cast_type.categories is not None)
        if not is_object_dtype(values) and not known_cats:
            # XXX this is for consistency with
            # c-parser which parses all categories
            # as strings
            values = astype_nansafe(values, str)
        cats = Index(values).unique().dropna()
        values = Categorical._from_inferred_categories(cats, cats.get_indexer(values), cast_type, true_values=self.true_values)
    # use the EA's implementation of casting
    elif is_extension_array_dtype(cast_type):
        # ensure cast_type is an actual dtype and not a string
        cast_type = pandas_dtype(cast_type)
        array_type = cast_type.construct_array_type()
        try:
            return array_type._from_sequence_of_strings(values, dtype=cast_type)
        except NotImplementedError:
            raise NotImplementedError("Extension Array: {ea} must implement " "_from_sequence_of_strings in order " "to be used in parser methods".format(ea=array_type))
    else:
        # Plain dtype: NaN-safe cast, re-raising with the column name for
        # a more useful error message.
        try:
            values = astype_nansafe(values, cast_type, copy=True, skipna=True)
        except ValueError:
            raise ValueError("Unable to convert column {column} to type " "{cast_type}".format(column=column, cast_type=cast_type))
    return values
def _ExpandPath(self, target, vals, paths):
    """Extract path information, interpolating current path values as needed."""
    if target not in self._TARGETS:
        return
    expanded = []
    for val in vals:
        if not val:
            # Null entries specify the current directory, so :a::b:c: is
            # equivalent to .:a:.:b:c:.
            expanded.append(".")
            continue
        shellvar = self._SHELLVAR_RE.match(val)
        if shellvar:
            # The value may actually be in braces as well. Always convert to
            # upper case so we deal with stuff like lowercase csh path.
            existing = paths.get(shellvar.group(1).upper())
            if existing:
                expanded.extend(existing)
            else:
                expanded.append(val)
        else:
            expanded.append(val)
    paths[target] = expanded
def make_symlink(src_path, lnk_path):
    """Safely create a symbolic link to an input field."""
    # Check for Lustre 60-character symbolic link path bug
    if CHECK_LUSTRE_PATH_LEN:
        src_path = patch_lustre_path(src_path)
        lnk_path = patch_lustre_path(lnk_path)
    # os.symlink will happily make a symlink to a non-existent
    # file, but we don't want that behaviour
    # XXX: Do we want to be doing this?
    if not os.path.exists(src_path):
        return
    try:
        os.symlink(src_path, lnk_path)
    except EnvironmentError as exc:
        # Only EEXIST is tolerated; anything else is a real failure.
        if exc.errno != errno.EEXIST:
            raise
        elif not os.path.islink(lnk_path):
            # Warn the user, but do not interrupt the job
            print("Warning: Cannot create symbolic link to {p}; a file named " "{f} already exists.".format(p=src_path, f=lnk_path))
        else:
            # Overwrite any existing symbolic link
            if os.path.realpath(lnk_path) != src_path:
                os.remove(lnk_path)
                os.symlink(src_path, lnk_path)
def _get_tick_frac_labels(self):
    """Get the major tick fractions, minor tick fractions, and major labels.

    Returns:
        (major_frac, minor_frac, labels) for a linear axis.

    Raises:
        NotImplementedError: for 'logarithmic' and 'power' scale types.
    """
    minor_num = 4  # number of minor ticks per major division
    if (self.axis.scale_type == 'linear'):
        domain = self.axis.domain
        if domain[1] < domain[0]:
            flip = True
            domain = domain[::-1]
        else:
            flip = False
        offset = domain[0]
        scale = domain[1] - domain[0]
        transforms = self.axis.transforms
        length = self.axis.pos[1] - self.axis.pos[0]  # in logical coords
        n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
        # major = np.linspace(domain[0], domain[1], num=11)
        # major = MaxNLocator(10).tick_values(*domain)
        major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)
        labels = ['%g' % x for x in major]
        majstep = major[1] - major[0]
        minor = []
        minstep = majstep / (minor_num + 1)
        # Optionally extend minors one major step beyond each end.
        minstart = 0 if self.axis._stop_at_major[0] else -1
        minstop = -1 if self.axis._stop_at_major[1] else 0
        for i in range(minstart, len(major) + minstop):
            maj = major[0] + i * majstep
            minor.extend(np.linspace(maj + minstep, maj + majstep - minstep, minor_num))
        major_frac = (major - offset) / scale
        minor_frac = (np.array(minor) - offset) / scale
        major_frac = major_frac[::-1] if flip else major_frac
        # Keep only ticks that fall inside the axis (with float tolerance).
        use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
        major_frac = major_frac[use_mask]
        labels = [l for li, l in enumerate(labels) if use_mask[li]]
        minor_frac = minor_frac[(minor_frac > -0.0001) & (minor_frac < 1.0001)]
    elif self.axis.scale_type == 'logarithmic':
        # Bug fix: this previously *returned* the NotImplementedError class
        # instead of raising it.
        raise NotImplementedError('logarithmic scale not supported')
    elif self.axis.scale_type == 'power':
        raise NotImplementedError('power scale not supported')
    return major_frac, minor_frac, labels
def recv_all(self, timeout='default'):
    """Return all data received until the connection closes.

    Aliases: read_all, readall, recvall
    """
    header = '======== Receiving until close{timeout_text} ========'
    self._print_recv_header(header, timeout)
    # A predicate of 0 is never satisfied: read until EOF without raising.
    return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def delay_off(self):
    """The `timer` trigger will periodically change the LED brightness between
    0 and the current brightness setting. The `off` time can
    be specified via `delay_off` attribute in milliseconds."""
    # Workaround for ev3dev/ev3dev#225.
    # 'delay_on' and 'delay_off' attributes are created when trigger is set
    # to 'timer', and destroyed when it is set to anything else.
    # This means the file cache may become outdated, and we may have to
    # reopen the file.
    for retry in (True, False):
        try:
            self._delay_off, value = self.get_attr_int(self._delay_off, 'delay_off')
            return value
        except OSError:
            if retry:
                # Drop the stale cached file handle and try once more.
                self._delay_off = None
            else:
                raise
def ipi_name_number(name=None):
    """IPI Name Number field.

    An IPI Name Number is composed of eleven digits.
    So, for example, an IPI Name Number code field can contain 00014107338.

    :param name: name for the field
    :return: a parser for the IPI Name Number field
    """
    field = basic.numeric(11)
    # Default the display name when the caller did not provide one.
    field.setName(name if name is not None else 'IPI Name Number Field')
    return field.setResultsName('ipi_name_n')
def submit_jobs(job_specs):
    """Submit jobs to Grid'5000.

    Args:
        job_specs (dict): pairs of (site, job specification); see the
            Grid'5000 API reference for the specification format.
    """
    gk = get_api_client()
    submitted = []
    try:
        for site, job_spec in job_specs:
            logger.info("Submitting %s on %s" % (job_spec, site))
            submitted.append(gk.sites[site].jobs.create(job_spec))
    except Exception as e:
        # Roll back: delete every job created before the failure.
        logger.error("An error occured during the job submissions")
        logger.error("Cleaning the jobs created")
        for job in submitted:
            job.delete()
        raise e
    return submitted
def get_if_raw_hwaddr(ifname):
    """Returns the packed MAC address configured on 'ifname'."""
    NULL_MAC_ADDRESS = b'\x00' * 6
    # Handle the loopback interface separately
    if ifname == LOOPBACK_NAME:
        return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)
    # Get ifconfig output
    try:
        fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
    except OSError as msg:
        raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
    # Get MAC addresses: keep lines mentioning ether/lladdr/address.
    addresses = [l for l in fd.readlines() if l.find("ether") >= 0 or l.find("lladdr") >= 0 or l.find("address") >= 0]
    if not addresses:
        raise Scapy_Exception("No MAC address found on %s !" % ifname)
    # Pack and return the MAC address
    mac = addresses[0].split(' ')[1]
    # NOTE(review): chr()/''.join() produce str, not bytes -- Python 2 style
    # packing; on Python 3 this would not match NULL_MAC_ADDRESS's bytes
    # type. Confirm target interpreter.
    mac = [chr(int(b, 16)) for b in mac.split(':')]
    return (ARPHDR_ETHER, ''.join(mac))
def _verify(leniency, numobj, candidate, matcher):
    """Return True if the number is a verified number according to the
    leniency."""
    if leniency == Leniency.POSSIBLE:
        return is_possible_number(numobj)
    if leniency == Leniency.VALID:
        # Valid numbers must also use only legal 'x' extension characters
        # and carry a national prefix where one is required.
        valid = (is_valid_number(numobj) and
                 _contains_only_valid_x_chars(numobj, candidate))
        return valid and _is_national_prefix_present_if_required(numobj)
    if leniency == Leniency.STRICT_GROUPING:
        return _verify_strict_grouping(numobj, candidate, matcher)
    if leniency == Leniency.EXACT_GROUPING:
        return _verify_exact_grouping(numobj, candidate, matcher)
    raise Exception("Error: unsupported Leniency value %s" % leniency)
def crop(im, r, c, sz):
    '''Crop `im` to the sz-by-sz square whose top-left corner is (r, c).'''
    rows = slice(r, r + sz)
    cols = slice(c, c + sz)
    return im[rows, cols]
def mnist_blackbox(train_start=0, train_end=60000, test_start=0, test_end=10000, nb_classes=NB_CLASSES, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, nb_epochs=NB_EPOCHS, holdout=HOLDOUT, data_aug=DATA_AUG, nb_epochs_s=NB_EPOCHS_S, lmbda=LMBDA, aug_batch_size=AUG_BATCH_SIZE):
    """MNIST tutorial for the black-box attack from arxiv.org/abs/1602.02697

    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :return: a dictionary with:
             * black-box model accuracy on test set
             * substitute model accuracy on test set
             * black-box model accuracy on adversarial examples transferred
               from the substitute model
    """
    # Set logging level to see debug information
    set_log_level(logging.DEBUG)
    # Dictionary used to keep track and return key accuracies
    accuracies = {}
    # Perform tutorial setup
    assert setup_tutorial()
    # Create TF session
    sess = tf.Session()
    # Get MNIST data
    mnist = MNIST(train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')
    # Initialize substitute training set reserved for adversary
    x_sub = x_test[:holdout]
    y_sub = np.argmax(y_test[:holdout], axis=1)
    # Redefine test set as remaining samples unavailable to adversaries
    x_test = x_test[holdout:]
    y_test = y_test[holdout:]
    # Obtain Image parameters
    img_rows, img_cols, nchannels = x_train.shape[1:4]
    nb_classes = y_train.shape[1]
    # Define input TF placeholders
    x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    # Seed random number generator so tutorial is reproducible
    rng = np.random.RandomState([2017, 8, 30])
    # Simulate the black-box model locally
    # You could replace this by a remote labeling API for instance
    print("Preparing the black-box model.")
    prep_bbox_out = prep_bbox(sess, x, y, x_train, y_train, x_test, y_test, nb_epochs, batch_size, learning_rate, rng, nb_classes, img_rows, img_cols, nchannels)
    model, bbox_preds, accuracies['bbox'] = prep_bbox_out
    # Train substitute using method from https://arxiv.org/abs/1602.02697
    print("Training the substitute model.")
    train_sub_out = train_sub(sess, x, y, bbox_preds, x_sub, y_sub, nb_classes, nb_epochs_s, batch_size, learning_rate, data_aug, lmbda, aug_batch_size, rng, img_rows, img_cols, nchannels)
    model_sub, preds_sub = train_sub_out
    # Evaluate the substitute model on clean test examples
    eval_params = {'batch_size': batch_size}
    acc = model_eval(sess, x, y, preds_sub, x_test, y_test, args=eval_params)
    accuracies['sub'] = acc
    # Initialize the Fast Gradient Sign Method (FGSM) attack object.
    fgsm_par = {'eps': 0.3, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model_sub, sess=sess)
    # Craft adversarial examples using the substitute
    eval_params = {'batch_size': batch_size}
    x_adv_sub = fgsm.generate(x, **fgsm_par)
    # Evaluate the accuracy of the "black-box" model on adversarial examples
    accuracy = model_eval(sess, x, y, model.get_logits(x_adv_sub), x_test, y_test, args=eval_params)
    print('Test accuracy of oracle on adversarial examples generated ' 'using the substitute: ' + str(accuracy))
    accuracies['bbox_on_sub_adv_ex'] = accuracy
    return accuracies
def _cleanup_markers(context_id, task_ids):
    """Delete the FuriousAsyncMarker entities corresponding to ids.

    Removes one FuriousAsyncMarker per task id plus the context's own
    FuriousCompletionMarker in a single batch delete.

    :param context_id: id of the Context whose markers are being purged.
    :param task_ids: iterable of task ids whose async markers to purge.
    """
    logging.debug("Cleanup %d markers for Context %s",
                  len(task_ids), context_id)

    # TODO: Handle exceptions and retries here.
    # Renamed the loop variable so it no longer shadows the builtin `id`.
    delete_keys = [ndb.Key(FuriousAsyncMarker, task_id)
                   for task_id in task_ids]
    delete_keys.append(ndb.Key(FuriousCompletionMarker, context_id))

    # Single batch RPC instead of one delete per marker.
    ndb.delete_multi(delete_keys)

    logging.debug("Markers cleaned.")
def is_valid_index(self, code):
    """returns: True | False, based on whether code is valid.

    The code is upper-cased before being checked against the codes
    returned by ``get_index_list()``.
    """
    # Membership already yields a bool; the `True if ... else False`
    # ternary was redundant.
    return code.upper() in self.get_index_list()
def delete_option_group(name, region=None, key=None, keyid=None, profile=None):
    '''Delete an RDS option group.

    CLI example::

        salt myminion boto_rds.delete_option_group my-opt-group region=us-east-1
    '''
    try:
        connection = _get_conn(region=region, key=key, keyid=keyid,
                               profile=profile)
        if not connection:
            # Could not build a connection; report failure with no message.
            return {'deleted': bool(connection)}

        result = connection.delete_option_group(OptionGroupName=name)
        # Merge the success/failure branches: only the message differs.
        if result:
            message = 'Deleted RDS option group {0}.'.format(name)
        else:
            message = 'Failed to delete RDS option group {0}.'.format(name)
        return {'deleted': bool(result), 'message': message}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
def _after_flush_handler(session, _flush_context):
    """Archive all new/updated/deleted data."""
    dialect = get_dialect(session)
    # Pair each versioning action with the session bucket it applies to.
    buckets = (
        (_versioned_delete, session.deleted),
        (_versioned_insert, session.new),
        (_versioned_update, session.dirty),
    )
    # TODO: Bulk archive insert statements
    for archive_action, pending_rows in buckets:
        for entity in pending_rows:
            # Only Savage-tracked models participate in archiving.
            if not isinstance(entity, SavageModelMixin):
                continue
            if not hasattr(entity, 'ArchiveTable'):
                raise LogTableCreationError('Need to register Savage tables!!')
            archive_action(entity, session,
                           getattr(entity, '_updated_by', None), dialect)
def set_position(self, decl_pos):
    """Set editor position from ENSIME declPos data."""
    if decl_pos["typehint"] == "LineSourcePosition":
        # Line-based positions land at the first column of that line.
        self.editor.set_cursor(decl_pos['line'], 0)
        return
    # OffsetSourcePosition: translate the offset into a row/column pair.
    row, col = self.editor.point2pos(decl_pos["offset"] + 1)
    self.editor.set_cursor(row, col)
def output(self):
    """Output the results to either STDOUT or the configured output file.

    Writes via ``self.output_to_fd``: to ``sys.stdout`` when
    ``self.path_output`` is falsy, otherwise to a file opened at
    ``self.path_output`` (truncating any existing content).

    :return: None
    """
    # Docstring above completes the sentence the original left truncated.
    if not self.path_output:
        self.output_to_fd(sys.stdout)
    else:
        with open(self.path_output, "w") as out:
            self.output_to_fd(out)
def _list_nodes(full=False):
    '''Helper function for the list_* query functions - Constructs the
    appropriate dictionaries to return from the API query.

    full
        If performing a full query, such as in list_nodes_full, change
        this parameter to ``True``.
    '''
    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    vm_pool = server.one.vmpool.info(auth, -2, -1, -1, -1)[1]

    vms = {}
    for vm in _get_xml(vm_pool):
        name = vm.find('NAME').text
        vms[name] = {}

        template = vm.find('TEMPLATE')
        cpu_size = template.find('CPU').text
        memory_size = template.find('MEMORY').text

        private_ips = []
        for nic in template.findall('NIC'):
            # Best effort: a NIC may lack an <IP> element entirely, so
            # check for the element instead of swallowing AttributeError.
            ip = nic.find('IP')
            if ip is not None:
                private_ips.append(ip.text)

        vms[name]['id'] = vm.find('ID').text
        # BUG FIX: `'TEMPLATE_ID' in template` compared the string against
        # the element's *children* (always False in ElementTree); look the
        # child element up explicitly instead.
        template_id = template.find('TEMPLATE_ID')
        if template_id is not None:
            vms[name]['image'] = template_id.text
        vms[name]['name'] = name
        vms[name]['size'] = {'cpu': cpu_size, 'memory': memory_size}
        vms[name]['state'] = vm.find('STATE').text
        vms[name]['private_ips'] = private_ips
        vms[name]['public_ips'] = []
        if full:
            vms[vm.find('NAME').text] = _xml_to_dict(vm)

    return vms
def query(self, coords, order=1):
    """Return E(B-V) at the specified location(s) on the sky.

    See Table 6 of Schlafly & Finkbeiner (2011) for instructions on how
    to convert this quantity to extinction in various passbands.

    Args:
        coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
        order (Optional[int]): Interpolation order to use. Defaults to `1`,
            for linear interpolation.

    Returns:
        A float array containing the SFD E(B-V) at every input coordinate.
        The shape of the output will be the same as the shape of the
        coordinates stored by `coords`.
    """
    # Thin wrapper: delegate to the parent class's interpolating query.
    ebv = super(SFDQuery, self).query(coords, order=order)
    return ebv
def gradfunc(self, p):
    """The gradient-computing function that gets passed to the optimizers,
    if needed."""
    # Push the candidate parameter vector into the model first.
    self._set_stochastics(p)

    # Fill the gradient buffer one coordinate at a time.
    for index in xrange(self.len):
        self.grad[index] = self.diff(index)

    # Optimizers minimize, so hand back the negated gradient.
    return -1 * self.grad
def _get_norms_of_rows ( data_frame , method ) :
"""return a column vector containing the norm of each row""" | if method == 'vector' :
norm_vector = np . linalg . norm ( data_frame . values , axis = 1 )
elif method == 'last' :
norm_vector = data_frame . iloc [ : , - 1 ] . values
elif method == 'mean' :
norm_vector = np . mean ( data_frame . values , axis = 1 )
elif method == 'first' :
norm_vector = data_frame . iloc [ : , 0 ] . values
else :
raise ValueError ( "no normalization method '{0}'" . format ( method ) )
return norm_vector |
def init_hidden(self, batch_size):
    """Initiate the initial state.

    :param batch_size: batch size.
    :type batch_size: int
    :return: Initial state of LSTM
    :rtype: pair of torch.Tensors of shape (num_layers * num_directions,
        batch_size, hidden_size)
    """
    num_directions = 2 if self.bidirectional else 1
    shape = (self.num_layers * num_directions, batch_size, self.lstm_hidden)
    hidden_state = torch.zeros(*shape)
    cell_state = torch.zeros(*shape)
    if self.use_cuda:
        return hidden_state.cuda(), cell_state.cuda()
    return hidden_state, cell_state
def patched_fax_v1_init(self, domain):
    """Initialize the V1 version of Fax.

    Monkey-patched ``__init__`` that pins the version path to the
    2010-04-01 Accounts API for the given domain.

    :returns: V1 version of Fax
    :rtype: twilio.rest.fax.v1.V1.V1
    """
    # Removed leftover debug print of the domain's class name.
    super(TwilioV1, self).__init__(domain)
    self.version = "2010-04-01/Accounts/" + domain.account_sid
    self._faxes = None
def _representative_structure_setter(self, structprop, keep_chain, clean=True, keep_chemicals=None, out_suffix='_clean', outdir=None, force_rerun=False):
    """Set the representative structure by 1) cleaning it and 2) copying over attributes of the original structure.

    The structure is copied because the chains stored may change, and cleaning it makes a new PDB file.

    Args:
        structprop (StructProp): StructProp object to set as representative
        keep_chain (str): Chain ID to keep
        clean (bool): If the PDB file should be cleaned (see ssbio.structure.utils.cleanpdb)
        keep_chemicals (str, list): Keep specified chemical names
        out_suffix (str): Suffix to append to clean PDB file
        outdir (str): Path to output directory
        force_rerun (bool): If an existing representative structure with the same derived ID should be discarded and rebuilt

    Returns:
        StructProp: representative structure
    """
    # Set output directory for cleaned PDB file; fall back to the object's
    # structure_dir, and fail loudly if neither is available.
    if not outdir:
        outdir = self.structure_dir
        if not outdir:
            raise ValueError('Output directory must be specified')

    # Create new ID for this representative structure, it cannot be the same as the original one
    new_id = 'REP-{}'.format(structprop.id)

    # Remove the previously set representative structure if set to force rerun
    if self.structures.has_id(new_id):
        if force_rerun:
            existing = self.structures.get_by_id(new_id)
            self.structures.remove(existing)

    # If the structure is to be cleaned, and which chain to keep
    if clean:
        final_pdb = structprop.clean_structure(outdir=outdir, out_suffix=out_suffix, keep_chemicals=keep_chemicals, keep_chains=keep_chain, force_rerun=force_rerun)
        log.debug('{}: cleaned structure and saved new file at {}'.format(structprop.id, final_pdb))
    else:
        # No cleaning requested: reuse the original file on disk as-is.
        final_pdb = structprop.structure_path

    # Build the representative StructProp around the (possibly cleaned) file;
    # only the kept chain is recorded as present/mapped.
    self.representative_structure = StructProp(ident=new_id, chains=keep_chain, mapped_chains=keep_chain, structure_path=final_pdb, file_type='pdb')
    self.representative_chain = keep_chain

    # Copy over the whitelisted attributes of the original structure,
    # restricted to the kept chain; existing values are overwritten.
    self.representative_structure.update(structprop.get_dict_with_chain(chain=keep_chain), only_keys=self.__representative_structure_attributes, overwrite=True)

    # Save the original structure ID as an extra attribute
    self.representative_structure.original_structure_id = structprop.id

    # Also need to parse the clean structure and save its sequence..
    self.representative_structure.parse_structure()

    # And finally add it to the list of structures
    self.structures.append(self.representative_structure)
def save_xml(self, doc, element):
    '''Save this component group into an xml.dom.Element object.'''
    # Record this group's ID as an attribute on the element itself.
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'groupID', self.group_id)
    # Serialise each member under its own <Members> child element.
    for member in self.members:
        member_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Members')
        member.save_xml(doc, member_element)
        element.appendChild(member_element)
def describe_hosted_zones(zone_id=None, domain_name=None, region=None, key=None,
                          keyid=None, profile=None):
    '''Return detailed info about one, or all, zones in the bound account.
    If neither zone_id nor domain_name is provided, return all zones.
    Note that the return format is slightly different between the 'all'
    and 'single' description types.

    zone_id
        The unique identifier for the Hosted Zone

    domain_name
        The FQDN of the Hosted Zone (including final period)

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_route53.describe_hosted_zones domain_name=foo.bar.com. profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if zone_id and domain_name:
        raise SaltInvocationError('At most one of zone_id or domain_name may '
                                  'be provided')
    retries = 10
    while retries:
        try:
            if zone_id:
                zone_id = zone_id.replace('/hostedzone/', '') \
                    if zone_id.startswith('/hostedzone/') else zone_id
                ret = getattr(conn.get_hosted_zone(zone_id),
                              'GetHostedZoneResponse', None)
            elif domain_name:
                ret = getattr(conn.get_hosted_zone_by_name(domain_name),
                              'GetHostedZoneResponse', None)
            else:
                marker = None
                ret = None
                # BUG FIX: was `while marker is not ''`, an *identity*
                # comparison against a str literal which is never reliable;
                # use equality to detect the end-of-pagination sentinel.
                while marker != '':
                    r = conn.get_all_hosted_zones(start_marker=marker,
                                                  zone_list=ret)
                    ret = r['ListHostedZonesResponse']['HostedZones']
                    marker = r['ListHostedZonesResponse'].get('NextMarker', '')
            return ret if ret else []
        except DNSServerError as e:
            # BUG FIX: previously the error log + `return []` were
            # unreachable (the loop always retried and then fell off the
            # end, implicitly returning None). Decrement first so the final
            # failure is logged and an empty list is returned.
            retries -= 1
            if retries:
                if 'Throttling' == e.code:
                    log.debug('Throttled by AWS API.')
                elif 'PriorRequestNotComplete' == e.code:
                    log.debug('The request was rejected by AWS API. '
                              'Route 53 was still processing a prior request')
                time.sleep(3)
                continue
            log.error('Could not list zones: %s', e.message)
    return []
def transform(self, data):
    """:param data: DataFrame with column to encode
    :return: encoded Series
    """
    with timer('transform %s' % self.name, logging.DEBUG):
        # Flatten the nested column, encode it with the parent class,
        # then restore the (rows, sequence_length) layout.
        flat = self.unnest(data)
        encoded = super(NestedUnique, self).transform(flat)
        return encoded.reshape((len(data), self.sequence_length))
def shape_weights_hidden(self) -> Tuple[int, int, int]:
    """Shape of the array containing the activation of the hidden neurons.

    The first integer value is the number of connection between the
    hidden layers, the second integer value is maximum number of
    neurons of all hidden layers feeding information into another
    hidden layer (all except the last one), and the third integer
    value is the maximum number of the neurons of all hidden layers
    receiving information from another hidden layer (all except the
    first one):

    >>> from hydpy import ANN
    >>> ann = ANN(None)
    >>> ann(nmb_inputs=6, nmb_neurons=(4, 3, 2), nmb_outputs=6)
    >>> ann.shape_weights_hidden
    (2, 4, 3)
    >>> ann(nmb_inputs=6, nmb_neurons=(4,), nmb_outputs=6)
    >>> ann.shape_weights_hidden
    (0, 0, 0)
    """
    if self.nmb_layers <= 1:
        # A single hidden layer has no hidden-to-hidden connections.
        return 0, 0, 0
    neurons = self.nmb_neurons
    senders = max(neurons[:-1])
    receivers = max(neurons[1:])
    return self.nmb_layers - 1, senders, receivers
def ranges(self):
    """Returns a list of addresses with source data."""
    # Materialize as a list so Python 3's lazy `map` iterator doesn't leak
    # out; the docstring promises a list.
    uno_ranges = self._target.getRanges()
    return [SheetAddress._from_uno(r) for r in uno_ranges]
def find_spelling(n):
    """Finds d, r s.t. n - 1 = 2^r * d (with d odd)."""
    # Strip factors of two off n - 1, counting them in r.
    r = 0
    d = n - 1
    while d % 2 == 0:
        d //= 2
        r += 1
    return r, d
def get_current_version_by_config_file() -> str:
    """Get current version from the version variable defined in the configuration

    :return: A string with the current version number
    :raises ImproperConfigurationError: if version variable cannot be parsed
    """
    debug('get_current_version_by_config_file')
    # Config value has the form "path/to/file:variable_name".
    filename, variable = config.get('semantic_release',
                                    'version_variable').split(':')
    variable = variable.strip()
    debug(filename, variable)
    with open(filename, 'r') as fd:
        contents = fd.read()
    # Match e.g.  __version__ = "1.2.3"  at the start of a line.
    pattern = r'^{0}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(variable)
    match = re.search(pattern, contents, re.MULTILINE)
    if not match:
        raise ImproperConfigurationError
    debug(match)
    return match.group(1)
def validate(self, signature, timestamp, nonce):
    """Validate request signature.

    :param signature: A string signature parameter sent by weixin.
    :param timestamp: A int timestamp parameter sent by weixin.
    :param nonce: A int nonce parameter sent by weixin.
    :return: True when the signature matches (and, if ``expires_in`` is
        set, the timestamp is current); False otherwise.
    :raises RuntimeError: when no token is configured.
    """
    import hmac  # local import: used for the timing-safe comparison below

    if not self.token:
        raise RuntimeError('WEIXIN_TOKEN is missing')

    if self.expires_in:
        try:
            timestamp = int(timestamp)
        except (ValueError, TypeError):
            # fake timestamp
            return False

        delta = time.time() - timestamp
        if delta < 0:
            # this is a fake timestamp
            return False

        if delta > self.expires_in:
            # expired timestamp
            return False

    values = [self.token, str(timestamp), str(nonce)]
    s = ''.join(sorted(values))
    hsh = hashlib.sha1(s.encode('utf-8')).hexdigest()
    # SECURITY FIX: use a constant-time comparison for the untrusted
    # signature instead of `==`, preventing timing attacks.
    return hmac.compare_digest(str(signature), hsh)
def generate_id():
    """Generate a 64bit base 16 ID for use as a Span or Trace ID.

    Re-seeds the shared RNG after a fork so child processes do not emit
    the same ID sequence as their parent.
    """
    global _current_pid

    pid = os.getpid()
    if _current_pid != pid:
        # First call in this process (or post-fork): reseed so parent and
        # child produce diverging ID streams.
        _current_pid = pid
        _rnd.seed(int(1000000 * time.time()) ^ pid)

    # '016x' zero-pads to 16 hex digits directly, replacing the old
    # format('02x') + zfill(16) two-step; also avoids shadowing the
    # builtin `id`.
    return format(_rnd.randint(0, 18446744073709551615), '016x')
def apply_heuristic(self, node_a, node_b, heuristic=None):
    """helper function to apply heuristic"""
    # Fall back to the instance's configured heuristic when none is given.
    chosen = heuristic or self.heuristic
    dx = abs(node_a.x - node_b.x)
    dy = abs(node_a.y - node_b.y)
    return chosen(dx, dy)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.