signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def list_domains():
    '''Return a list of the salt_id names of all available Vagrant VMs on
    this host, regardless of the path where they are defined.

    CLI Example:

    .. code-block:: bash

        salt '*' vagrant.list_domains --log-level=info

    The log shows information about all known Vagrant environments
    on this machine. This data is cached and may not be completely
    up-to-date.
    '''
    reply = __salt__['cmd.shell']('vagrant global-status')
    log.info('--->\n%s', reply)
    names = []
    for entry in reply.split('\n'):
        fields = entry.strip().split()
        try:
            # rows describing a machine start with a hexadecimal id
            int(fields[0], 16)
        except (ValueError, IndexError):
            # header, separator, or blank line -- not a machine row
            continue
        salt_id = get_machine_id(fields[1], fields[-1])
        if salt_id:
            names.append(salt_id)
    return names
|
def _create_variant(self, start, end, ref, alt, fsext_len=None, is_dup=False, acc=None, is_ambiguous=False, is_sub=False, is_ext=False, is_no_protein=False, is_init_met=False):
    """Creates a SequenceVariant object"""
    if is_init_met:
        posedit = AARefAlt(ref=ref, alt=alt, init_met=True)
    elif is_ambiguous:
        # An ambiguous variant carries no position/edit information.
        posedit = None
    else:
        # Select the edit type; the order of these tests matters.
        if is_no_protein:
            edit = '0'
        elif is_sub:
            edit = AASub(ref=ref, alt=alt)
        elif is_ext:
            edit = AAExt(ref=ref, alt=alt, aaterm='*', length=fsext_len)
        elif self._is_frameshift:
            edit = AAFs(ref=ref, alt=alt, length=fsext_len)
        elif is_dup:
            edit = Dup()
        elif ref == alt == '':
            edit = AARefAlt(ref='', alt='')
        else:
            edit = AARefAlt(ref=ref, alt=alt)
        posedit = PosEdit(
            pos=Interval(start=start, end=end),
            edit=edit,
            uncertain=hgvs.global_config.mapping.inferred_p_is_uncertain)
    return hgvs.sequencevariant.SequenceVariant(acc, 'p', posedit)
|
def _ExtractGoogleSearchQuery(self, url):
    """Extracts a search query from a Google URL.

    Google Drive: https://drive.google.com/drive/search?q=query
    Google Search: https://www.google.com/search?q=query
    Google Sites: https://sites.google.com/site/.*/system/app/pages/
                  search?q=query

    Args:
        url (str): URL.

    Returns:
        str: search query or None if no query was found.
    """
    if 'search' in url and 'q=' in url:
        query = self._GetBetweenQEqualsAndAmpersand(url)
        if query:
            # '+' encodes a space in query strings
            return query.replace('+', ' ')
    return None
|
def _get_compute_func(self, nmr_samples, thinning, return_output):
    """Get the MCMC algorithm as a computable function.

    Args:
        nmr_samples (int): the number of samples we will draw
        thinning (int): the thinning factor we want to use
        return_output (boolean): if the kernel should return output

    Returns:
        mot.lib.cl_function.CLFunction: the compute function
    """
    # Assemble the OpenCL kernel source as a string.  The output buffers
    # (samples / log_likelihoods / log_priors) are only part of the kernel
    # signature when return_output is set.
    cl_func = '''
        void compute(global uint* rng_state,
                     global mot_float_type* current_chain_position,
                     global mot_float_type* current_log_likelihood,
                     global mot_float_type* current_log_prior,
                     ulong iteration_offset,
                     ulong nmr_iterations,
                     ''' + ('''global mot_float_type* samples,
                     global mot_float_type* log_likelihoods,
                     global mot_float_type* log_priors,''' if return_output else '') + '''
                     void* method_data,
                     void* data){

            bool is_first_work_item = get_local_id(0) == 0;

            rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
                rng_state[0], rng_state[1], rng_state[2], rng_state[3],
                rng_state[4], rng_state[5], 0, 0});
            void* rng_data = (void*)&rand123_rng_data;

            for(ulong i = 0; i < nmr_iterations; i++){
    '''
    if return_output:
        # Only the first work item stores output; every ``thinning``-th
        # iteration is written, so sample index is i / thinning.
        cl_func += '''
                if(is_first_work_item){
                    if(i % ''' + str(thinning) + ''' == 0){
                        log_likelihoods[i / ''' + str(thinning) + '''] = *current_log_likelihood;
                        log_priors[i / ''' + str(thinning) + '''] = *current_log_prior;

                        for(uint j = 0; j < ''' + str(self._nmr_params) + '''; j++){
                            samples[(ulong)(i / ''' + str(thinning) + ''') // remove the interval
                                    + j * ''' + str(nmr_samples) + ''' // parameter index
                            ] = current_chain_position[j];
                        }
                    }
                }
        '''
    # Advance the sampler each iteration and, at the end, persist the RNG
    # state (first 6 words) back to global memory for the next kernel run.
    cl_func += '''
                _advanceSampler(method_data, data, i + iteration_offset, rng_data,
                                current_chain_position, current_log_likelihood, current_log_prior);
            }

            if(is_first_work_item){
                uint state[8];
                rand123_data_to_array(rand123_rng_data, state);
                for(uint i = 0; i < 6; i++){
                    rng_state[i] = state[i];
                }
            }
        }
    '''
    return SimpleCLFunction.from_string(
        cl_func,
        dependencies=[Rand123(), self._get_log_prior_cl_func(),
                      self._get_log_likelihood_cl_func(),
                      SimpleCLCodeObject(self._get_state_update_cl_func(nmr_samples, thinning, return_output))])
|
def add_column(self, col, icol, update_error=True):
    """Attempts to add a single column of :math:`A` to the Nystroem
    approximation and updates the local matrices.

    Parameters
    ----------
    col : ndarray((N,), dtype=float)
        new column of :math:`A`
    icol : int
        index of new column within :math:`A`
    update_error : bool, optional, default=True
        If True, the absolute and relative approximation error will be
        updated after adding the column. If False, then not.

    Returns
    -------
    success : bool
        True if the new column was added to the approximation. False if not.
    """
    # convenience access to current approximation state
    k = self._k          # number of columns selected so far
    d = self._d
    R = self._R_k
    Winv = self._W_k_inv
    # restriction of the new column to the currently selected rows
    b_new = col[self._columns][:, None]
    d_new = d[icol]
    q_new = R[:, icol][:, None]
    # Schur complement of the enlarged W matrix; if it is (numerically)
    # zero, the new column is linearly dependent on the current selection
    # and the rank-one update below would be singular.
    schur_complement = d_new - np.dot(b_new.T, q_new)
    if np.isclose(schur_complement, 0):
        return False
    # otherwise complete the update
    s_new = 1. / schur_complement
    qC = np.dot(b_new.T, R)
    # update Winv via the block-matrix inverse formula
    Winv_new = np.zeros((k + 1, k + 1))
    Winv_new[0:k, 0:k] = Winv + s_new * np.dot(q_new, q_new.T)
    Winv_new[0:k, k] = -s_new * q_new[0:k, 0]
    Winv_new[k, 0:k] = -s_new * q_new[0:k, 0].T
    Winv_new[k, k] = s_new
    R_new = np.vstack((R + s_new * np.dot(q_new, (qC - col.T)), s_new * (-qC + col.T)))
    # forcing known structure on R_new: identity on the selected columns
    sel_new = np.append(self._columns, icol)
    R_new[:, sel_new] = np.eye(k + 1)
    # update Winv
    self._W_k_inv = Winv_new
    # update R
    self._R_k = R_new
    # update C0_k (append the new column of A)
    self._C_k = np.hstack((self._C_k, col[:, None]))
    # update number of selected columns
    self._k += 1
    # add column to present selection
    self._columns = np.append(self._columns, icol)
    # update error estimates if requested
    if update_error:
        self._compute_error()
    # exit with success
    return True
|
def delete(self, *labels):
    '''Delete `labels` from the store, which involves deleting two
    records for each :class:`Label`.

    :rtype: None
    :raises: :exc:`KeyError` if any of the `labels` could not be found.
    '''
    # Each label maps to a pair of keys; gather them all and issue a
    # single bulk delete.
    keys_to_delete = []
    for label in labels:
        keys_to_delete.extend(self._keys_from_label(label))
    self.kvl.delete(self.TABLE, *keys_to_delete)
|
def scale(self):
    """What is a representative number that reflects the magnitude
    of the world holding the paths, for numerical comparisons.

    Returns
    -------
    scale : float
        Approximate size of the world holding this path
    """
    # Use the vertices' peak-to-peak extent per axis rather than exact
    # extents.  Computed as max - min instead of ``ndarray.ptp``, which
    # was removed from ndarray in NumPy 2.0.
    extent = self.vertices.max(axis=0) - self.vertices.min(axis=0)
    # Euclidean length of the per-axis extents.
    return float((extent ** 2).sum() ** .5)
|
def update_attribute_toolbar(self, key=None):
    """Updates the attribute toolbar.

    Parameters
    ----------
    key : 3-tuple of Integer, defaults to current cell
        Cell to which attributes the attributes toolbar is updated
    """
    # Fall back to the cursor cell when no key is given.
    if key is None:
        key = self.actions.cursor
    attributes = self.code_array.cell_attributes[key]
    post_command_event(self, self.ToolbarUpdateMsg, key=key, attr=attributes)
|
def set_value(self, dry_wet: LeakSensorState):
    """Set the value of the state to dry or wet."""
    # 1 when the reported state matches this sensor's type, else 0.
    new_value = 1 if dry_wet == self._dry_wet_type else 0
    self._update_subscribers(new_value)
|
def get_prediction_score(self, node_id):
    """Return the prediction score (if leaf node) or None if its an
    intermediate node.

    Parameters
    ----------
    node_id : id of the node to get the prediction value.

    Returns
    -------
    float or None : returns float value of prediction if leaf node and None
    if not.

    Examples
    --------
    .. sourcecode:: python

        >>> tree.get_prediction_score(120)  # Leaf node
        0.251092

        >>> tree.get_prediction_score(120)  # Not a leaf node
        None
    """
    _raise_error_if_not_of_type(node_id, [int, long], "node_id")
    _numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1)
    target = self.nodes[node_id]
    if target.is_leaf is False:
        return None
    return target.value
|
def _send_message_to_topic(self, topic, message, correlation_id=None):
    """Send a message to RabbitMQ based on the routing key (topic).

    Parameters
    ----------
    topic : str
        The routing key (topic) where the message should be sent to.
    message : FranzEvent
        The message to be sent.

    Raises
    ------
    franz.InvalidMessage
    """
    exchange_name = self.get_exchange_name(topic)
    # Declaring is idempotent; ensures the exchange exists before publishing.
    self._channel.exchange_declare(
        exchange=exchange_name,
        exchange_type='topic',
        durable=True,
    )
    self._channel.basic_publish(
        exchange_name,
        topic,
        self.serialize_message(message),
        properties=self.get_properties(correlation_id=correlation_id),
    )
|
def Iaax(mt, x, *args):
    """(Iä)x: Returns the present value of annuity-certain at the beginning
    of the first year and increasing linearly. Arithmetically increasing
    annuity-anticipatory.
    """
    numerator = Sx(mt, x)
    denominator = Dx(mt, x)
    return numerator / denominator
|
def zkronv(ttA, ttB):
    """Do kronecker product between vectors ttA and ttB.

    Look about kronecker at: https://en.wikipedia.org/wiki/Kronecker_product
    For details about operation refer: https://arxiv.org/abs/1802.02839

    :param ttA: first TT-vector;
    :param ttB: second TT-vector;
    :return: operation result in z-order
    """
    cores_a = _vector.vector.to_list(ttA)
    cores_b = _vector.vector.to_list(ttB)
    # Core-wise Kronecker product; note the (B, A) order for z-ordering.
    mixed_cores = []
    for core_a, core_b in zip(cores_a, cores_b):
        mixed_cores.append(_np.kron(core_b, core_a))
    return _vector.vector.from_list(mixed_cores)
|
def _check_servers(self):
    """Check the servers variable and convert in a valid tuple form"""
    new_servers = []

    def check_format(server):
        # ``server`` is a urlparse result; validate scheme/port, then keep it.
        if server.scheme not in ["thrift", "http", "https"]:
            # BUG FIX: this message previously interpolated ``_type``, a name
            # from the enclosing scope that is unbound when the server was
            # given as a "scheme://host:port" string, raising NameError
            # instead of the intended error.  Use the parsed scheme instead.
            raise RuntimeError("Unable to recognize protocol: \"%s\"" % server.scheme)
        if server.scheme == "thrift":
            if not thrift_connect:
                raise RuntimeError("If you want to use thrift, please install thrift. \"pip install thrift\"")
            if server.port is None:
                raise RuntimeError("If you want to use thrift, please provide a port number")
        new_servers.append(server)

    for server in self.servers:
        if isinstance(server, (tuple, list)):
            if len(list(server)) != 3:
                raise RuntimeError("Invalid server definition: \"%s\"" % repr(server))
            _type, host, port = server
            server = urlparse('%s://%s:%s' % (_type, host, port))
            check_format(server)
        elif isinstance(server, six.string_types):
            if server.startswith(("thrift:", "http:", "https:")):
                server = urlparse(server)
                check_format(server)
                continue
            else:
                tokens = [t for t in server.split(":") if t.strip()]
                if len(tokens) == 2:
                    host = tokens[0]
                    try:
                        port = int(tokens[1])
                    except ValueError:
                        raise RuntimeError("Invalid port: \"%s\"" % tokens[1])
                    # Infer the protocol from the conventional port ranges
                    # (9200-9299: HTTP, 9500-9599: thrift).
                    if 9200 <= port <= 9299:
                        _type = "http"
                    elif 9500 <= port <= 9599:
                        _type = "thrift"
                    else:
                        raise RuntimeError("Unable to recognize port-type: \"%s\"" % port)
                    server = urlparse('%s://%s:%s' % (_type, host, port))
                    check_format(server)
    self.servers = new_servers
|
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(profile, "name").text = kwargs.pop('name')
    # Nested container path: vlan-profile/switchport/
    # access-mac-vlan-classification/access/vlan
    switchport = ET.SubElement(ET.SubElement(profile, "vlan-profile"), "switchport")
    access = ET.SubElement(ET.SubElement(switchport, "access-mac-vlan-classification"), "access")
    vlan = ET.SubElement(access, "vlan")
    ET.SubElement(vlan, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    ET.SubElement(vlan, "access-mac-address").text = kwargs.pop('access_mac_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def register_event(self):
    """Register event listeners on the global event bus."""
    bus = Environment.get_instance().event_bus
    # prepend so these handlers run before other listeners
    bus.prepend_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading)
    bus.prepend_listener(EVENT.POST_SETTLEMENT, self._post_settlement)
|
def sendMessage(self, message, thread_id=None, thread_type=ThreadType.USER):
    """Deprecated. Use :func:`fbchat.Client.send` instead"""
    wrapped = Message(text=message)
    return self.send(wrapped, thread_id=thread_id, thread_type=thread_type)
|
def get_driver(self, desired_capabilities=None):
    """Creates a Selenium driver on the basis of the configuration file
    upon which this object was created.

    :param desired_capabilities: Capabilities that the caller
        desires to override. These have priority over those
        capabilities that are set by the configuration file passed
        to the builder.
    :type desired_capabilities: class:`dict`
    :returns: A driver.
    :raises ValueError: When it can't figure out how to create a
        browser as specified by the BROWSER configuration variable.
    """
    # Caller-supplied capabilities win over those from the config file.
    override_caps = desired_capabilities or {}
    desired_capabilities = self.config.make_selenium_desired_capabilities()
    desired_capabilities.update(override_caps)
    browser_string = self.config.browser
    chromedriver_version = None
    if self.remote:
        driver = self.remote_service.build_driver(desired_capabilities)
        # There is no equivalent for BrowserStack.
        if browser_string == "CHROME" and self.remote_service.name == "saucelabs":
            # On SauceLabs the chromedriver version cannot be autodetected,
            # so the caller must declare it as a capability.
            chromedriver_version = desired_capabilities.get("chromedriver-version", None)
            if chromedriver_version is None:
                raise ValueError("when using Chrome, you must set a "
                                 "``chromedriver-version`` capability so that Selenic "
                                 "can detect which version of Chromedriver will "
                                 "be used.")
    else:
        if browser_string == "CHROME":
            chromedriver_path = self.local_conf["CHROMEDRIVER_PATH"]
            driver = webdriver.Chrome(chromedriver_path,
                                      chrome_options=self.local_conf.get("CHROME_OPTIONS"),
                                      desired_capabilities=desired_capabilities,
                                      service_log_path=self.local_conf["SERVICE_LOG_PATH"],
                                      service_args=self.local_conf.get("SERVICE_ARGS"))
            # Ask the chromedriver binary itself for its version.
            # NOTE(review): the ``ur"..."`` prefix below is Python 2 only
            # syntax; this function cannot run unchanged on Python 3.
            version_line = subprocess.check_output([chromedriver_path, "--version"])
            version_str = re.match(ur"^ChromeDriver (\d+\.\d+)", version_line).group(1)
            chromedriver_version = StrictVersion(version_str)
        elif browser_string == "FIREFOX":
            profile = self.local_conf.get("FIREFOX_PROFILE") or FirefoxProfile()
            binary = self.local_conf.get("FIREFOX_BINARY") or FirefoxBinary()
            driver = webdriver.Firefox(profile, binary, capabilities=desired_capabilities)
        elif browser_string == "INTERNETEXPLORER":
            driver = webdriver.Ie()
        elif browser_string == "OPERA":
            driver = webdriver.Opera()
        else:  # SAFARI
            # HTMLUNIT
            # HTMLUNITWITHJS
            # IPHONE
            # IPAD
            # ANDROID
            # PHANTOMJS
            raise ValueError("can't start a local " + browser_string)
    # Check that what we get is what the config wanted...
    driver_caps = NormalizedCapabilities(driver.desired_capabilities)
    # Compare only the major version component.
    browser_version = re.sub(r"\..*$", "", driver_caps["browserVersion"])
    if driver_caps["platformName"].upper() != self.config.platform:
        raise ValueError("the platform you want is not the one "
                         "you are running selenic on")
    if browser_version != self.config.version:
        raise ValueError("the version installed is not the one "
                         "you wanted")
    # On BrowserStack we cannot set the version of chromedriver or
    # query it. So we make the reasonable assumption that the
    # version of chromedriver is greater than 2.13. (There have
    # been at least 7 releases after 2.13 at the time of writing.)
    if (self.remote_service and self.remote_service.name == "browserstack") or \
            (chromedriver_version is not None and chromedriver_version > StrictVersion("2.13")):
        # We patch ActionChains.
        chromedriver_element_center_patch()
        # We need to mark the driver as needing the patch.
        setattr(driver, CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG, True)
    driver = self.patch(driver)
    return driver
|
def sample_greedy(self):
    """Sample a point in the leaf with the max progress."""
    if self.leafnode:
        # Already at a leaf: sample directly within this node's bounds.
        return self.sample_bounds()
    else:
        lp = self.lower.max_leaf_progress
        gp = self.greater.max_leaf_progress
        maxp = max(lp, gp)
        if self.sampling_mode['multiscale']:
            tp = self.progress
            # In multiscale mode, stop descending when this node's own
            # progress beats the best leaf progress below it.
            if tp > maxp:
                return self.sample_bounds()
        # Recurse into whichever child holds the maximum leaf progress,
        # forcing 'greedy' mode for the remainder of the descent.
        # NOTE(review): ``sampling_mode`` below aliases ``self.sampling_mode``,
        # so setting ['mode'] mutates this node's own dict as a side effect --
        # confirm this is intended rather than operating on a copy.
        if gp == maxp:
            sampling_mode = self.sampling_mode
            sampling_mode['mode'] = 'greedy'
            return self.greater.sample(sampling_mode=sampling_mode)
        else:
            sampling_mode = self.sampling_mode
            sampling_mode['mode'] = 'greedy'
            return self.lower.sample(sampling_mode=sampling_mode)
|
def get_instance(self, payload):
    """Build an instance of CredentialListInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.trunking.v1.trunk.credential_list.CredentialListInstance
    :rtype: twilio.rest.trunking.v1.trunk.credential_list.CredentialListInstance
    """
    trunk_sid = self._solution['trunk_sid']
    return CredentialListInstance(self._version, payload, trunk_sid=trunk_sid)
|
def set(self, key, value):
    """Set the value of a key

    Args:
        key (string): The key used to store this value
        value (string): The value to store
    """
    # read-modify-write the whole backing file
    contents = self._load_file()
    contents[key] = value
    self._save_file(contents)
|
def _gauss_legendre ( order , composite = 1 ) :
"""Backend function ."""
|
inner = numpy . ones ( order + 1 ) * 0.5
outer = numpy . arange ( order + 1 ) ** 2
outer = outer / ( 16 * outer - 4. )
banded = numpy . diag ( numpy . sqrt ( outer [ 1 : ] ) , k = - 1 ) + numpy . diag ( inner ) + numpy . diag ( numpy . sqrt ( outer [ 1 : ] ) , k = 1 )
vals , vecs = numpy . linalg . eig ( banded )
abscis , weight = vals . real , vecs [ 0 , : ] ** 2
indices = numpy . argsort ( abscis )
abscis , weight = abscis [ indices ] , weight [ indices ]
n_abscis = len ( abscis )
composite = numpy . array ( composite ) . flatten ( )
composite = list ( set ( composite ) )
composite = [ comp for comp in composite if ( comp < 1 ) and ( comp > 0 ) ]
composite . sort ( )
composite = [ 0 ] + composite + [ 1 ]
abscissas = numpy . empty ( n_abscis * ( len ( composite ) - 1 ) )
weights = numpy . empty ( n_abscis * ( len ( composite ) - 1 ) )
for dim in range ( len ( composite ) - 1 ) :
abscissas [ dim * n_abscis : ( dim + 1 ) * n_abscis ] = abscis * ( composite [ dim + 1 ] - composite [ dim ] ) + composite [ dim ]
weights [ dim * n_abscis : ( dim + 1 ) * n_abscis ] = weight * ( composite [ dim + 1 ] - composite [ dim ] )
return abscissas , weights
|
def dist_2(x0, y0, x1, y1):
    r"""Return the squared distance between two points.

    This is faster than calculating distance but should
    only be used with comparable ratios.

    Parameters
    ----------
    x0 : float
        Starting x coordinate
    y0 : float
        Starting y coordinate
    x1 : float
        Ending x coordinate
    y1 : float
        Ending y coordinate

    Returns
    -------
    d2 : float
        squared distance

    See Also
    --------
    distance
    """
    dx = x1 - x0
    dy = y1 - y0
    return dx * dx + dy * dy
|
def takes_kwargs(obj) -> bool:
    """Checks whether a provided object takes in any keyword arguments
    (i.e. has a ``**kwargs`` parameter).

    Similar to takes_arg, we do this for both the __init__ function of
    the class or a function / method.
    Otherwise, we raise an error.

    (BUG FIX: the docstring previously said "positional arguments", but the
    code checks for ``VAR_KEYWORD``, i.e. ``**kwargs``.)
    """
    if inspect.isclass(obj):
        signature = inspect.signature(obj.__init__)
    elif inspect.ismethod(obj) or inspect.isfunction(obj):
        signature = inspect.signature(obj)
    else:
        raise ConfigurationError(f"object {obj} is not callable")
    # any() already returns a bool and accepts a generator directly;
    # the previous bool(any([...])) built a throwaway list.
    return any(p.kind == inspect.Parameter.VAR_KEYWORD  # type: ignore
               for p in signature.parameters.values())
|
def _write(self, value):
    """rename to write and import inspect to debut the callstack"""
    if ' ' in value:
        # Emit the immediate caller's name as a debug marker before the
        # actual payload.
        caller = inspect.stack()[1].function
        super().write('%%DEBUG {} %%'.format(caller))
    super().write(value)
|
def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
    """Wrap result set of query in a DataFrame."""
    records = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
    frame = _parse_date_columns(records, parse_dates)
    if index_col is not None:
        frame.set_index(index_col, inplace=True)
    return frame
|
def move_todo_item(self, item_id, to):
    """Changes the position of an item within its parent list. It does not
    currently support reparenting an item. Position 1 is at the top of the
    list. Moving an item beyond the end of the list puts it at the bottom
    of the list.
    """
    request_root = ET.Element('request')
    ET.SubElement(request_root, 'to').text = str(int(to))
    return self._request('/todos/move_item/%u' % item_id, request_root)
|
def _runDeferredFunctions ( deferredFunctions ) :
"""Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked .
: param list [ DeferredFunction ] deferredFunctions : the DeferredFunctions to run
: rtype : list [ str ]"""
|
failures = [ ]
for deferredFunction in deferredFunctions :
try :
deferredFunction . invoke ( )
except :
failures . append ( deferredFunction . name )
logger . exception ( '%s failed.' , deferredFunction )
return failures
|
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None, tsig_ctx=None, multi=False, first=True, question_only=False, one_rr_per_rrset=False):
    """Convert a DNS wire format message into a message object.

    @param keyring: The keyring to use if the message is signed.
    @type keyring: dict
    @param request_mac: If the message is a response to a TSIG-signed
        request, I{request_mac} should be set to the MAC of that request.
    @type request_mac: string
    @param xfr: Is this message part of a zone transfer?
    @type xfr: bool
    @param origin: If the message is part of a zone transfer, I{origin}
        should be the origin name of the zone.
    @type origin: dns.name.Name object
    @param tsig_ctx: The ongoing TSIG context, used when validating zone
        transfers.
    @type tsig_ctx: hmac.HMAC object
    @param multi: Is this message part of a multiple message sequence?
    @type multi: bool
    @param first: Is this message standalone, or the first of a multi
        message sequence?
    @type first: bool
    @param question_only: Read only up to the end of the question section?
    @type question_only: bool
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    @raises ShortHeader: The message is less than 12 octets long.
    @raises TrailingJunk: There were octets in the message past the end
        of the proper DNS message.
    @raises BadEDNS: An OPT record was in the wrong section, or occurred
        more than once.
    @raises BadTSIG: A TSIG record was not the last record of the
        additional data section.
    @rtype: dns.message.Message object
    """
    # Seed an empty message with the TSIG/transfer bookkeeping, then let the
    # wire reader populate it in place.
    message = Message(id=0)
    message.keyring = keyring
    message.request_mac = request_mac
    message.xfr = xfr
    message.origin = origin
    message.tsig_ctx = tsig_ctx
    message.multi = multi
    message.first = first
    _WireReader(wire, message, question_only, one_rr_per_rrset).read()
    return message
|
def p12d_local ( vertices , lame , mu ) :
"""Local stiffness matrix for P1 elements in 2d ."""
|
assert ( vertices . shape == ( 3 , 2 ) )
A = np . vstack ( ( np . ones ( ( 1 , 3 ) ) , vertices . T ) )
PhiGrad = inv ( A ) [ : , 1 : ]
# gradients of basis functions
R = np . zeros ( ( 3 , 6 ) )
R [ [ [ 0 ] , [ 2 ] ] , [ 0 , 2 , 4 ] ] = PhiGrad . T
R [ [ [ 2 ] , [ 1 ] ] , [ 1 , 3 , 5 ] ] = PhiGrad . T
C = mu * np . array ( [ [ 2 , 0 , 0 ] , [ 0 , 2 , 0 ] , [ 0 , 0 , 1 ] ] ) + lame * np . array ( [ [ 1 , 1 , 0 ] , [ 1 , 1 , 0 ] , [ 0 , 0 , 0 ] ] )
K = det ( A ) / 2.0 * np . dot ( np . dot ( R . T , C ) , R )
return K
|
def parse_component_type(self, node):
    """Parses <ComponentType>

    @param node: Node containing the <ComponentType> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the component type does not have a
        name.
    """
    try:
        name = node.lattrib['name']
    # BUG FIX: was a bare ``except:``; only a missing 'name' attribute
    # (KeyError) should be reported as a parse error -- anything else
    # (e.g. KeyboardInterrupt) must propagate.
    except KeyError:
        self.raise_error('<ComponentType> must specify a name')
    if 'extends' in node.lattrib:
        extends = node.lattrib['extends']
    else:
        extends = None
    if 'description' in node.lattrib:
        description = node.lattrib['description']
    else:
        description = ''
    component_type = ComponentType(name, description, extends)
    self.model.add_component_type(component_type)
    # Track the type currently being populated while nested tags are parsed.
    self.current_component_type = component_type
    self.process_nested_tags(node)
    self.current_component_type = None
|
def find_spec(self, fullname, path, target=None):
    """Claims modules that are under ipynb.fs

    Returns a ModuleSpec for the first existing candidate file, or None
    (implicitly) when the module is not ours or no file exists.
    """
    if fullname.startswith(self.package_prefix):
        # BUG FIX (shadowing): the loop variable was previously named
        # ``path``, silently shadowing the ``path`` parameter of the
        # finder protocol.
        for candidate in self._get_paths(fullname):
            if os.path.exists(candidate):
                return ModuleSpec(
                    name=fullname,
                    loader=self.loader_class(fullname, candidate),
                    origin=candidate,
                    is_package=candidate.endswith(('__init__.ipynb', '__init__.py')),
                )
|
def _emit_error ( cls , message ) :
"""Print an error message to STDERR ."""
|
sys . stderr . write ( 'ERROR: {message}\n' . format ( message = message ) )
sys . stderr . flush ( )
|
def absent(name, user=None):
    '''Verify that the specified ruby is not installed with rbenv. Rbenv
    is installed if necessary.

    name
        The version of ruby to uninstall

    user: None
        The user to run rbenv as.

        .. versionadded:: 0.17.0

    .. versionadded:: 0.16.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    # normalise "ruby-X.Y.Z" style names to the bare version
    if name.startswith('ruby-'):
        name = re.sub(r'^ruby-', '', name)
    ret = _check_rbenv(ret, user)
    if ret['result'] is False:
        # rbenv itself is missing, so the ruby cannot be installed either
        ret['result'] = True
        ret['comment'] = 'Rbenv not installed, {0} not either'.format(name)
        return ret
    if __opts__['test']:
        # dry run: only report what would happen
        ret = _ruby_installed(ret, name, user=user)
        if ret['result']:
            ret['result'] = None
            ret['comment'] = 'Ruby {0} is set to be uninstalled'.format(name)
        else:
            ret['result'] = True
            ret['comment'] = 'Ruby {0} is already uninstalled'.format(name)
        return ret
    return _check_and_uninstall_ruby(ret, name, user=user)
|
def mu_so(species, motif, spin_state):
    """Calculates the spin-only magnetic moment for a given species.
    Only supports transition metals.

    :param species: str or Species
    :param motif: "oct" or "tet"
    :param spin_state: "high" or "low"
    :return: spin-only magnetic moment in Bohr magnetons
    """
    try:
        sp = get_el_sp(species)
        n = sp.get_crystal_field_spin(coordination=motif, spin_config=spin_state)
        # spin-only formula: mu = sqrt(n (n + 2)) for n unpaired electrons
        return np.sqrt(n * (n + 2))
    except AttributeError:
        # species without crystal-field data (e.g. not a transition metal)
        return None
|
def parse(self, tokenized):  # pylint: disable=invalid-name
    """Parses input, which is a list of tokens."""
    table, trees = _parse(tokenized, self.grammar)
    span = (0, len(tokenized) - 1)
    # The parse succeeded iff some completed rule spanning the entire
    # input has the grammar's start symbol on its left-hand side.
    if not any(r.lhs == self.start for r in table[span]):
        raise ParseError('Parsing failed.')
    return self._to_tree(revert_cnf(trees[span][self.start]))
|
def lies_on_circle(self, center, radius):
    '''Tests whether the arc circle's center and radius match the given ones within <tol> tolerance.

    >>> a = Arc((0, 0), 1, 0, 0, False)
    >>> a.lies_on_circle((tol / 2, tol / 2), 1 + tol / 2)
    True
    >>> a.lies_on_circle((tol / 2, tol / 2), 1 - tol)
    False
    '''
    center_matches = np.all(abs(np.asarray(center) - self.center) < tol)
    radius_matches = abs(radius - self.radius) < tol
    return center_matches and radius_matches
|
def get_events_attendees(self, event_ids):
    """Get the attendees of the identified events.

    Parameters
    ----------
    event_ids
        List of IDs of events to get attendees for.

    Returns
    -------
    List of tuples of (event id, ``pythonkc_meetups.types.MeetupMember``).

    Exceptions
    ----------
    * PythonKCMeetupsBadJson
    * PythonKCMeetupsBadResponse
    * PythonKCMeetupsMeetupDown
    * PythonKCMeetupsNotJson
    * PythonKCMeetupsRateLimitExceeded
    """
    query = urllib.urlencode({'key': self._api_key, 'event_id': ','.join(event_ids)})
    data = self._http_get_json('{0}?{1}'.format(RSVPS_URL, query))
    attendees = []
    for rsvp in data['results']:
        # "no" RSVPs are not attendees
        if rsvp['response'] == "no":
            continue
        attendees.append((rsvp['event']['id'], parse_member_from_rsvp(rsvp)))
    return attendees
|
def contact_addresses(self):
    """Provides a reference to contact addresses used by this server.

    Obtain a reference to manipulate or iterate existing contact
    addresses::

        >>> from smc.elements.servers import ManagementServer
        >>> mgt_server = ManagementServer.objects.first()
        >>> for contact_address in mgt_server.contact_addresses:
        ...     contact_address
        ContactAddress(location=Default, addresses=[u'1.1.1.1'])
        ContactAddress(location=foolocation, addresses=[u'12.12.12.12'])

    :rtype: MultiContactAddress
    """
    relation = self.get_relation('contact_addresses')
    return MultiContactAddress(href=relation, type=self.typeof, name=self.name)
|
def synchronised(func):
    """Synchronisation decorator: serialises calls through the target
    object's ``_lock``."""
    # pylint: disable=E0213
    def Wrapper(main_obj, *args, **kwargs):
        main_obj._lock.acquire()  # pylint: disable=W0212
        try:
            # pylint: disable=E1102
            return func(main_obj, *args, **kwargs)
        finally:
            # always release, even when func raises
            main_obj._lock.release()  # pylint: disable=W0212
    return Wrapper
|
def _randomize_subject_list(data_list, random):
    """Randomly permute the voxels of a subject list.

    The method shuffles the subjects one by one in place according to
    the random type. If RandomType.NORANDOM, return the original list.

    Parameters
    ----------
    data_list : list of 2D array in shape [nVoxels, nTRs]
        Activity image data list to be shuffled.
    random : RandomType
        Randomization type.

    Returns
    -------
    None.
    """
    if random == RandomType.REPRODUCIBLE:
        # fixed per-subject seeds make the permutation repeatable
        for seed, subject_data in enumerate(data_list):
            _randomize_single_subject(subject_data, seed=seed)
    elif random == RandomType.UNREPRODUCIBLE:
        for subject_data in data_list:
            _randomize_single_subject(subject_data)
|
def is_sql_equal(sqls1, sqls2):
    """Find out equality of two SQL items.

    See https://docs.djangoproject.com/en/1.8/ref/migration-operations/#runsql.

    Args:
        sqls1, sqls2: SQL items, have the same format as supported by
            Django's RunSQL operation.

    Returns:
        (bool) `True` if equal, otherwise `False`.
    """
    # normalise both arguments to sequences of SQL items
    if not isinstance(sqls1, (list, tuple)):
        sqls1 = (sqls1,)
    if not isinstance(sqls2, (list, tuple)):
        sqls2 = (sqls2,)
    if len(sqls1) != len(sqls2):
        return False
    # compare (sql, params) pairs element-wise
    return all(_sql_params(a) == _sql_params(b) for a, b in zip(sqls1, sqls2))
|
def _shrink(self):
    """Shrinks the dynamic table to be at or below maxsize."""
    size = self._current_size
    while size > self._maxsize:
        # pop() evicts from the end of the entry list.
        evicted_name, evicted_value = self.dynamic_entries.pop()
        size -= table_entry_size(evicted_name, evicted_value)
    self._current_size = size
|
def get_pathext(default_pathext=None):
    """Return the executable path extensions from environment or a default."""
    if default_pathext is None:
        # Mirror the stock Windows PATHEXT entries.
        default_pathext = os.pathsep.join(('.COM', '.EXE', '.BAT', '.CMD'))
    return os.environ.get('PATHEXT', default_pathext)
|
def repeat(self, repeats, *args, **kwargs):
    """Repeat elements of an array.

    See Also
    numpy.ndarray.repeat
    """
    nv.validate_repeat(args, kwargs)
    repeated = self._data.repeat(repeats)
    # Re-wrap the raw i8 values in the same array type with the same dtype.
    return type(self)(repeated.view('i8'), dtype=self.dtype)
|
def tensormul(tensor0: BKTensor, tensor1: BKTensor,
              indices: typing.List[int]) -> BKTensor:
    r"""Generalization of matrix multiplication to product tensors.

    A state vector in product tensor representation has N dimension, one for
    each contravariant index, e.g. for 3-qubit states
    :math:`B^{b_0,b_1,b_2}`. An operator has K dimensions, K/2 for
    contravariant indices (e.g. ket components) and K/2 for covariant (bra)
    indices, e.g. :math:`A^{a_0,a_1}_{a_2,a_3}` for a 2-qubit gate. The given
    indices of A are contracted against B, replacing the given positions.

    E.g. ``tensormul(A, B, [0,2])`` is equivalent to

    .. math::

        C^{a_0,b_1,a_1} = \sum_{i_0,i_1} A^{a_0,a_1}_{i_0,i_1} B^{i_0,b_1,i_1}

    Args:
        tensor0: A tensor product representation of a gate
        tensor1: A tensor product representation of a gate or state
        indices: List of indices of tensor1 on which to act.
    Returns:
        Resultant state or gate tensor
    """
    # Note: This method is the critical computational core of QuantumFlow
    # We currently have two implementations, one that uses einsum, the other
    # using matrix multiplication
    #    numpy:
    #        einsum is much faster particularly for small numbers of qubits
    #    tensorflow:
    #        Little different is performance, but einsum would restrict the
    #        maximum number of qubits to 26 (Because tensorflow only allows 26
    #        einsum subscripts at present]
    #    torch:
    #        einsum is slower than matmul
    N = rank(tensor1)        # number of indices of the state/gate acted upon
    K = rank(tensor0) // 2   # number of positions tensor0 acts on (half its indices)
    assert K == len(indices)
    # Build einsum subscripts: tensor1 keeps its labels except at the acted-on
    # positions, where fresh labels (left_in) are introduced and contracted
    # against tensor0's covariant indices.
    out = list(EINSUM_SUBSCRIPTS[0:N])
    left_in = list(EINSUM_SUBSCRIPTS[N:N + K])
    left_out = [out[idx] for idx in indices]
    right = list(EINSUM_SUBSCRIPTS[0:N])
    for idx, s in zip(indices, left_in):
        right[idx] = s  # contracted position takes the fresh label
    subscripts = ''.join(left_out + left_in + [','] + right + ['->'] + out)
    # print('>>>', K, N, subscripts)
    tensor = einsum(subscripts, tensor0, tensor1)
    return tensor
|
def get_home_position(boatd=None):
    '''Get the current home position from boatd.

    :returns: The configured home position, or None when no home is set
    :rtype: Point
    '''
    client = boatd if boatd is not None else Boatd()
    waypoints = client.get('/waypoints')
    home = waypoints.get('home')
    if home is None:
        return None
    lat, lon = home
    return Point(lat, lon)
|
async def get_object(source, *args):
    """Get object asynchronously.

    :param source: mode class or query to get object from
    :param args: lookup parameters
    :return: model instance or raises ``peewee.DoesNotExist`` if object not
        found
    """
    warnings.warn("get_object() is deprecated, Manager.get() "
                  "should be used instead", DeprecationWarning)
    if isinstance(source, peewee.Query):
        query, model = source, source.model
    else:
        query, model = source.select(), source
    # Yield the first matching row, if any.
    rows = await select(query.where(*args))
    for row in rows:
        return row
    raise model.DoesNotExist
|
def _inhibitColumnsWithLateral ( self , overlaps , lateralConnections ) :
"""Performs an experimentatl local inhibition . Local inhibition is
iteratively performed on a column by column basis ."""
|
n , m = self . shape
y = np . zeros ( n )
s = self . sparsity
L = lateralConnections
desiredWeight = self . codeWeight
inhSignal = np . zeros ( n )
sortedIndices = np . argsort ( overlaps , kind = 'mergesort' ) [ : : - 1 ]
currentWeight = 0
for i in sortedIndices :
if overlaps [ i ] < self . _stimulusThreshold :
break
inhTooStrong = ( inhSignal [ i ] >= s )
if not inhTooStrong :
y [ i ] = 1.
currentWeight += 1
inhSignal [ : ] += L [ i , : ]
if self . enforceDesiredWeight and currentWeight == desiredWeight :
break
activeColumns = np . where ( y == 1.0 ) [ 0 ]
return activeColumns
|
def passageLoop(parent, new_tree, xpath1, xpath2=None,
                preceding_siblings=False, following_siblings=False):
    """Loop over passages to construct and increment new tree given a parent and XPaths

    :param parent: Parent on which to perform xpath
    :param new_tree: Parent on which to add nodes
    :param xpath1: List of xpath elements
    :type xpath1: [str]
    :param xpath2: List of xpath elements
    :type xpath2: [str]
    :param preceding_siblings: Append preceding siblings of XPath 1/2 match to the tree
    :param following_siblings: Append following siblings of XPath 1/2 match to the tree
    :return: Newly incremented tree
    """
    current_1, queue_1 = __formatXpath__(xpath1)
    if xpath2 is None:  # In case we need what is following or preceding our node
        result_1, loop = performXpath(parent, current_1)
        if loop is True:
            # performXpath signalled a descendant-style match: restart the
            # remaining queue from the full xpath.
            queue_1 = xpath1
        central = None
        has_no_queue = len(queue_1) == 0
        # For each sibling, when we need them in the context of a range
        if preceding_siblings or following_siblings:
            for sibling in xmliter(parent):
                if sibling == result_1:
                    central = True
                    # We copy the node we looked for (Result_1)
                    child = copyNode(result_1, children=has_no_queue, parent=new_tree)
                    # if we don't have children
                    # we loop over the passage child
                    if not has_no_queue:
                        passageLoop(result_1, child, queue_1, None,
                                    preceding_siblings=preceding_siblings,
                                    following_siblings=following_siblings)
                    # If we were waiting for preceding_siblings, we break it off
                    # As we don't need to go further
                    if preceding_siblings:
                        break
                elif not central and preceding_siblings:
                    copyNode(sibling, parent=new_tree, children=True)
                elif central and following_siblings:
                    copyNode(sibling, parent=new_tree, children=True)
    else:
        result_1, loop = performXpath(parent, current_1)
        if loop is True:
            queue_1 = xpath1
            if xpath2 == xpath1:
                # Identical xpaths: reuse the already-computed head/queue.
                current_2, queue_2 = current_1, queue_1
            else:
                current_2, queue_2 = __formatXpath__(xpath2)
        else:
            current_2, queue_2 = __formatXpath__(xpath2)
        if xpath1 != xpath2:
            result_2, loop = performXpath(parent, current_2)
            if loop is True:
                queue_2 = xpath2
        else:
            result_2 = result_1
        if result_1 == result_2:
            # Both boundaries resolve to the same node: recurse into it.
            has_no_queue = len(queue_1) == 0
            child = copyNode(result_1, children=has_no_queue, parent=new_tree)
            if not has_no_queue:
                passageLoop(result_1, child, queue_1, queue_2)
        else:
            start = False
            # For each sibling
            for sibling in xmliter(parent):
                # If we have found start
                # We copy the node because we are between start and end
                if start:
                    # If we are at the end
                    # We break the copy
                    if sibling == result_2:
                        break
                    else:
                        copyNode(sibling, parent=new_tree, children=True)
                # If this is start
                # Then we copy it and initiate star
                elif sibling == result_1:
                    start = True
                    has_no_queue_1 = len(queue_1) == 0
                    node = copyNode(sibling, children=has_no_queue_1, parent=new_tree)
                    if not has_no_queue_1:
                        passageLoop(sibling, node, queue_1, None, following_siblings=True)
            # Copy the end boundary, recursing with preceding siblings so the
            # partial subtree before result_2's own match is kept.
            continue_loop = len(queue_2) == 0
            node = copyNode(result_2, children=continue_loop, parent=new_tree)
            if not continue_loop:
                passageLoop(result_2, node, queue_2, None, preceding_siblings=True)
    return new_tree
|
def get_element_dt(self, el_name, tz=None, el_idx=0):
    """Return the text of the selected element as a ``datetime.datetime`` object.

    The element text must be an ISO8601 formatted datetime.

    Args:
        el_name: str
            Name of element to use.
        tz: datetime.tzinfo
            Timezone in which to return the datetime.
            - If dt has timezone: The ``tz`` parameter is ignored.
            - If dt is naive (without timezone): The timezone is set to ``tz``.
            - ``tz=None``: Prevent naive dt from being set to a timezone.
              Without a timezone, other contextual information is required in
              order to determine the exact represented time.
            - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.
        el_idx: int
            Index of element to use in the event that there are multiple
            sibling elements with the same name.

    Returns:
        datetime.datetime
    """
    element = self.get_element_by_name(el_name, el_idx)
    return iso8601.parse_date(element.text, tz)
|
def index_model(index_name, adapter):
    '''Index all objects for the model wrapped by the given adapter.'''
    model = adapter.model
    log.info('Indexing {0} objects'.format(model.__name__))
    qs = model.objects
    # Restrict to visible documents when the manager supports it.
    if hasattr(model.objects, 'visible'):
        qs = qs.visible()
    if adapter.exclude_fields:
        qs = qs.exclude(*adapter.exclude_fields)
    docs = iter_qs(qs, adapter)
    docs = iter_for_index(docs, index_name)
    # Bulk-index; with raise_on_error=False failures are reported per
    # document instead of aborting the whole run.
    for ok, info in streaming_bulk(es.client, docs, raise_on_error=False):
        if not ok:
            log.error('Unable to index %s "%s": %s', model.__name__,
                      info['index']['_id'], info['index']['error'])
|
def get_subregions(self, iso_code):
    """Returns subregion calendar classes for given region iso_code.

    >>> registry = IsoRegistry()
    >>> # assuming calendars registered are: DE, DE-HH, DE-BE
    >>> registry.get_subregions('DE')
    {'DE-HH': <class 'workalendar.europe.germany.Hamburg'>,
    'DE-BE': <class 'workalendar.europe.germany.Berlin'>}

    :rtype dict
    :return dict where keys are ISO codes strings
        and values are calendar classes
    """
    subregions = OrderedDict()
    for code, calendar_class in self.region_registry.items():
        elements, is_subregion = self._code_elements(code)
        # Keep only direct subregions of the requested region.
        if is_subregion and elements[0] == iso_code:
            subregions[code] = calendar_class
    return subregions
|
def add(self, tensor, tf_sess=None, key=None, **kwargs):
    """Adds a new root *tensor* for a *key* which, if *None*, defaults to a
    consecutive number.

    When *tensor* is not an instance of :py:class:`Tensor` but an instance of
    ``tensorflow.Tensor``, it is converted first. In that case, *tf_sess*
    should be a valid tensorflow session and *kwargs* are forwarded to the
    :py:class:`Tensor` constructor.
    """
    if not isinstance(tensor, Tensor):
        # Wrap raw tensorflow tensors before registering them.
        tensor = Tensor(tensor, tf_sess, **kwargs)
    if key is None:
        # Default key: next consecutive integer after the current maximum.
        key = max(self.roots.keys()) + 1 if len(self.roots) else 0
    self.roots[key] = tensor
|
def __option(self):
    """Check and return option from section from configuration.

    Option name is equal to option prefix.

    :return: tuple of section name and option prefix
    :raise NoOptionError: if the option is absent from the section
    """
    section_name = self.section()
    option_name = self.option_prefix()
    if self.config().has_option(section_name, option_name) is False:
        raise NoOptionError(option_name, section_name)
    return section_name, option_name
|
def read_raw_samples(self, fields, thin_start=None, thin_interval=None,
                     thin_end=None, iteration=None, walkers=None,
                     flatten=True):
    """Base function for reading samples.

    Parameters
    fields : list
        The list of field names to retrieve. Must be names of datasets in
        the ``samples_group``.
    thin_start : int, optional
        Start reading from the given iteration. Default is to start from
        the first iteration.
    thin_interval : int, optional
        Only read every ``thin_interval``-th sample. Default is 1.
    thin_end : int, optional
        Stop reading at the given iteration. Default is to end at the last
        iteration.
    iteration : int, optional
        Only read the given iteration. If this provided, it overrides
        the ``thin_(start|interval|end)`` options.
    walkers : int, optional
        Only read from the given walkers. Default is to read all.
    flatten : bool, optional
        Flatten the samples to 1D arrays before returning. Otherwise, the
        returned arrays will have shape (requested walkers x
        requested iteration(s)). Default is True.

    Returns
    dict
        A dictionary of field name -> numpy array pairs.
    """
    # NOTE(review): `unicode` implies Python 2 (or a compatibility alias
    # defined elsewhere in this module) -- confirm before running on py3.
    if isinstance(fields, (str, unicode)):
        fields = [fields]
    # walkers to load
    if walkers is not None:
        # Boolean mask selecting only the requested walkers.
        widx = numpy.zeros(self.nwalkers, dtype=bool)
        widx[walkers] = True
        nwalkers = widx.sum()
    else:
        widx = slice(0, None)
        nwalkers = self.nwalkers
    # get the slice to use
    if iteration is not None:
        # A single iteration overrides the thinning options.
        get_index = int(iteration)
        niterations = 1
    else:
        get_index = self.get_slice(thin_start=thin_start, thin_end=thin_end,
                                   thin_interval=thin_interval)
        # we'll just get the number of iterations from the returned shape
        niterations = None
    # load
    group = self.samples_group + '/{name}'
    arrays = {}
    for name in fields:
        # Fancy-index the dataset by walker mask and iteration slice.
        arr = self[group.format(name=name)][widx, get_index]
        if niterations is None:
            niterations = arr.shape[-1]
        if flatten:
            arr = arr.flatten()
        else:
            # ensure that the returned array is 2D
            arr = arr.reshape((nwalkers, niterations))
        arrays[name] = arr
    return arrays
|
def great_circle_Npoints(lonlat1r, lonlat2r, N):
    """N points along the line joining lonlat1 and lonlat2"""
    # Interpolation fractions from 0 (start point) to 1 (end point).
    fractions = np.linspace(0.0, 1.0, N).reshape(-1, 1)
    start_xyz = lonlat2xyz(lonlat1r[0], lonlat1r[1])
    end_xyz = lonlat2xyz(lonlat2r[0], lonlat2r[1])
    # Linear interpolation in Cartesian space, then renormalise each point
    # back onto the unit sphere.
    chord = fractions * end_xyz + (1.0 - fractions) * start_xyz
    radii = np.sqrt((chord ** 2).sum(axis=1)).reshape(-1, 1)
    unit = chord / radii
    return xyz2lonlat(unit[:, 0], unit[:, 1], unit[:, 2])
|
def from_latlon(latitude, longitude, force_zone_number=None, force_zone_letter=None):
    """This function convert Latitude and Longitude to UTM coordinate

    Parameters
    latitude: float
        Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)
    longitude: float
        Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).
    force_zone_number: int
        Zone Number is represented with global map numbers of an UTM Zone
        Numbers Map. You may force conversion including one UTM Zone Number.
        More information see utmzones [1]_

    .. _[1]: http://www.jaworski.ca/utmzones.htm
    """
    if not in_bounds(latitude, -80.0, 84.0):
        raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)')
    if not in_bounds(longitude, -180.0, 180.0):
        raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)')
    if force_zone_number is not None:
        check_valid_zone(force_zone_number, force_zone_letter)
    lat_rad = mathlib.radians(latitude)
    lat_sin = mathlib.sin(lat_rad)
    lat_cos = mathlib.cos(lat_rad)
    lat_tan = lat_sin / lat_cos
    lat_tan2 = lat_tan * lat_tan
    lat_tan4 = lat_tan2 * lat_tan2
    if force_zone_number is None:
        zone_number = latlon_to_zone_number(latitude, longitude)
    else:
        zone_number = force_zone_number
    if force_zone_letter is None:
        zone_letter = latitude_to_zone_letter(latitude)
    else:
        zone_letter = force_zone_letter
    lon_rad = mathlib.radians(longitude)
    central_lon = zone_number_to_central_longitude(zone_number)
    central_lon_rad = mathlib.radians(central_lon)
    # Series-expansion terms of the transverse Mercator projection; the
    # constants R, E, E_P2, K0, M1..M4 are module-level ellipsoid parameters.
    n = R / mathlib.sqrt(1 - E * lat_sin ** 2)
    c = E_P2 * lat_cos ** 2
    a = lat_cos * (lon_rad - central_lon_rad)
    a2 = a * a
    a3 = a2 * a
    a4 = a3 * a
    a5 = a4 * a
    a6 = a5 * a
    # Meridional arc length from the equator to the given latitude.
    m = R * (M1 * lat_rad - M2 * mathlib.sin(2 * lat_rad) + M3 * mathlib.sin(4 * lat_rad) - M4 * mathlib.sin(6 * lat_rad))
    easting = K0 * n * (a + a3 / 6 * (1 - lat_tan2 + c) + a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
    northing = K0 * (m + n * lat_tan * (a2 / 2 + a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c ** 2) + a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
    # NOTE(review): mixed_signs/negative suggest `latitude` may also be an
    # array (vectorized use); hemispheres cannot be mixed in one call.
    if mixed_signs(latitude):
        raise ValueError("latitudes must all have the same sign")
    elif negative(latitude):
        # Southern hemisphere uses a 10,000 km false northing.
        northing += 10000000
    return easting, northing, zone_number, zone_letter
|
def DeleteNodeTags(r, node, tags, dry_run=False):
    """Delete tags from a node.

    @type node: str
    @param node: node to remove tags from
    @type tags: list of str
    @param tags: tags to remove from the node
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @rtype: int
    @return: job id
    """
    payload = {
        "tag": tags,
        "dry-run": dry_run,
    }
    return r.request("delete", "/2/nodes/%s/tags" % node, query=payload)
|
def create_media_asset(access_token, name, options="0"):
    '''Create Media Service Asset.

    Args:
        access_token (str): A valid Azure authentication token.
        name (str): Media Service Asset Name.
        options (str): Media Service Options.

    Returns:
        HTTP response. JSON body.
    '''
    import json  # local import keeps this fix self-contained
    path = '/Assets'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Serialise with json.dumps so quotes/backslashes in `name` are escaped
    # correctly; the previous string concatenation produced invalid JSON for
    # such inputs.
    body = json.dumps({"Name": name, "Options": str(options)})
    return do_ams_post(endpoint, path, body, access_token)
|
def get_rendered_transform_path(self):
    """Generates a rendered transform path
    that is calculated from all parents.

    :return: this node's transform_path prefixed by every ancestor's path
    """
    # Collect the chain leaf-to-root, then join it root-to-leaf.
    segments = [self.transform_path]
    ancestor = self.parent
    while ancestor is not None:
        segments.append(ancestor.transform_path)
        ancestor = ancestor.parent
    return "/".join(reversed(segments))
|
def get_base_ami(awsclient, owners):
    """DEPRECATED!!!

    Return the latest version of our base AMI.

    We can't use tags for this, so we have only the name as resource.
    Note: this functionality is deprecated since this only works for "old"
    baseami.
    """
    ec2 = awsclient.get_client('ec2')
    image_filter = [{'Name': 'state', 'Values': ['available', ]}, ]
    newest_ts = maya.MayaDT(0).datetime(naive=True)
    newest_version = StrictVersion('0.0.0')
    newest_id = None
    name_pattern = re.compile(r'(Ops_Base-Image)_(\d+.\d+.\d+)_(\d+)$')
    for image in ec2.describe_images(Owners=owners, Filters=image_filter)['Images']:
        match = name_pattern.search(image['Name'])
        if not match:
            continue
        version = StrictVersion(match.group(2))
        # timestamp = match.group(3)
        created = parse_ts(image['CreationDate'])
        # Prefer strictly newer creation dates with a non-decreasing version.
        if created > newest_ts and version >= newest_version:
            newest_id = image['ImageId']
            newest_ts = created
            newest_version = version
    return newest_id
|
def saveXml(self, xml):
    """Saves the data for this tree to the inputed xml entry.

    :param xml | <xml.etree.ElementTree.Element>
    :return <bool> success
    """
    if xml is None:
        return False
    # persist whether grouping is currently active
    xml.set('groupingActive', nativestring(self.isGroupingActive()))
    # persist the grouping columns, if any
    grouping = self.groupBy()
    if grouping:
        xml.set('grouping', ','.join(grouping))
    # delegate the standard tree options to the base implementation
    return super(XOrbTreeWidget, self).saveXml(xml)
|
def create_project(self, key, name=None, assignee=None, type="Software",
                   template_name=None):
    """Create a project with the specified parameters.

    :param key: Mandatory. Must match JIRA project key requirements,
        usually only 2-10 uppercase characters.
    :type: str
    :param name: If not specified it will use the key value.
    :type name: Optional[str]
    :param assignee: If not specified it will use current user.
    :type assignee: Optional[str]
    :param type: Determines the type of project should be created.
    :type type: Optional[str]
    :param template_name: is used to create a project based on one of the
        existing project templates. If `template_name` is not specified,
        then it should use one of the default values.
    :type template_name: Optional[str]
    :return: Should evaluate to False if it fails otherwise it will be
        the new project id.
    :rtype: Union[bool, int]
    """
    if assignee is None:
        assignee = self.current_user()
    if name is None:
        name = key
    possible_templates = ['Basic', 'JIRA Classic', 'JIRA Default Schemes',
                          'Basic software development']
    if template_name is not None:
        possible_templates = [template_name]
    # https://confluence.atlassian.com/jirakb/creating-a-project-via-rest-based-on-jira-default-schemes-744325852.html
    templates = self.templates()
    # TODO(ssbarnea): find a better logic to pick a default fallback template
    template_key = list(templates.values())[0]['projectTemplateModuleCompleteKey']
    for template_name, template_dic in templates.items():
        if template_name in possible_templates:
            template_key = template_dic['projectTemplateModuleCompleteKey']
            break
    payload = {'name': name,
               'key': key,
               'keyEdited': 'false',
               # 'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
               # 'permissionScheme': '',
               'projectTemplateWebItemKey': template_key,
               'projectTemplateModuleKey': template_key,
               'lead': assignee,
               # 'assigneeType': '2',
               }
    if self._version[0] > 6:
        # JIRA versions before 7 will throw an error if we specify type parameter
        payload['type'] = type
    headers = CaseInsensitiveDict({'Content-Type': 'application/x-www-form-urlencoded'})
    url = self._options['server'] + '/rest/project-templates/latest/templates'
    r = self._session.post(url, data=payload, headers=headers)
    if r.status_code == 200:
        r_json = json_loads(r)
        return r_json
    # Bug fix: NamedTemporaryFile defaults to binary mode ('w+b'), but r.text
    # is str -- open in text mode, and close the handle after writing.
    f = tempfile.NamedTemporaryFile(
        mode='w', suffix='.html',
        prefix='python-jira-error-create-project-', delete=False)
    with f:
        f.write(r.text)
    if self.logging:
        logging.error("Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % ( f.name, r.status_code))
    return False
|
async def on_raw_333(self, message):
    """Topic setter and time on channel join."""
    target, channel, setter, timestamp = message.params
    if not self.in_channel(channel):
        return
    # No need to sync user since this is most likely outdated info.
    nickname = self._parse_user(setter)[0]
    channel_info = self.channels[channel]
    channel_info['topic_by'] = nickname
    channel_info['topic_set'] = datetime.datetime.fromtimestamp(int(timestamp))
|
def get_version(package_name, ignore_cache=False):
    """Get the version which is currently configured by the package.

    :param package_name: package whose ``_version.py`` defines ``__version__``
    :param ignore_cache: when True, bypass the microcache for the lookup
    :raise ProjectError: if ``__version__`` is not defined in ``_version.py``
    :return: the configured version string
    """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True)
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True)
    if found is None:
        # Bug fix: the '{}' placeholder was previously emitted verbatim
        # because .format() was never called.
        raise ProjectError(
            'found {}, but __version__ is not defined'.format('_version.py'))
    current_version = found['version']
    return current_version
|
def parse(self):
    """Parse our project file and return :class:`Project` object or raise :exc:`ParseException`."""
    log.debug("Parsing Compass .MAK file %s ...", self.makfilename)
    base_location = None
    linked_files = []
    file_params = set()

    def parse_linked_file(value):
        # NOTE(review): both branches currently return toks[0] (just the
        # file name); the station data after the comma is ignored.
        log.debug("Parsing linked file: %s", value)
        value = value.rstrip(';')
        toks = value.split(',', 1)
        if len(toks) == 1:
            return toks[0]
        else:
            return toks[0]
            # TODO: implement link stations and fixed stations

    with codecs.open(self.makfilename, 'rb', 'windows-1252') as makfile:
        prev = None
        for line in makfile:
            line = line.strip()
            if not line:
                continue
            header, value = line[0], line[1:]
            if prev:
                # Continuation of a multi-line linked-file ('#') directive.
                if line.endswith(';'):
                    linked_file = parse_linked_file(prev + line.rstrip(';'))
                    linked_files.append(linked_file)
                    prev = None
                else:
                    prev += value
                continue
            if header == '/':
                pass
                # comment
            elif header == '@':
                # Base location: comma-separated floats fed to UTMLocation.
                value = value.rstrip(';')
                base_location = UTMLocation(*(float(v) for v in value.split(',')))
            elif header == '&':
                # Geodetic datum name for the base location.
                value = value.rstrip(';')
                base_location.datum = value
            elif header == '%':
                # UTM convergence angle for the base location.
                value = value.rstrip(';')
                base_location.convergence = float(value)
            elif header == '!':
                value = value.rstrip(';')
                # file_params = set(value)  # TODO
            elif header == '#':
                # Linked survey data file; may continue until a ';'.
                if value.endswith(';'):
                    linked_files.append(parse_linked_file(value))
                    prev = None
                else:
                    prev = value
    log.debug("Project: base_loc=%s linked_files=%s", base_location, linked_files)
    project = Project(name_from_filename(self.makfilename), filename=self.makfilename)
    project.set_base_location(base_location)
    for linked_file in linked_files:
        # TODO: we need to support case-insensitive path resolution on case-sensitive filesystems
        linked_file_path = os.path.join(os.path.dirname(self.makfilename), os.path.normpath(linked_file.replace('\\', '/')))
        datfile = CompassDatParser(linked_file_path).parse()
        project.add_linked_file(datfile)
    return project
|
def etcal(et, lenout=_default_len_out):
    """Convert from an ephemeris epoch measured in seconds past
    the epoch of J2000 to a calendar string format using a
    formal calendar free of leapseconds.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/etcal_c.html

    :param et: Ephemeris time measured in seconds past J2000.
    :type et: Union[float, Iterable[float]]
    :param lenout: Length of output string.
    :type lenout: int
    :return: A standard calendar representation of et.
    :rtype: str
    """
    lenout = ctypes.c_int(lenout)
    string = stypes.stringToCharP(lenout)
    if hasattr(et, "__iter__"):
        # Vectorized path: convert each epoch, reusing the output buffer.
        strings = []
        for t in et:
            libspice.etcal_c(t, lenout, string)
            checkForSpiceError(None)
            strings.append(stypes.toPythonString(string))
        return strings
    else:
        # NOTE(review): unlike the iterable path, the scalar path performs no
        # checkForSpiceError call -- confirm whether that is intentional.
        et = ctypes.c_double(et)
        libspice.etcal_c(et, lenout, string)
        return stypes.toPythonString(string)
|
def compute_nb_samples(in_prefix):
    """Check the number of samples.

    :param in_prefix: the prefix of the input file.
    :type in_prefix: str

    :returns: the number of samples (lines) in ``prefix.tfam``.
    """
    # Docstring previously said ``prefix.fam``; the code reads the
    # transposed file ``prefix.tfam`` (one sample per line).
    file_name = in_prefix + ".tfam"
    # Count lines lazily instead of materialising the whole file in memory.
    with open(file_name, "rb") as input_file:
        return sum(1 for _ in input_file)
|
def v_grammar_unique_defs(ctx, stmt):
    """Verify that all typedefs and groupings are unique.

    Called for every statement.
    Stores all typedefs in stmt.i_typedefs, groupings in stmt.i_groupings
    (plus features, identities and extensions for top-level statements).
    """
    defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
            ('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]
    if stmt.parent is None:
        # Module/submodule level: these definition kinds only occur at the top.
        defs.extend([('feature', 'FEATURE_ALREADY_DEFINED', stmt.i_features),
                     ('identity', 'IDENTITY_ALREADY_DEFINED', stmt.i_identities),
                     ('extension', 'EXTENSION_ALREADY_DEFINED', stmt.i_extensions)])
    # Renamed loop variable 'dict' -> 'registry': it shadowed the builtin.
    for (keyword, errcode, registry) in defs:
        for definition in stmt.search(keyword):
            if definition.arg in registry:
                other = registry[definition.arg]
                err_add(ctx.errors, definition.pos, errcode,
                        (definition.arg, other.pos))
            else:
                registry[definition.arg] = definition
|
def mecanum_drivetrain(lr_motor, rr_motor, lf_motor, rf_motor,
                       x_wheelbase=2, y_wheelbase=3, speed=5, deadzone=None):
    """.. deprecated:: 2018.2.0
       Use :class:`MecanumDrivetrain` instead
    """
    drivetrain = MecanumDrivetrain(x_wheelbase, y_wheelbase, speed, deadzone)
    return drivetrain.get_vector(lr_motor, rr_motor, lf_motor, rf_motor)
|
def _fillna(expr, value):
    """Fill null with value.

    :param expr: sequence or scalar
    :param value: value to fill into
    :return: sequence or scalar (implicitly None for other expression kinds)
    """
    if isinstance(expr, SequenceExpr):
        return FillNa(_input=expr, _fill_value=value, _data_type=expr.dtype)
    if isinstance(expr, Scalar):
        return FillNa(_input=expr, _fill_value=value, _value_type=expr.dtype)
|
def mklabel(device, label_type):
    '''Create a new disklabel (partition table) of label_type.

    Type should be one of "aix", "amiga", "bsd", "dvh", "gpt", "loop", "mac",
    "msdos", "pc98", or "sun".

    CLI Example:

    .. code-block:: bash

        salt '*' partition.mklabel /dev/sda msdos
    '''
    # Set literal instead of set([...]); named for readability.
    valid_label_types = {'aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop',
                         'mac', 'msdos', 'pc98', 'sun'}
    if label_type not in valid_label_types:
        raise CommandExecutionError('Invalid label_type passed to partition.mklabel')
    cmd = ('parted', '-m', '-s', device, 'mklabel', label_type)
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    return out
|
def register_plugins(self):
    """Load plugins listed in config variable 'PLUGINS'."""
    seen = set()
    # Built-in APP_PLUGINS come first, then configured ones; each plugin
    # is registered at most once.
    for plugin_fqdn in chain(self.APP_PLUGINS, self.config["PLUGINS"]):
        if plugin_fqdn in seen:
            continue
        self.register_plugin(plugin_fqdn)
        seen.add(plugin_fqdn)
|
def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer):
    """Convert an ReLU layer with maximum value from keras to coreml.

    Parameters
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = input_names[0], output_names[0]
    if keras_layer.max_value is None:
        # Uncapped ReLU maps directly.
        builder.add_activation(layer, 'RELU', input_name, output_name)
        return
    # There is no direct capped-RELU support: emulate clipping at max_value
    # by negating, thresholding at -max_value, and negating back.
    relu_name = output_name + '_relu'
    builder.add_activation(layer, 'RELU', input_name, relu_name)
    negated_name = relu_name + '_neg'
    builder.add_activation(layer + '__neg__', 'LINEAR', relu_name,
                           negated_name, [-1.0, 0])
    clipped_name = relu_name + '_clip'
    builder.add_unary(layer + '__clip__', negated_name, clipped_name,
                      'threshold', alpha=-keras_layer.max_value)
    builder.add_activation(layer + '_neg2', 'LINEAR', clipped_name,
                           output_name, [-1.0, 0])
|
def parents(self, node, relations=None):
    """Return all direct parents of specified node.

    Wraps networkx by default.

    Arguments
    node: string
        identifier for node in ontology
    relations: list of strings
        list of relation (object property) IDs used to filter
    """
    g = self.get_graph()
    if node not in g:
        return []
    direct_parents = list(g.predecessors(node))
    if relations is None:
        return direct_parents
    wanted = set(relations)
    # Keep only parents connected via at least one of the wanted relations.
    return [p for p in direct_parents
            if len(self.child_parent_relations(node, p, graph=g).intersection(wanted)) > 0]
|
def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic
    comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object in binary mode.
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]
        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
        if not m:
            # The magic comment may also sit on line 2, but only when line 1
            # is complete Python on its own -- otherwise line 2 could be a
            # continuation of line 1.
            # NOTE(review): the stdlib `parser` module was removed in Python
            # 3.10; the ImportError fallback below keeps this path inert there.
            try:
                import parser
                parser.suite(line1.decode('ascii', 'ignore'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = _PYTHON_MAGIC_COMMENT_re.match(line2.decode('ascii', 'ignore'))
        if has_bom:
            if m:
                raise SyntaxError("python refuses to compile code with both a UTF8"
                                  " byte-order-mark and a magic encoding comment")
            return 'utf_8'
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        # Restore the caller's file position regardless of outcome.
        fp.seek(pos)
|
def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'):
    """Add, for each marker of a genetic map, the number of significant
    QTLs found.

    :arg qtlfile: the output from MapQTL transformed to a csv file via
        'parse_mapqtl_file', which contains the closest markers.
    :arg mapfile: the genetic map with all the markers.
    :kwarg outputfile: the name of the output file in which the map will
        be written.
    """
    qtl_list = read_input_file(qtlfile, ',')
    map_list = read_input_file(mapfile, ',')
    map_list[0].append('# QTLs')
    markers = [map_list[0]]
    qtl_cnt = 0
    for marker in map_list[1:]:
        markers.append(add_qtl_to_marker(marker, qtl_list[1:]))
        # add_qtl_to_marker appends the QTL count as the last column
        qtl_cnt += int(markers[-1][-1])
    # use lazy %-style logging arguments instead of eager interpolation
    LOG.info('- %s markers processed in %s', len(markers), mapfile)
    LOG.info('- %s QTLs located in the map: %s', qtl_cnt, outputfile)
    write_matrix(outputfile, markers)
|
def get_webhook_url(deployment_name, space='default', data_source='webhook', token_manager=None, app_url=defaults.APP_URL, **fields):
    """Return the webhook URL for posting webhook data to.

    :param deployment_name: name of the deployment to import into
    :param space: target space name
    :param data_source: data source identifier recorded with the import
    :param token_manager: credentials used to resolve the import URL/API key
    :param app_url: base application URL
    :param fields: extra query-string parameters appended verbatim
    """
    import_url = data_engine.get_import_data_url(deployment_name, app_url=app_url, token_manager=token_manager)
    api_key = deployments.get_apikey(deployment_name, token_manager=token_manager, app_url=app_url)
    fields_string = '&'.join(['%s=%s' % (key, value) for (key, value) in fields.items()])
    # bugfix: removed a stray 'k' that produced 'data_source=<value>k'
    return '%s/api/v1/import/webhook/?space=%s&data_source=%s&apikey=%s&%s' % (import_url, space, data_source, api_key, fields_string)
|
def _add_nonce ( self , response ) :
    """Store a nonce from a response we received .
    : param twisted . web . iweb . IResponse response : The HTTP response .
    : return : The response , unmodified ."""
    # The replay nonce arrives as a response header ; getRawHeaders returns
    # a list of values , so take the first one ( or None when absent ) .
    nonce = response . headers . getRawHeaders ( REPLAY_NONCE_HEADER , [ None ] ) [ 0 ]
    with LOG_JWS_ADD_NONCE ( raw_nonce = nonce ) as action :
        if nonce is None :
            # the server must supply a nonce ; absence is a protocol error
            raise errors . MissingNonce ( response )
        else :
            try :
                # decode / validate the nonce via the JOSE header field definition
                decoded_nonce = Header . _fields [ 'nonce' ] . decode ( nonce . decode ( 'ascii' ) )
                action . add_success_fields ( nonce = decoded_nonce )
            except DeserializationError as error :
                # a malformed nonce is surfaced as a BadNonce error
                raise errors . BadNonce ( nonce , error )
            self . _nonces . add ( decoded_nonce )
            return response
|
def include_callback_query_chat_id(fn=pair, types='all'):
    """Return a pair producer that enables static callback query capturing
    across seeder and delegator.

    :param types:
        ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    """
    @_ensure_seeders_list
    def producer(seeders, delegator_factory, *args, **kwargs):
        # append a per-callback-query seeder and force callback capturing
        extended = seeders + [per_callback_query_chat_id(types=types)]
        return fn(extended, delegator_factory, *args, include_callback_query=True, **kwargs)
    return producer
|
def _load_clublogXML(self, url="https://secure.clublog.org/cty.php", apikey=None, cty_file=None):
    """Load and process the ClublogXML file, either as a download or from
    a local file.

    :param url: download endpoint for the Clublog cty data
    :param apikey: API key passed along with the download request
    :param cty_file: overridden internally; the source file actually used
        depends on whether downloading is enabled
    :return: True on success
    """
    # pick the data source: fresh download vs. the bundled library file
    if self._download:
        cty_file = self._download_file(url=url, apikey=apikey)
    else:
        cty_file = self._lib_filename
    header = self._extract_clublog_header(cty_file)
    cty_file = self._remove_clublog_xml_header(cty_file)
    cty_dict = self._parse_clublog_xml(cty_file)
    # unpack the parsed dictionary into the instance caches
    attr_to_key = {
        '_entities': 'entities',
        '_callsign_exceptions': 'call_exceptions',
        '_prefixes': 'prefixes',
        '_invalid_operations': 'invalid_operations',
        '_zone_exceptions': 'zone_exceptions',
        '_callsign_exceptions_index': 'call_exceptions_index',
        '_prefixes_index': 'prefixes_index',
        '_invalid_operations_index': 'invalid_operations_index',
        '_zone_exceptions_index': 'zone_exceptions_index',
    }
    for attr, key in attr_to_key.items():
        setattr(self, attr, cty_dict[key])
    return True
|
def convert_to_flat(self, builder):
    """Serialize this car state into the flatbuffer being built.

    Always returns a valid flatbuffer offset, even when every field is
    blank, because empty car states must still occupy slots in the car
    list so the indices line up.
    """
    # child tables must be fully built before the parent table is started
    physics_offset = None
    if self.physics is not None:
        physics_offset = self.physics.convert_to_flat(builder)
    DesiredCarState.DesiredCarStateStart(builder)
    if physics_offset is not None:
        DesiredCarState.DesiredCarStateAddPhysics(builder, physics_offset)
    if self.boost_amount is not None:
        DesiredCarState.DesiredCarStateAddBoostAmount(builder, Float.CreateFloat(builder, self.boost_amount))
    if self.jumped is not None:
        DesiredCarState.DesiredCarStateAddJumped(builder, Bool.CreateBool(builder, self.jumped))
    if self.double_jumped is not None:
        DesiredCarState.DesiredCarStateAddDoubleJumped(builder, Bool.CreateBool(builder, self.double_jumped))
    return DesiredCarState.DesiredCarStateEnd(builder)
|
def otherwise(self, value):
    """Evaluates a list of conditions and returns one of multiple possible
    result expressions.

    If :func:`Column.otherwise` is not invoked, None is returned for
    unmatched conditions.

    See :func:`pyspark.sql.functions.when` for example usage.

    :param value: a literal value, or a :class:`Column` expression.

    >>> from pyspark.sql import functions as F
    >>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
    | name | CASE WHEN ( age > 3 ) THEN 1 ELSE 0 END |
    | Alice | 0 |
    | Bob | 1 |
    """
    # unwrap Column arguments down to the underlying JVM column object
    jvm_value = value._jc if isinstance(value, Column) else value
    return Column(self._jc.otherwise(jvm_value))
|
def structs2pandas(structs):
    """Convert a ctypes structure or structure array to a pandas DataFrame.

    Returns the input unchanged when pandas is not installed.
    """
    try:
        import pandas
        frame = pandas.DataFrame.from_records(list(structs2records(structs)))
        # TODO: strip trailing whitespace for all string columns, not just
        # 'id'; checking df.columns[df.dtypes == object] would be nicer
        if 'id' in frame:
            frame["id"] = frame["id"].apply(str.rstrip)
        return frame
    except ImportError:
        # pandas not found, that's ok -- hand the raw structs back
        return structs
|
def format_timestamp(ts):
    """Format a POSIX timestamp as a UTC Elasticsearch timestamp,
    e.g. 2014-07-09T08:37:18.000Z

    :param ts: seconds since the epoch
    :return: ISO-8601-style string with a fixed .000 millisecond part
    @see https://docs.python.org/2/library/time.html#time.strftime
    """
    # stdlib timezone.utc is interchangeable with dateutil's tz.tzutc()
    # here, and using it drops the third-party dependency
    from datetime import timezone
    return datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
def std_err(self):
    """Standard error of the estimate (SEE).  A scalar.

    For standard errors of parameters, see _se_all, se_alpha, and se_beta.
    """
    # sqrt of the residual sum of squares over the error degrees of freedom
    sum_sq_resid = np.sum(np.square(self.resids), axis=0)
    return np.sqrt(sum_sq_resid / self.df_err)
|
def inflate_long(s, always_positive=False):
    """Turn a normalized big-endian byte string into an int
    (adapted from Crypto.Util.number).

    :param s: byte string to decode
    :param always_positive: when True, read the value as unsigned even if
        the most significant bit is set
    :return: the decoded integer (two's-complement negative when the top
        bit is set and always_positive is False)
    """
    # The original Python 2 loop padded s to a multiple of 4 bytes,
    # accumulated 32-bit words, and subtracted 1 << (8 * len) when the
    # sign bit was set -- which is exactly two's-complement decoding.
    # int.from_bytes does the same in one call and also handles b''.
    return int.from_bytes(s, 'big', signed=not always_positive)
|
async def shutdown(sig, loop):
    """Gracefully cancel current tasks when the app receives a shutdown signal.

    :param sig: the received signal (only ``sig.name`` is used, for logging)
    :param loop: the event loop to stop after all other tasks are cancelled
    """
    logging.info(f'Received exit signal {sig.name}...')
    # asyncio.Task.all_tasks / Task.current_task were deprecated in 3.7 and
    # removed in 3.9/3.12; use the module-level functions instead
    tasks = [task for task in asyncio.all_tasks() if task is not asyncio.current_task()]
    for task in tasks:
        logging.debug(f'Cancelling task: {task}')
        task.cancel()
    # gather with return_exceptions so CancelledError doesn't propagate
    results = await asyncio.gather(*tasks, return_exceptions=True)
    logging.debug(f'Done awaiting cancelled tasks, results: {results}')
    loop.stop()
    logging.info('Shutdown complete.')
|
def add_vertex(self, v):
    """Add a vertex to the graph.  The vertex must implement __hash__ and
    __eq__ as it will be stored in a set.

    :param v: vertex
    :return: graph owned vertex
    """
    node = self.adjacency.get(v)
    if node is None:
        # first time we see this vertex: wrap and register it
        node = Vertex(v)
        self.adjacency[v] = node
    return node.vertex
|
def get_start_and_end_time(self, ref=None):
    """Specific function to get start time and end time for
    MonthDateDaterange.

    :param ref: time in seconds
    :type ref: int
    :return: tuple with start and end time
    :rtype: tuple (int, int)
    """
    now = time.localtime(ref)
    # a year of 0 means "the current year"; note this mutates self
    if self.syear == 0:
        self.syear = now.tm_year
    day_start = find_day_by_offset(self.syear, self.smon, self.smday)
    start_time = get_start_of_day(self.syear, self.smon, day_start)
    if self.eyear == 0:
        self.eyear = now.tm_year
    day_end = find_day_by_offset(self.eyear, self.emon, self.emday)
    end_time = get_end_of_day(self.eyear, self.emon, day_end)
    now_epoch = time.mktime(now)
    if start_time > end_time:
        # the period spans two calendar years
        if now_epoch > end_time:
            # the period is already over: roll the end into next year
            day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
        else:
            # we are inside the period: the start was last year
            # (bugfix: use smday -- the start day offset -- not emday)
            day_start = find_day_by_offset(self.syear - 1, self.smon, self.smday)
            start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
    else:
        if now_epoch > end_time:
            # the period is over for this year: move both bounds forward
            day_start = find_day_by_offset(self.syear + 1, self.smon, self.smday)
            start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
            day_end = find_day_by_offset(self.eyear + 1, self.emon, self.emday)
            end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
    return (start_time, end_time)
|
def searchAccount(searchTerm, book):
    """Search through account names, printing every match."""
    print("Search results:\n")
    term = searchTerm.lower()
    matched = False
    for account in book.accounts:
        # case-insensitive substring match against the full account name
        if term in account.fullname.lower():
            print(account.fullname)
            matched = True
    if not matched:
        print("Search term not found in account names.")
|
def merge_moments(m_a, m_a2, m_a3, m_a4, n_a, m_b, m_b2, m_b3, m_b4, n_b):
    '''Merge central moments of two samples A and B using Pebay's pairwise
    update formulas.

    Parameters are
    m_a, ..., m_a4 = mean and second- through fourth-central-moment sums
        of sample A
    n_a = size of sample A
    m_b, ..., m_b4 = the same statistics for sample B
    n_b = size of sample B

    Returns the merged (m_x, m_x2, m_x3, m_x4, n_x).
    '''
    delta = m_b - m_a
    delta_2 = delta * delta
    delta_3 = delta * delta_2
    delta_4 = delta * delta_3
    n_x = n_a + n_b
    m_x = m_a + delta * n_b / n_x
    m_x2 = m_a2 + m_b2 + delta_2 * n_a * n_b / n_x
    # bugfix: the third-moment term was missing its / n_x**2 divisor and
    # referenced undefined names (m_2a / m_2b instead of m_a2 / m_b2)
    m_x3 = (m_a3 + m_b3
            + delta_3 * n_a * n_b * (n_a - n_b) / (n_x * n_x)
            + 3 * delta * (n_a * m_b2 - n_b * m_a2) / n_x)
    m_x4 = (m_a4 + m_b4
            + delta_4 * (n_a * n_b * (n_a * n_a - n_a * n_b + n_b * n_b)) / (n_x ** 3)
            + 6 * delta_2 * (n_a * n_a * m_b2 + n_b * n_b * m_a2) / (n_x ** 2)
            + 4 * delta * (n_a * m_b3 - n_b * m_a3) / n_x)
    return m_x, m_x2, m_x3, m_x4, n_x
|
def analyze ( segments , analysis , lookup = dict ( bipa = { } , dolgo = { } ) ) :
    """Test a sequence for compatibility with CLPA and LingPy .
    : param analysis : Pass a ` TranscriptionAnalysis ` instance for cumulative reporting ."""
    # NOTE(review): the mutable default for ` lookup ` persists across calls and
    # acts as a shared memo cache of BIPA / sound-class lookups -- presumably
    # intentional ; confirm before changing it .
    # raise a ValueError in case of empty segments / strings
    if not segments :
        raise ValueError ( 'Empty sequence.' )
    # test if at least one element in ` segments ` has information
    # ( helps to catch really badly formed input , such as [ ' \ n ' ]
    if not [ segment for segment in segments if segment . strip ( ) ] :
        raise ValueError ( 'No information in the sequence.' )
    # build the phonologic and sound class analyses
    try :
        bipa_analysis , sc_analysis = [ ] , [ ]
        for s in segments :
            # cached BIPA lookup ( setdefault also fills the shared cache )
            a = lookup [ 'bipa' ] . get ( s )
            if a is None :
                a = lookup [ 'bipa' ] . setdefault ( s , BIPA [ s ] )
            bipa_analysis . append ( a )
            # cached Dolgopolsky sound-class lookup
            sc = lookup [ 'dolgo' ] . get ( s )
            if sc is None :
                sc = lookup [ 'dolgo' ] . setdefault ( s , BIPA . translate ( s , DOLGO ) )
            sc_analysis . append ( sc )
    except : # noqa
        # dump the offending input for debugging , then re-raise unchanged
        print ( segments )
        raise
    # compute general errors ; this loop must take place outside the
    # following one because the code for computing single errors ( either
    # in ` bipa _ analysis ` or in ` soundclass _ analysis ` ) is unnecessary
    # complicated
    for sound_bipa , sound_class in zip ( bipa_analysis , sc_analysis ) :
        if isinstance ( sound_bipa , pyclts . models . UnknownSound ) or sound_class == '?' :
            analysis . general_errors += 1
    # iterate over the segments and analyses , updating counts of occurrences
    # and specific errors
    for segment , sound_bipa , sound_class in zip ( segments , bipa_analysis , sc_analysis ) : # update the segment count
        analysis . segments . update ( [ segment ] )
        # add an error if we got an unknown sound , otherwise just append
        # the ` replacements ` dictionary
        if isinstance ( sound_bipa , pyclts . models . UnknownSound ) :
            analysis . bipa_errors . add ( segment )
        else :
            analysis . replacements [ sound_bipa . source ] . add ( sound_bipa . __unicode__ ( ) )
        # update sound class errors , if any
        if sound_class == '?' :
            analysis . sclass_errors . add ( segment )
    return segments , bipa_analysis , sc_analysis , analysis
|
def set_grads(params, params_with_grad):
    """Copy gradients from params_with_grad onto params.

    :param params: destination parameters
    :param params_with_grad: source parameters (their ``.grad`` is read)
    """
    for dst, src in zip(params, params_with_grad):
        if dst.grad is None:
            # allocate a gradient buffer matching the destination parameter
            dst.grad = torch.nn.Parameter(torch.empty_like(dst))
        dst.grad.data.copy_(src.grad.data)
|
def _build_map(self):
    """Read the map file at ``self.file_path`` and feed each rule line to
    ``_process_map_rule``."""
    if gf.is_py2_narrow_build():
        self.log_warn(u"Running on a Python 2 narrow build: be aware that Unicode chars above 0x10000 cannot be replaced correctly.")
    self.trans_map = {}
    with io.open(self.file_path, "r", encoding="utf-8") as file_obj:
        # normalize tabs to spaces before splitting into lines
        contents = file_obj.read().replace(u"\t", u" ")
    for line in contents.splitlines():
        # ignore lines starting with "#" or blank (after stripping)
        if line.startswith(u"#"):
            continue
        stripped = line.strip()
        if len(stripped) > 0:
            self._process_map_rule(stripped)
|
def retrieveVals(self):
    """Retrieve values for graphs from the memcached server statistics.

    Fetches the stats (unless already cached on the instance), derives
    hit/miss counters for 'set', persists state for rate computations,
    and pushes a value into every graph enabled on this plugin instance.
    """
    if self._stats is None:
        serverInfo = MemcachedInfo(self._host, self._port, self._socket_file)
        stats = serverInfo.getStats()
    else:
        stats = self._stats
    if stats is None:
        raise Exception("Undetermined error accesing stats.")
    # every successfully stored item counts as a 'set' hit
    stats['set_hits'] = stats.get('total_items')
    # bugfix: dict.has_key() was removed in Python 3; use 'in' instead
    if 'cmd_set' in stats and 'total_items' in stats:
        stats['set_misses'] = stats['cmd_set'] - stats['total_items']
    self.saveState(stats)
    if self.hasGraph('memcached_connections'):
        self.setGraphVal('memcached_connections', 'conn', stats.get('curr_connections'))
    if self.hasGraph('memcached_items'):
        self.setGraphVal('memcached_items', 'items', stats.get('curr_items'))
    if self.hasGraph('memcached_memory'):
        self.setGraphVal('memcached_memory', 'bytes', stats.get('bytes'))
    if self.hasGraph('memcached_connrate'):
        self.setGraphVal('memcached_connrate', 'conn', stats.get('total_connections'))
    if self.hasGraph('memcached_traffic'):
        self.setGraphVal('memcached_traffic', 'rxbytes', stats.get('bytes_read'))
        self.setGraphVal('memcached_traffic', 'txbytes', stats.get('bytes_written'))
    if self.hasGraph('memcached_reqrate'):
        self.setGraphVal('memcached_reqrate', 'set', stats.get('cmd_set'))
        self.setGraphVal('memcached_reqrate', 'get', stats.get('cmd_get'))
        if self.graphHasField('memcached_reqrate', 'del'):
            self.setGraphVal('memcached_reqrate', 'del', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')]))
        if self.graphHasField('memcached_reqrate', 'cas'):
            self.setGraphVal('memcached_reqrate', 'cas', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')]))
        if self.graphHasField('memcached_reqrate', 'incr'):
            self.setGraphVal('memcached_reqrate', 'incr', safe_sum([stats.get('incr_hits'), stats.get('incr_misses')]))
        if self.graphHasField('memcached_reqrate', 'decr'):
            self.setGraphVal('memcached_reqrate', 'decr', safe_sum([stats.get('decr_hits'), stats.get('decr_misses')]))
    if self.hasGraph('memcached_statget'):
        self.setGraphVal('memcached_statget', 'hit', stats.get('get_hits'))
        self.setGraphVal('memcached_statget', 'miss', stats.get('get_misses'))
        self.setGraphVal('memcached_statget', 'total', safe_sum([stats.get('get_hits'), stats.get('get_misses')]))
    if self.hasGraph('memcached_statset'):
        self.setGraphVal('memcached_statset', 'hit', stats.get('set_hits'))
        self.setGraphVal('memcached_statset', 'miss', stats.get('set_misses'))
        self.setGraphVal('memcached_statset', 'total', safe_sum([stats.get('set_hits'), stats.get('set_misses')]))
    if self.hasGraph('memcached_statdel'):
        self.setGraphVal('memcached_statdel', 'hit', stats.get('delete_hits'))
        self.setGraphVal('memcached_statdel', 'miss', stats.get('delete_misses'))
        self.setGraphVal('memcached_statdel', 'total', safe_sum([stats.get('delete_hits'), stats.get('delete_misses')]))
    if self.hasGraph('memcached_statcas'):
        self.setGraphVal('memcached_statcas', 'hit', stats.get('cas_hits'))
        self.setGraphVal('memcached_statcas', 'miss', stats.get('cas_misses'))
        self.setGraphVal('memcached_statcas', 'badval', stats.get('cas_badval'))
        self.setGraphVal('memcached_statcas', 'total', safe_sum([stats.get('cas_hits'), stats.get('cas_misses'), stats.get('cas_badval')]))
    if self.hasGraph('memcached_statincrdecr'):
        self.setGraphVal('memcached_statincrdecr', 'incr_hit', stats.get('incr_hits'))
        self.setGraphVal('memcached_statincrdecr', 'decr_hit', stats.get('decr_hits'))
        self.setGraphVal('memcached_statincrdecr', 'incr_miss', stats.get('incr_misses'))
        self.setGraphVal('memcached_statincrdecr', 'decr_miss', stats.get('decr_misses'))
        self.setGraphVal('memcached_statincrdecr', 'total', safe_sum([stats.get('incr_hits'), stats.get('decr_hits'), stats.get('incr_misses'), stats.get('decr_misses')]))
    if self.hasGraph('memcached_statevict'):
        self.setGraphVal('memcached_statevict', 'evict', stats.get('evictions'))
        if self.graphHasField('memcached_statevict', 'reclaim'):
            self.setGraphVal('memcached_statevict', 'reclaim', stats.get('reclaimed'))
    if self.hasGraph('memcached_statauth'):
        self.setGraphVal('memcached_statauth', 'reqs', stats.get('auth_cmds'))
        self.setGraphVal('memcached_statauth', 'errors', stats.get('auth_errors'))
    if self.hasGraph('memcached_hitpct'):
        # hit percentages are computed as deltas against the previous run
        prev_stats = self._prev_stats
        for (field_name, field_hits, field_misses) in (
                ('set', 'set_hits', 'set_misses'),
                ('get', 'get_hits', 'get_misses'),
                ('del', 'delete_hits', 'delete_misses'),
                ('cas', 'cas_hits', 'cas_misses'),
                ('incr', 'incr_hits', 'incr_misses'),
                ('decr', 'decr_hits', 'decr_misses')):
            if prev_stats:
                if (field_hits in stats and field_hits in prev_stats
                        and field_misses in stats and field_misses in prev_stats):
                    hits = stats[field_hits] - prev_stats[field_hits]
                    misses = stats[field_misses] - prev_stats[field_misses]
                    total = hits + misses
                    if total > 0:
                        val = 100.0 * hits / total
                    else:
                        val = 0
                    self.setGraphVal('memcached_hitpct', field_name, round(val, 2))
|
def reset_service_group(self, loadbal_id, group_id):
    """Resets all the connections on the service group.

    :param int loadbal_id: The id of the loadbal
    :param int group_id: The id of the service group to reset
    """
    group_filter = {'virtualServers': {'id': utils.query_filter(group_id)}}
    virtual_servers = self.lb_svc.getVirtualServers(id=loadbal_id, filter=group_filter, mask='serviceGroups')
    # the service-group id differs from the virtual-server id we filtered by
    actual_id = virtual_servers[0]['serviceGroups'][0]['id']
    svc = self.client['Network_Application_Delivery_Controller' '_LoadBalancer_Service_Group']
    return svc.kickAllConnections(id=actual_id)
|
def compute_dataset_statistics(dataset_path, output_path, config):
    """Compute the statistics of fields of a TensorDataset.

    Parameters
    ----------
    dataset_path : str
        path to the dataset
    output_path : str
        directory in which to save the statistics and histograms
    config : :obj:`YamlConfig`
        parameters for the analysis
    """
    # parse config
    analysis_fields = config['analysis_fields']
    num_percentiles = config['num_percentiles']
    thresholds = config['thresholds']
    log_rate = config['log_rate']
    num_bins = config['num_bins']
    font_size = config['font_size']
    # read but unused; kept so a missing config key still raises early
    line_width = config['line_width']
    dpi = config['dpi']
    # create dataset for the aggregated results
    dataset = TensorDataset.open(dataset_path)
    num_datapoints = dataset.num_datapoints
    # allocate buffers
    analysis_data = {field: [] for field in analysis_fields}
    # loop through the dataset, collecting the requested fields
    for i in range(num_datapoints):
        if i % log_rate == 0:
            logging.info('Reading datapoint %d of %d', i + 1, num_datapoints)
        datapoint = dataset.datapoint(i, analysis_fields)
        # bugfix: dict.iteritems() was removed in Python 3; use items()
        for key, value in datapoint.items():
            analysis_data[key].append(value)
    # create output CSV headers
    stats_headers = {'name': 'str', 'mean': 'float', 'median': 'float', 'std': 'float'}
    for i in range(num_percentiles):
        pctile = int((100.0 / num_percentiles) * i)
        stats_headers['%d_pctile' % (pctile)] = 'float'
    for t in thresholds:
        stats_headers['pct_above_%.3f' % (t)] = 'float'
    # analyze statistics per field
    for field, data in analysis_data.items():
        data = np.array(data)
        stats_filename = os.path.join(output_path, '%s_stats.json' % (field))
        if os.path.exists(stats_filename):
            logging.warning('Statistics file %s exists!', stats_filename)
        # basic stats
        mean = np.mean(data)
        median = np.median(data)
        std = np.std(data)
        stats = {'name': str(field), 'mean': float(mean), 'median': float(median), 'std': float(std), }
        for i in range(num_percentiles):
            pctile = int((100.0 / num_percentiles) * i)
            stats['%d_pctile' % (pctile)] = float(np.percentile(data, pctile))
        for t in thresholds:
            stats['pct_above_%.3f' % (t)] = float(np.mean(1 * (data > t)))
        # bugfix: close the stats file instead of leaking the handle
        with open(stats_filename, 'w') as stats_file:
            json.dump(stats, stats_file, indent=2, sort_keys=True)
        # histogram
        num_unique = np.unique(data).shape[0]
        nb = min(num_bins, data.shape[0], num_unique)
        bounds = (np.min(data), np.max(data))
        vis2d.figure()
        utils.histogram(data, nb, bounds, normalized=False, plot=True)
        vis2d.xlabel(field, fontsize=font_size)
        vis2d.ylabel('Count', fontsize=font_size)
        data_filename = os.path.join(output_path, 'histogram_%s.pdf' % (field))
        vis2d.show(data_filename, dpi=dpi)
|
def get_fobj(fname, mode='w+'):
    """Obtain a proper file object.

    Parameters
    ----------
    fname : string, file object, file descriptor
        If a string or file descriptor, then we create a file object. If
        *fname* is a file object, then we do nothing and ignore the
        specified *mode* parameter.
    mode : str
        The mode of the file to be opened.

    Returns
    -------
    fobj : file object
        The file object.
    close : bool
        True only when *fname* was a string, signifying that the caller
        should close the file object after writing to it; for file-like
        objects and descriptors the caller owns the lifetime already.
    """
    if is_string_like(fname):
        return open(fname, mode), True
    if hasattr(fname, 'write'):
        # fname is already file-like (perhaps a StringIO); use it as-is
        return fname, False
    # otherwise assume an OS-level file descriptor
    return os.fdopen(fname, mode), False
|
# NOTE(review): the following dataset-viewer boilerplate appears to have been
# pasted into the source by mistake; kept as comments so the file stays valid:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.