signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def collect(self):
    """Do pre-flight checks, get list of db names, collect metrics, publish.

    Returns an empty dict (and logs an error) if psycopg2 is missing or no
    databases are found.
    """
    if psycopg2 is None:
        self.log.error('Unable to import module psycopg2')
        return {}
    # Get list of databases
    dbs = self._get_db_names()
    if len(dbs) == 0:
        self.log.error("I have 0 databases!")
        return {}
    if self.config['metrics']:
        metrics = self.config['metrics']
    elif str_to_bool(self.config['extended']):
        # Copy the registry list: the original aliased it, so the append
        # below mutated the shared registry['extended'] entry.
        metrics = list(registry['extended'])
        if str_to_bool(self.config['has_admin']) and 'WalSegmentStats' not in metrics:
            metrics.append('WalSegmentStats')
    else:
        metrics = registry['basic']
    # Iterate every QueryStats class
    for metric_name in set(metrics):
        if metric_name not in metrics_registry:
            self.log.error('metric_name %s not found in metric registry' % metric_name)
            continue
        for dbase in dbs:
            conn = self._connect(database=dbase)
            try:
                klass = metrics_registry[metric_name]
                stat = klass(dbase, conn, underscore=self.config['underscore'])
                stat.fetch(self.config['pg_version'])
                for metric, value in stat:
                    if value is not None:
                        self.publish(metric, value)
                # Setting multi_db to True will run this query on all known
                # databases. This is bad for queries that hit views like
                # pg_database, which are shared across databases.
                # If multi_db is False, bail early after the first query
                # iteration. Otherwise, continue to remaining databases.
                if stat.multi_db is False:
                    break
            finally:
                conn.close()
|
def automain(self, function):
    """Decorator that defines *and runs* the main function of the experiment.

    The decorated function is marked as the default command for this
    experiment, and the command-line interface is automatically run when
    the file is executed. The method decorated by this should be last in
    the file because it is equivalent to::

        @ex.main
        def my_main():
            pass

        if __name__ == '__main__':
            ex.run_commandline()
    """
    captured = self.main(function)
    if function.__module__ == '__main__':
        # Refuse to run in an interactive session, where there is no real
        # command line to parse.
        import inspect
        source_file = inspect.getfile(function)
        is_interactive = source_file == '<stdin>' or (
            source_file.startswith('<ipython-input-')
            and source_file.endswith('>'))
        if is_interactive:
            raise RuntimeError('Cannot use @ex.automain decorator in '
                               'interactive mode. Use @ex.main instead.')
        self.run_commandline()
    return captured
|
def premis_to_data(premis_lxml_el):
    """Transform a PREMIS ``lxml._Element`` instance to a Python tuple."""
    version = premis_lxml_el.get("version", utils.PREMIS_VERSION)
    namespaces = utils.PREMIS_VERSIONS_MAP[version]["namespaces"]
    return _lxml_el_to_data(premis_lxml_el, "premis", namespaces)
|
def map_t(func):
    """Transformation for Sequence.map

    :param func: map function
    :return: transformation
    """
    label = 'map({0})'.format(name(func))
    mapper = partial(map, func)
    return Transformation(label, mapper, {ExecutionStrategies.PARALLEL})
|
def add(self, callback=None, name=None, shortcut=None, alias=None, docstring=None, menu=None, verbose=True):
    """Add an action with a keyboard shortcut.

    Usable as ``add(func)``, ``@add``, or ``@add(...)``.
    """
    if callback is None:
        # Parameterized-decorator form: return a partial carrying *all*
        # options (the original dropped docstring and verbose here, so
        # @add(docstring=...) silently lost its docstring).
        return partial(self.add, name=name, shortcut=shortcut, alias=alias,
                       docstring=docstring, menu=menu, verbose=verbose)
    assert callback
    # Get the name from the callback function if needed.
    name = name or callback.__name__
    alias = alias or _alias(name)
    name = name.replace('&', '')
    shortcut = shortcut or self._default_shortcuts.get(name, None)
    # Skip existing action.
    if name in self._actions_dict:
        return
    # Set the status tip from the function's docstring, collapsing runs of
    # horizontal whitespace.
    docstring = docstring or callback.__doc__ or name
    docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
    # Create and register the action.
    action = _create_qaction(self.gui, name, callback, shortcut, docstring=docstring, alias=alias)
    action_obj = Bunch(qaction=action, name=name, alias=alias, shortcut=shortcut, callback=callback, menu=menu)
    if verbose and not name.startswith('_'):
        logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut()))
    self.gui.addAction(action)
    # Add the action to the menu; private (underscore-prefixed) actions are
    # not shown.
    menu = menu or self.menu
    if menu and not name.startswith('_'):
        self.gui.get_menu(menu).addAction(action)
    self._actions_dict[name] = action_obj
    # Register the alias -> name mapping.
    self._aliases[alias] = name
    # Set the callback method.
    if callback:
        setattr(self, name, callback)
|
def generate_ngrams(args, parser):
    """Adds n-grams data to the data store."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args) if args.catalogue else None
    store.add_ngrams(corpus, args.min_size, args.max_size, catalogue)
|
def find_module(self, modname, folder=None):
    """Returns a resource corresponding to the given module;
    returns None if it cannot be found."""
    def candidates():
        # Search order: source folders, then python-path folders, then the
        # optional explicit `folder`. Generated lazily so later sources are
        # only consulted when earlier ones fail.
        for src in self.get_source_folders():
            yield src
        for src in self.get_python_path_folders():
            yield src
        if folder is not None:
            yield folder
    for location in candidates():
        module = _find_module_in_folder(location, modname)
        if module is not None:
            return module
    return None
|
def availableDurations(self):
    '''A lesson can always be booked for the length of a single slot, but this method
    checks if multiple slots are available. This method requires that slots are
    non-overlapping, which needs to be enforced on slot save.

    Returns a list of bookable durations, always starting with this slot's
    own duration. Durations appear to be in minutes (they are fed to
    ``timedelta(minutes=...)``).
    '''
    # Candidate follow-on slots: same instructor/location/room/pricing tier,
    # starting at or after this slot and within the maximum-lesson-length
    # window, ordered by start time so contiguity can be checked in sequence.
    potential_slots = InstructorAvailabilitySlot.objects.filter(
        instructor=self.instructor,
        location=self.location,
        room=self.room,
        pricingTier=self.pricingTier,
        startTime__gte=self.startTime,
        startTime__lte=self.startTime + timedelta(minutes=getConstant('privateLessons__maximumLessonLength')),
    ).exclude(id=self.id).order_by('startTime')
    duration_list = [self.duration, ]
    last_start = self.startTime
    last_duration = self.duration
    max_duration = self.duration
    for slot in potential_slots:
        # Stop once adding this slot would exceed the maximum lesson length.
        if max_duration + slot.duration > getConstant('privateLessons__maximumLessonLength'):
            break
        # Extend only when the slot starts exactly where the previous one
        # ended (contiguous) and is actually available.
        if (slot.startTime == last_start + timedelta(minutes=last_duration) and slot.isAvailable):
            duration_list.append(max_duration + slot.duration)
            last_start = slot.startTime
            last_duration = slot.duration
            max_duration += slot.duration
    return duration_list
|
def ucamel_method(func):
    """Decorator to ensure the given snake_case method is also written in
    UpperCamelCase in the given namespace. That was mainly written to
    avoid confusion when using wxPython and its UpperCamelCaseMethods."""
    # Bind the UpperCamelCase name directly in the caller's local namespace.
    caller_locals = inspect.currentframe().f_back.f_locals
    caller_locals[snake2ucamel(func.__name__)] = func
    return func
|
def temporal_split(X, y, test_size=0.25):
    '''Split time series or sequence data along the time axis.

    Test data is drawn from the end of each series/sequence.

    Parameters
    ----------
    X : array-like, shape [n_series, ...]
        Time series data and (optionally) contextual data
    y : array-like, shape [n_series, ]
        target vector
    test_size : float
        strictly between 0 and 1, fraction to allocate to test

    Returns
    -------
    X_train : array-like, shape [n_series, ]
    X_test : array-like, shape [n_series, ]
    y_train : array-like, shape [n_series, ]
    y_test : array-like, shape [n_series, ]

    Raises
    ------
    ValueError
        If test_size is outside the open interval (0, 1).
    '''
    if test_size <= 0. or test_size >= 1.:
        # Message fixed: the check rejects the endpoints, but the original
        # message claimed ">= 0.0 and <= 1.0" was allowed.
        raise ValueError("temporal_split: test_size must be > 0.0 and < 1.0"
                         " (was %.1f)" % test_size)
    Ns = len(y)  # number of series
    check_ts_data(X, y)
    Xt, Xc = get_ts_data_parts(X)
    train_size = 1. - test_size
    # Per-series index ranges: the first train_size fraction of each series
    # is train, the remainder is test.
    train_ind = [np.arange(0, int(train_size * len(Xt[i]))) for i in range(Ns)]
    test_ind = [np.arange(len(train_ind[i]), len(Xt[i])) for i in range(Ns)]
    X_train = [Xt[i][train_ind[i]] for i in range(Ns)]
    X_test = [Xt[i][test_ind[i]] for i in range(Ns)]
    if Xc is not None:
        # Contextual data is not split along time; it is attached whole.
        X_train = TS_Data(X_train, Xc)
        X_test = TS_Data(X_test, Xc)
    if len(np.atleast_1d(y[0])) == len(Xt[0]):
        # y is a time series: split it the same way as Xt.
        y_train = [y[i][train_ind[i]] for i in range(Ns)]
        y_test = [y[i][test_ind[i]] for i in range(Ns)]
    else:
        # y is contextual: the same target applies to both halves.
        y_train = y
        y_test = y
    return X_train, X_test, y_train, y_test
|
def request(url, args=None, data=None, headers=None, method=None, credentials=None, raw_response=False, stats=None):
    """Issues HTTP requests.

    Args:
      url: the URL to request.
      args: optional query string arguments.
      data: optional data to be sent within the request.
      headers: optional headers to include in the request.
      method: optional HTTP method to use. If unspecified this is inferred
          (GET or POST) based on the existence of request data.
      credentials: optional set of credentials to authorize the request.
      raw_response: whether the raw response content should be returned as-is.
      stats: an optional dictionary that, if provided, will be populated with
          some useful info about the request, like 'duration' in seconds and
          'data_size' in bytes. These may be useful optimizing the access to
          rate-limited APIs.
    Returns:
      The parsed response object.
    Raises:
      Exception when the HTTP request fails or the response cannot be processed.
    """
    if headers is None:
        headers = {}
    headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    # Add querystring to the URL if there are any arguments.
    if args is not None:
        qs = urllib.parse.urlencode(args)
        url = url + '?' + qs
    # Setup method to POST if unspecified, and appropriate request headers
    # if there is data to be sent within the request.
    if data is not None:
        if method is None:
            method = 'POST'
        if data != '':
            # If there is a content type specified, use it (and the data) as-is.
            # Otherwise, assume JSON, and serialize the data object.
            if 'Content-Type' not in headers:
                data = json.dumps(data)
                headers['Content-Type'] = 'application/json'
            headers['Content-Length'] = str(len(data))
    else:
        if method == 'POST':
            headers['Content-Length'] = '0'
    # If the method is still unset, i.e. it was unspecified, and there
    # was no data to be POSTed, then default to GET request.
    if method is None:
        method = 'GET'
    http = Http.http
    # Authorize with credentials if given. (The original copied the shared
    # http instance and then immediately discarded the copy; AuthorizedHttp
    # wraps its own transport, so no copy is needed.)
    if credentials is not None:
        http = google_auth_httplib2.AuthorizedHttp(credentials)
    if stats is not None:
        stats['duration'] = datetime.datetime.utcnow()
    response = None
    try:
        log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
        response, content = http.request(url, method=method, body=data, headers=headers)
        if 200 <= response.status < 300:
            if raw_response:
                return content
            if isinstance(content, str):
                return json.loads(content)
            return json.loads(str(content, encoding='UTF-8'))
        raise RequestException(response.status, content)
    except ValueError:
        raise Exception('Failed to process HTTP response.')
    except httplib2.HttpLib2Error:
        raise Exception('Failed to send HTTP request.')
    finally:
        if stats is not None:
            # Guard against failures that happen before a response or body
            # existed: the original raised TypeError on len(None) and
            # AttributeError on response.status here.
            stats['data_size'] = len(data) if data is not None else 0
            stats['status'] = response.status if response is not None else None
            stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()
|
def get_object(self, queryset=None):
    '''Get the guest list from the URL'''
    guestlist_id = self.kwargs.get('guestlist_id')
    return get_object_or_404(GuestList.objects.filter(id=guestlist_id))
|
def _compute_forearc_backarc_term ( self , C , sites , dists ) :
"""Computes the forearc / backarc scaling term given by equation ( 4 ) ."""
|
f_faba = np . zeros_like ( dists . rhypo )
# Term only applies to backarc sites ( F _ FABA = 0 . for forearc )
max_dist = dists . rhypo [ sites . backarc ]
max_dist [ max_dist < 85.0 ] = 85.0
f_faba [ sites . backarc ] = C [ 'theta7' ] + ( C [ 'theta8' ] * np . log ( max_dist / 40.0 ) )
return f_faba
|
def intersect(self, other):
    """Calculate the intersection of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The intersection of this rectangle and the given other
            rectangle, or None if there is no such intersection.
    """
    intersection = Rect()
    # Bug fix: the original passed self._ptr twice, so it always intersected
    # the rectangle with itself and never used `other` at all.
    if lib.SDL_IntersectRect(self._ptr, other._ptr, intersection._ptr):
        return intersection
    return None
|
def get_account_details(self, account):
    """Get the account details."""
    try:
        luser = self._get_account(account.username)
        luser = preload(luser, database=self._database)
    except ObjectDoesNotExist:
        return {}
    # Drop the password hash and any unset attributes.
    return {attr: value for attr, value in luser.items()
            if attr != 'userPassword' and value is not None}
|
def _check(self):
    """Check that entry attributes are legal."""
    # Run the super method first.
    super(Photometry, self)._check()
    keys = self._KEYS
    # Presence of each relevant key in this entry.
    present = {k: (k in self) for k in (
        keys.FLUX, keys.FLUX_DENSITY, keys.U_FLUX, keys.U_FLUX_DENSITY,
        keys.FREQUENCY, keys.BAND, keys.ENERGY, keys.U_FREQUENCY,
        keys.U_ENERGY)}
    err_str = None
    if present[keys.FLUX] or present[keys.FLUX_DENSITY]:
        # A flux requires some spectral coordinate, and each quantity
        # requires its matching unit key; first violation found wins.
        if not (present[keys.FREQUENCY] or present[keys.BAND] or present[keys.ENERGY]):
            err_str = ("Has `{}` or `{}`".format(keys.FLUX, keys.FLUX_DENSITY)
                       + " but None of `{}`, `{}`, `{}`".format(keys.FREQUENCY, keys.BAND, keys.ENERGY))
        elif present[keys.FLUX] and not present[keys.U_FLUX]:
            err_str = "`{}` provided without `{}`.".format(keys.FLUX, keys.U_FLUX)
        elif present[keys.FLUX_DENSITY] and not present[keys.U_FLUX_DENSITY]:
            err_str = "`{}` provided without `{}`.".format(keys.FLUX_DENSITY, keys.U_FLUX_DENSITY)
        elif present[keys.FREQUENCY] and not present[keys.U_FREQUENCY]:
            err_str = "`{}` provided without `{}`.".format(keys.FREQUENCY, keys.U_FREQUENCY)
        elif present[keys.ENERGY] and not present[keys.U_ENERGY]:
            err_str = "`{}` provided without `{}`.".format(keys.ENERGY, keys.U_ENERGY)
    if err_str is not None:
        raise ValueError(err_str)
    return
|
def get(self, key):
    """Return set of descendants of node named `key` in `target_graph`.

    Returns from the cached dict if present, otherwise computes over the
    graph and caches the result.
    """
    try:
        return self[key]
    except KeyError:
        descendants = set(get_descendants(self._target_graph, key))
        self[key] = descendants
        return descendants
|
def get_datetime(strings: Sequence[str], prefix: str, datetime_format_string: str, ignoreleadingcolon: bool = False, precedingline: str = "") -> Optional[datetime.datetime]:
    """Fetches a ``datetime.datetime`` parameter via :func:`get_string`."""
    raw = get_string(strings, prefix,
                     ignoreleadingcolon=ignoreleadingcolon,
                     precedingline=precedingline)
    if len(raw) == 0:
        return None
    # For the format strings you can pass to datetime.datetime.strptime, see
    # http://docs.python.org/library/datetime.html
    # A typical one is "%d-%b-%Y (%H:%M:%S)"
    return datetime.datetime.strptime(raw, datetime_format_string)
|
def parse_port(port_obj, owner):
    '''Create a port object of the correct type.

    The correct port object type is chosen based on the port.port_type
    property of port_obj.

    @param port_obj The CORBA PortService object to wrap.
    @param owner The owner of this port. Should be a Component object or None.
    @return The created port object.
    '''
    profile = port_obj.get_port_profile()
    props = utils.nvlist_to_dict(profile.properties)
    # Dispatch on the declared port type; unknown types get the generic Port.
    port_classes = {
        'DataInPort': DataInPort,
        'DataOutPort': DataOutPort,
        'CorbaPort': CorbaPort,
    }
    klass = port_classes.get(props['port.port_type'], Port)
    return klass(port_obj, owner)
|
def orbit_gen(self):
    """Generator for iterating over each orbit."""
    if self.norbits == 1:
        # A single orbit: this object already is the orbit.
        yield self
        return
    for orbit_index in range(self.norbits):
        yield self[:, orbit_index]
|
def rehash(self, password):
    """Recreate the internal hash.

    Re-derives ``self.hash`` from ``password`` using the currently desired
    number of rounds, and records that round count in ``self.rounds``.
    """
    rounds = self.desired_rounds
    self.hash = self._new(password, rounds)
    self.rounds = rounds
|
def get_hcurves_and_means(dstore):
    """Extract hcurves from the datastore and compute their means.

    :returns: curves_by_rlz, mean_curves
    """
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    getter = getters.PmapGetter(dstore, rlzs_assoc)
    curves_by_rlz = dict(zip(getter.rlzs, getter.get_pmaps()))
    return curves_by_rlz, dstore['hcurves/mean']
|
def concatenate(arrs, axis=0):
    r"""Concatenate multiple values into a new unitized object.

    This is essentially a unit-aware version of `numpy.concatenate`. All items
    must be able to be converted to the same units. If an item has no units,
    it will be given those of the rest of the collection, without conversion.
    The first units found in the arguments is used as the final output units.

    Parameters
    ----------
    arrs : Sequence of arrays
        The items to be joined together
    axis : integer, optional
        The array axis along which to join the arrays. Defaults to 0 (the
        first dimension)

    Returns
    -------
    `pint.Quantity`
        New container with the value passed in and units corresponding to the
        first item.
    """
    # The first item carrying units decides the output units; everything
    # unitless is assumed to already be in those units.
    dest = next((a.units for a in arrs if hasattr(a, 'units')), 'dimensionless')
    pieces = []
    for item in arrs:
        if hasattr(item, 'to'):
            item = item.to(dest).magnitude
        pieces.append(np.atleast_1d(item))
    # Use masked-array concatenate so masks are preserved, then fall back to
    # a plain array when nothing is actually masked.
    joined = np.ma.concatenate(pieces, axis=axis)
    if not np.any(joined.mask):
        joined = np.asarray(joined)
    return units.Quantity(joined, dest)
|
def _pdf_value(pdf, population, fitnesses, fitness_threshold):
    """Give the value of a pdf.

    This represents the likelihood of a pdf generating solutions
    that exceed the threshold.
    """
    # Sum the (log-scaled) chance of the pdf producing each solution whose
    # fitness meets the threshold; 1.0 + chance avoids log(0).
    # The official equation divides by len(fitnesses), but that is a constant
    # factor when only comparing pdfs over same-sized populations.
    return sum(
        (math.log(1.0 + _chance(solution, pdf))
         for solution, fitness in zip(population, fitnesses)
         if fitness >= fitness_threshold),
        0.0)
|
def intersectingIntervalIterator(self, start, end):
    """Iterate, ordered by start index, over the objects in the tree that
    intersect the given interval.

    :param start: find intervals in the tree that intersect an interval with
        this start index (inclusive)
    :param end: find intervals in the tree that intersect an interval with
        this end index (exclusive)
    :return: an iterator that will yield intersected intervals
    """
    hits = self.intersectingInterval(start, end)
    hits.sort(key=lambda interval: interval.start)
    yield from hits
|
def ssh_key_info_from_key_data(key_id, priv_key=None):
    """Get/load SSH key info necessary for signing.

    @param key_id {str} Either a private ssh key fingerprint, e.g.
        'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to
        an ssh private key file (like ssh's IdentityFile config option).
    @param priv_key {str} Optional. SSH private key file data (PEM format).
    @return {dict} with these keys:
        - type: "ssh_key"  (note: an earlier doc said "agent"; the code
          below sets "ssh_key")
        - signer: the loaded cryptography private-key object
        - fingerprint: the key fingerprint
        - algorithm: see ECDSA_ALGO_FROM_KEY_SIZE / RSA_STR for the
          supported values
        - ...some others added by `load_ssh_key()`
    """
    if FINGERPRINT_RE.match(key_id) and priv_key:
        # Caller supplied both the fingerprint and the PEM data directly.
        key_info = {"fingerprint": key_id, "priv_key": priv_key}
    else:
        # Otherwise, we attempt to load necessary details from ~/.ssh.
        key_info = load_ssh_key(key_id)
    # Load a key signer.
    key = None
    try:
        key = serialization.load_pem_private_key(key_info["priv_key"], password=None, backend=default_backend())
    except TypeError as ex:
        # TypeError from load_pem_private_key with password=None is taken to
        # mean the key is encrypted: prompt for a passphrase, up to 3 tries.
        log.debug("could not import key without passphrase (will " "try with passphrase): %s", ex)
        if "priv_key_path" in key_info:
            prompt = "Passphrase [%s]: " % key_info["priv_key_path"]
        else:
            prompt = "Passphrase: "
        for i in range(3):
            passphrase = getpass(prompt)
            if not passphrase:
                # Empty input: stop prompting and fall through to the
                # "could not import key" error below.
                break
            try:
                key = serialization.load_pem_private_key(key_info["priv_key"], password=passphrase, backend=default_backend())
            except ValueError:
                # Wrong passphrase; ask again.
                continue
            else:
                break
    if not key:
        details = ""
        if "priv_key_path" in key_info:
            details = " (%s)" % key_info["priv_key_path"]
        raise MantaError("could not import key" + details)
    # If load_ssh_key() wasn't run, set the algorithm here.
    if 'algorithm' not in key_info:
        if isinstance(key, ec.EllipticCurvePrivateKey):
            key_info['algorithm'] = ECDSA_ALGO_FROM_KEY_SIZE[str(key.key_size)]
        elif isinstance(key, rsa.RSAPrivateKey):
            key_info['algorithm'] = RSA_STR
        else:
            raise MantaError("Unsupported key type for: {}".format(key_id))
    key_info["signer"] = key
    key_info["type"] = "ssh_key"
    return key_info
|
def _epoch(self, X, epoch_idx, batch_size, updates_epoch, constants, show_progressbar):
    """Run a single epoch.

    This function shuffles the data internally,
    as this improves performance.

    Parameters
    ----------
    X : numpy array
        The training data.
    epoch_idx : int
        The current epoch
    batch_size : int
        The batch size
    updates_epoch : int
        The number of updates to perform per epoch
    constants : dict
        A dictionary containing the constants with which to update the
        parameters in self.parameters.
    show_progressbar : bool
        Whether to show a progressbar during training.

    NOTE(review): epoch_idx, updates_epoch and constants are not referenced
    in this body — confirm they are consumed by the helpers or are dead.
    """
    # Create batches
    X_ = self._create_batches(X, batch_size)
    X_len = np.prod(X.shape[:-1])
    # Initialize the previous activation.
    # NOTE(review): this first assignment is immediately overwritten by the
    # next line; it only matters if _init_prev has side effects — confirm.
    prev = self._init_prev(X_)
    prev = self.distance_function(X_[0], self.weights)[0]
    influences = self._update_params(prev)
    # Iterate over the training data
    for idx, x in enumerate(tqdm(X_, disable=not show_progressbar)):
        # Our batches are padded, so we need to
        # make sure we know when we hit the padding
        # so we don't inadvertently learn zeroes.
        diff = X_len - (idx * batch_size)
        if diff and diff < batch_size:
            x = x[:diff]
            # Prev_activation may be None
            if prev is not None:
                prev = prev[:diff]
        # if idx > 0 and idx % update_step == 0:
        influences = self._update_params(prev)
        prev = self._propagate(x, influences, prev_activation=prev)
|
async def RemoveBlocks(self, all_):
    '''all_ : bool
    Returns -> None'''
    # Map input types to the rpc message and send it.
    params = {'all': all_}
    msg = dict(type='Controller', request='RemoveBlocks', version=5, params=params)
    return await self.rpc(msg)
|
def common_cli_output_options(f):
    """Add common CLI output options to commands.

    Decorator that adds --debug, --output-format and --verbose click options
    to the wrapped command, pops them from the keyword arguments, stores them
    on the shared options object, and passes that object on to the command as
    the ``opts`` keyword argument.
    """
    # Decorator order matters: click options are applied bottom-up, and
    # pass_context must wrap the function that takes ctx first.
    @click.option("-d", "--debug", default=False, is_flag=True, help="Produce debug output during processing.",)
    @click.option("-F", "--output-format", default="pretty", type=click.Choice(["pretty", "json", "pretty_json"]), help="Determines how output is formatted. This is only supported by a " "subset of the commands at the moment (e.g. list).",)
    @click.option("-v", "--verbose", is_flag=True, default=False, help="Produce more output during processing.",)
    @click.pass_context
    @functools.wraps(f)
    def wrapper(ctx, *args, **kwargs):  # pylint: disable=missing-docstring
        # Move the common options off kwargs and onto the shared opts object.
        opts = config.get_or_create_options(ctx)
        opts.debug = kwargs.pop("debug")
        opts.output = kwargs.pop("output_format")
        opts.verbose = kwargs.pop("verbose")
        kwargs["opts"] = opts
        return ctx.invoke(f, *args, **kwargs)
    return wrapper
|
def _build_relations_config ( self , yamlconfig ) :
"""Builds a dictionary from relations configuration while maintaining compatibility"""
|
config = { }
for element in yamlconfig :
if isinstance ( element , str ) :
config [ element ] = { 'relation_name' : element , 'schemas' : [ ] }
elif isinstance ( element , dict ) :
if 'relation_name' not in element or 'schemas' not in element :
self . log . warning ( "Unknown element format for relation element %s" , element )
continue
if not isinstance ( element [ 'schemas' ] , list ) :
self . log . warning ( "Expected a list of schemas for %s" , element )
continue
name = element [ 'relation_name' ]
config [ name ] = { 'relation_name' : name , 'schemas' : element [ 'schemas' ] }
else :
self . log . warning ( 'Unhandled relations config type: {}' . format ( element ) )
return config
|
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False):
    """Retrieve a text node from the API

    :param textId: CtsTextMetadata Identifier
    :type textId: str
    :param subreference: CapitainsCtsPassage Reference
    :type subreference: str
    :param prevnext: Retrieve graph representing previous and next passage
    :type prevnext: boolean
    :param metadata: Retrieve metadata about the passage and the text
    :type metadata: boolean
    :return: CapitainsCtsPassage
    :rtype: CapitainsCtsPassage
    """
    text = CtsText(urn=textId, retriever=self.endpoint)
    # The richer getPassagePlus call is only needed when the caller wants
    # metadata or the prev/next graph.
    if metadata or prevnext:
        return text.getPassagePlus(reference=subreference)
    return text.getTextualNode(subreference=subreference)
|
def negociate_content(default='json-ld'):
    '''Perform a content negociation on the format given the Accept header'''
    best = request.accept_mimetypes.best_match(ACCEPTED_MIME_TYPES.keys())
    return ACCEPTED_MIME_TYPES.get(best, default)
|
def final_mass_from_f0_tau(f0, tau, l=2, m=2):
    """Returns the final mass (in solar masses) based on the given frequency
    and damping time.

    .. note::
        Currently, only l = m = 2 is supported. Any other indices will raise
        a ``KeyError``.

    Parameters
    ----------
    f0 : float or array
        Frequency of the QNM (in Hz).
    tau : float or array
        Damping time of the QNM (in seconds).
    l : int, optional
        l-index of the harmonic. Default is 2.
    m : int, optional
        m-index of the harmonic. Default is 2.

    Returns
    -------
    float or array
        The mass of the final black hole. If the combination of frequency
        and damping times give an unphysical result, ``numpy.nan`` will be
        returned.
    """
    # Fit from Berti et al. 2006: recover the spin first, then invert the
    # spin-dependent frequency fit for the mass.
    spin = final_spin_from_f0_tau(f0, tau, l=l, m=m)
    coeff_a, coeff_b, coeff_c = _berti_mass_constants[l, m]
    omega = 2 * numpy.pi * f0
    return (coeff_a + coeff_b * (1 - spin) ** coeff_c) / (omega * lal.MTSUN_SI)
|
def expose_endpoint(self):
    """Expose /metrics endpoint on the same Sanic server.

    This may be useful if Sanic is launched from a container and you do not
    want to expose more than one port for some reason.
    """
    @self._app.route('/metrics', methods=['GET'])
    async def expose_metrics(request):
        payload = self._get_metrics_data()
        return raw(payload, content_type=CONTENT_TYPE_LATEST)
|
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # NOTE: this is Python 2 code (`print >>` and `except ..., e` syntax).
    try:
        import setuptools
        # '0.0.1' presumably marks a broken/placeholder install — confirm.
        if setuptools.__version__ == '0.0.1':
            print >> sys.stderr, ("You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script.")
            sys.exit(2)
    except ImportError:
        # Not installed at all: download the egg and bootstrap from it.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools;
        setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        pkg_resources.require("setuptools>=" + version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        print >> sys.stderr, ("The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first.\n\n(Currently using %r)") % (version, e.args[0])
        sys.exit(2)
|
def delete(self, uri, default_response=None):
    """Call DELETE on the Gitlab server

    >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False)
    >>> gitlab.login(user='root', password='5iveL!fe')
    >>> gitlab.delete('/users/5')

    :param uri: String with the URI you wish to delete
    :param default_response: Return value if JSONDecodeError
    :return: Dictionary containing response data
    :raise: HttpError: If invalid response returned
    """
    response = requests.delete(
        self.api_url + uri,
        headers=self.headers,
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout)
    return self.success_or_raise(response, default_response=default_response)
|
def DbDeleteServer(self, argin):
    """Delete server from the database but dont delete device properties

    :param argin: Device server name
    :type: tango.DevString
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbDeleteServer()")
    # A valid server name contains a '/' and no wildcard characters.
    is_invalid = '*' in argin or '%' in argin or '/' not in argin
    if is_invalid:
        self.warn_stream("DataBase::db_delete_server(): server name " + argin + " incorrect ")
        th_exc(DB_IncorrectServerName, "failed to delete server, server name incorrect", "DataBase::DeleteServer()")
    self.db.delete_server(argin)
|
def install(cls, handler, fmt=None, use_chroot=True, style=DEFAULT_FORMAT_STYLE):
    """Install the :class:`HostNameFilter` on a log handler (only if needed).

    :param handler: The logging handler on which to install the filter.
    :param fmt: The log format string to check for ``%(hostname)``.
    :param use_chroot: Refer to :func:`find_hostname()`.
    :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to
        :data:`DEFAULT_FORMAT_STYLE`).

    If `fmt` is given the filter is only installed when `fmt` actually uses
    the ``hostname`` field; otherwise it is installed unconditionally.
    """
    if fmt:
        # Skip installation when the format string never renders the field.
        uses_hostname = FormatStringParser(style=style).contains_field(fmt, 'hostname')
        if not uses_hostname:
            return
    handler.addFilter(cls(use_chroot))
|
def POST_AUTH(self, courseid, classroomid):  # pylint: disable=arguments-differ
    """Edit or delete a classroom.

    Handles two kinds of POST:
    - ``delete`` in the form data: move the classroom's students to the
      default classroom, delete it, then redirect.
    - otherwise: update the classroom either from an uploaded YAML file
      (``upload``) or from the submitted form fields.

    :param courseid: id of the course owning the classroom
    :param classroomid: MongoDB ObjectId string of the classroom
    :return: the rendered classroom-edit page (unless a redirect is raised)
    """
    course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=True)
    if course.is_lti():
        raise web.notfound()

    error = False
    data = web.input(tutors=[], groups=[], classroomfile={})
    if "delete" in data:
        # Get the classroom
        classroom = self.database.classrooms.find_one(
            {"_id": ObjectId(classroomid), "courseid": courseid})
        if classroom is None:
            # NOTE(review): in this branch the final render below uses
            # student_list/tutor_list/... which are never assigned; this was
            # already the case before this change -- confirm intended flow.
            msg = _("Classroom not found.")
            error = True
        elif classroom['default']:
            msg = _("You can't remove your default classroom.")
            error = True
        else:
            # Re-home the students of the deleted classroom into the default one.
            self.database.classrooms.find_one_and_update(
                {"courseid": courseid, "default": True},
                {"$push": {"students": {"$each": classroom["students"]}}})
            self.database.classrooms.delete_one(
                {"_id": ObjectId(classroomid), "courseid": courseid} if False else {"_id": ObjectId(classroomid)})
            raise web.seeother(self.app.get_homepath() + "/admin/" + courseid + "/classrooms")
    else:
        try:
            if "upload" in data:
                new_data = custom_yaml.load(data["classroomfile"].file)
            else:
                # Prepare classroom-like data structure from input
                new_data = {"description": data["description"], "tutors": data["tutors"],
                            "students": [], "groups": []}
                for index, groupstr in enumerate(data["groups"]):
                    group = json.loads(groupstr)
                    new_data["students"].extend(group["students"])
                    # The first group is the "ungrouped students" pseudo-group.
                    if index != 0:
                        new_data["groups"].append(group)
            classroom, errored_students = self.update_classroom(course, classroomid, new_data)
            student_list, tutor_list, other_students, users_info = \
                self.get_user_lists(course, classroom["_id"])
            if len(errored_students) > 0:
                msg = _("Changes couldn't be applied for following students :") + "<ul>"
                for student in errored_students:
                    msg += "<li>" + student + "</li>"
                msg += "</ul>"
                error = True
            else:
                msg = _("Classroom updated.")
        except Exception:
            # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
            # (and web.py control-flow exceptions) are no longer swallowed.
            classroom = self.database.classrooms.find_one(
                {"_id": ObjectId(classroomid), "courseid": courseid})
            student_list, tutor_list, other_students, users_info = \
                self.get_user_lists(course, classroom["_id"])
            msg = _('An error occurred while parsing the data.')
            error = True
    return self.template_helper.get_renderer().course_admin.edit_classroom(
        course, student_list, tutor_list, other_students, users_info, classroom, msg, error)
|
def swipe_down(self, width: int = 1080, length: int = 1920) -> None:
    '''Swipe down: drag from the upper part of the screen to the lower part,
    along the horizontal centre line.'''
    x = 0.5 * width
    top_y = 0.2 * length
    bottom_y = 0.8 * length
    self.swipe(x, top_y, x, bottom_y)
|
def sb_vgp_calc(dataframe, site_correction='yes', dec_tc='dec_tc', inc_tc='inc_tc'):
    """Calculate S_b, the angular dispersion of VGPs.

    When ``site_correction == 'yes'`` the within-site scatter is removed
    following eq. 2 of Cox (1970) before the between-site dispersion is
    returned.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Must contain the columns ``site_lat``, ``site_lon``, ``k``,
        ``vgp_lat`` and ``vgp_lon``; the within-site correction also reads
        ``n`` (samples per site). Columns ``paleolatitude``, ``delta_mean_pole``
        (and ``K``/``Sw`` when corrected) are added as a side effect.
    site_correction : str
        ``'yes'`` (default) applies the within-site correction, ``'no'``
        skips it.
    dec_tc, inc_tc : str
        Column names holding tilt-corrected declination/inclination.

    Returns
    -------
    float
        The (optionally corrected) VGP angular dispersion S_b.

    Raises
    ------
    ValueError
        If ``site_correction`` is neither ``'yes'`` nor ``'no'``. Previously
        such a value fell through both branches and crashed with
        ``UnboundLocalError`` on ``Sb``.
    """
    if site_correction not in ('yes', 'no'):
        raise ValueError("site_correction must be 'yes' or 'no'")

    # Mean direction from the tilt-corrected directional data.
    dataframe_dirs = []
    for n in range(0, len(dataframe)):
        dataframe_dirs.append([dataframe[dec_tc][n], dataframe[inc_tc][n], 1.])
    dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs)

    # Mean pole from the VGP data.
    dataframe_poles = []
    dataframe_pole_lats = []
    dataframe_pole_lons = []
    for n in range(0, len(dataframe)):
        dataframe_poles.append([dataframe['vgp_lon'][n], dataframe['vgp_lat'][n], 1.])
        dataframe_pole_lats.append(dataframe['vgp_lat'][n])
        dataframe_pole_lons.append(dataframe['vgp_lon'][n])
    dataframe_pole_mean = pmag.fisher_mean(dataframe_poles)

    # Mean paleolatitude from the mean inclination via the dipole equation.
    dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc'])

    # Angular distance of each VGP from the mean pole.
    angle_list = []
    for n in range(0, len(dataframe)):
        angle = pmag.angle([dataframe['vgp_lon'][n], dataframe['vgp_lat'][n]],
                           [dataframe_pole_mean['dec'], dataframe_pole_mean['inc']])
        angle_list.append(angle[0])
    dataframe['delta_mean_pole'] = angle_list

    if site_correction == 'yes':
        # Use eq. 2 of Cox (1970) to translate the directional precision
        # parameter into pole coordinates, assuming a Fisherian distribution
        # in directional coordinates and the dipole-equation paleolatitude.
        dataframe['K'] = old_div(
            dataframe['k'],
            (0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude'])) ** 2
                      + 9 * np.sin(np.deg2rad(dataframe['paleolatitude'])) ** 4)))
        dataframe['Sw'] = old_div(81, (dataframe['K'] ** 0.5))
        summation = 0
        N = 0
        for n in range(0, len(dataframe)):
            # Subtract the within-site contribution Sw^2 / n per site.
            quantity = dataframe['delta_mean_pole'][n] ** 2 - \
                old_div(dataframe['Sw'][n] ** 2, dataframe['n'][n])
            summation += quantity
            N += 1
        Sb = ((old_div(1.0, (N - 1.0))) * summation) ** 0.5
    else:
        # Uncorrected between-site dispersion.
        summation = 0
        N = 0
        for n in range(0, len(dataframe)):
            quantity = dataframe['delta_mean_pole'][n] ** 2
            summation += quantity
            N += 1
        Sb = ((old_div(1.0, (N - 1.0))) * summation) ** 0.5
    return Sb
|
def present(name, keys=None, user=None, keyserver=None, gnupghome=None, trust=None, **kwargs):
    '''Ensure GPG public key is present in keychain

    name
        The unique name or keyid for the GPG public key.

    keys
        The keyId or keyIds to add to the GPG keychain.

    user
        Add GPG keys to the specified user's keychain

    keyserver
        The keyserver to retrieve the keys from.

    gnupghome
        Override GNUPG Home directory

    trust
        Trust level for the key in the keychain,
        ignored by default. Valid trust levels:
        expired, unknown, not_trusted, marginally,
        fully, ultimately
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': []}
    # Snapshot of keys already in the target keychain, keyed by keyid,
    # each carrying its current trust level.
    _current_keys = __salt__['gpg.list_keys'](user=user, gnupghome=gnupghome)
    current_keys = {}
    for key in _current_keys:
        keyid = key['keyid']
        current_keys[keyid] = {}
        current_keys[keyid]['trust'] = key['trust']
    # Fall back to the state name when no explicit key ids are given,
    # then normalise to a list.
    if not keys:
        keys = name
    if isinstance(keys, six.string_types):
        keys = [keys]
    for key in keys:
        if key in current_keys.keys():
            # Key already present: optionally reconcile its trust level.
            if trust:
                if trust in _VALID_TRUST_VALUES:
                    if current_keys[key]['trust'] != TRUST_MAP[trust]:
                        # update trust level
                        result = __salt__['gpg.trust_key'](keyid=key,
                                                           trust_level=trust,
                                                           user=user,)
                        if 'result' in result and not result['result']:
                            ret['result'] = result['result']
                            ret['comment'].append(result['comment'])
                        else:
                            ret['comment'].append('Set trust level for {0} to {1}'.format(key, trust))
                    else:
                        ret['comment'].append('GPG Public Key {0} already in correct trust state'.format(key))
                else:
                    ret['comment'].append('Invalid trust level {0}'.format(trust))
            ret['comment'].append('GPG Public Key {0} already in keychain '.format(key))
        else:
            # Key missing: fetch it from the keyserver, then apply trust.
            result = __salt__['gpg.receive_keys'](keyserver, key, user, gnupghome,)
            if 'result' in result and not result['result']:
                ret['result'] = result['result']
                ret['comment'].append(result['comment'])
            else:
                ret['comment'].append('Adding {0} to GPG keychain'.format(name))
            if trust:
                if trust in _VALID_TRUST_VALUES:
                    result = __salt__['gpg.trust_key'](keyid=key,
                                                       trust_level=trust,
                                                       user=user,)
                    if 'result' in result and not result['result']:
                        ret['result'] = result['result']
                        ret['comment'].append(result['comment'])
                    else:
                        ret['comment'].append('Set trust level for {0} to {1}'.format(key, trust))
                else:
                    ret['comment'].append('Invalid trust level {0}'.format(trust))
    # Collapse the accumulated comment list into the single string salt expects.
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
|
def set(self, x):
    """Set variable values via a dictionary mapping name to value."""
    for name, value in x.items():
        target = self[name].value
        if not hasattr(value, "ndim"):
            # Plain scalar: write it into the stored array in place.
            target.itemset(value)
        elif target.ndim < value.ndim:
            # Higher-rank input than storage: squeeze down and set in place.
            target.itemset(value.squeeze())
        else:
            # Compatible array: replace the stored value wholesale.
            self[name].value = value
|
def load_configuration_from_file(directory, args):
    """Return new ``args`` with configuration loaded from file."""
    new_args = copy.copy(args)
    # An explicit --config wins over the directory-based lookup.
    source = new_args.config if new_args.config is not None else directory
    options = _get_options(source, debug=new_args.debug)

    new_args.report = options.get('report', new_args.report)
    # Map symbolic report levels (e.g. "warning") to their numeric value.
    thresholds = docutils.frontend.OptionParser.thresholds
    new_args.report = int(thresholds.get(new_args.report, new_args.report))

    new_args.ignore_language = get_and_split(options, 'ignore_language', new_args.ignore_language)
    new_args.ignore_messages = options.get('ignore_messages', new_args.ignore_messages)
    new_args.ignore_directives = get_and_split(options, 'ignore_directives', new_args.ignore_directives)
    new_args.ignore_substitutions = get_and_split(options, 'ignore_substitutions', new_args.ignore_substitutions)
    new_args.ignore_roles = get_and_split(options, 'ignore_roles', new_args.ignore_roles)
    return new_args
|
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    conn = connectable.connect()
    context.configure(connection=conn,
                      target_metadata=target_metadata,
                      compare_type=True)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        conn.close()
|
def do_pickle_ontology(filename, g=None):
    """From a valid filename, generate the graph instance and pickle it too.

    note: option to pass a pre-generated graph instance too
    2015-09-17: added code to increase recursion limit if cPickle fails
    see http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle

    :param filename: name of the ontology file inside the local models folder
    :param g: optional pre-built Ontospy graph; built from `filename` if absent
    :return: the (possibly newly created) Ontospy graph instance
    """
    ONTOSPY_LOCAL_MODELS = get_home_location()
    get_or_create_home_repo()
    # ensure all the right folders are there
    pickledpath = os.path.join(ONTOSPY_LOCAL_CACHE, filename + ".pickle")
    # pickledpath = ONTOSPY_LOCAL_CACHE + "/" + filename + ".pickle"
    if not g:
        g = Ontospy(os.path.join(ONTOSPY_LOCAL_MODELS, filename))
        # g = Ontospy(ONTOSPY_LOCAL_MODELS + "/" + filename)
    if not GLOBAL_DISABLE_CACHE:
        try:
            cPickle.dump(g, open(pickledpath, "wb"))
            printDebug(".. cached <%s>" % filename, "green")
        except Exception as e:
            # Deeply nested graphs can exceed the pickler's recursion limit;
            # report, then retry once with a 10x larger limit.
            print(Style.DIM + "\n.. Failed caching <%s>" % filename + Style.RESET_ALL)
            print(str(e) + "\n")
            print(Style.DIM + "... attempting to increase the recursion limit from %d to %d" % (
                sys.getrecursionlimit(), sys.getrecursionlimit() * 10) + Style.RESET_ALL)
            try:
                sys.setrecursionlimit(sys.getrecursionlimit() * 10)
                cPickle.dump(g, open(pickledpath, "wb"))
                printDebug(".. cached <%s>" % filename, "green")
            except Exception as e:
                printDebug("\n... Failed caching <%s>... Aborting caching operation..." % filename, "error")
                print(str(e) + "\n")
            # Restore the original recursion limit whether or not the retry worked.
            sys.setrecursionlimit(int(sys.getrecursionlimit() / 10))
    return g
|
def get_flair_list(self, subreddit, *args, **kwargs):
    """Return a get_content generator of flair mappings.

    :param subreddit: Either a Subreddit object or the name of the
        subreddit to return the flair list for.

    The additional parameters are passed directly into
    :meth:`.get_content`. Note: the `url`, `root_field`, `thing_field`, and
    `after_field` parameters cannot be altered.
    """
    flair_url = self.config['flairlist'].format(
        subreddit=six.text_type(subreddit))
    return self.get_content(flair_url, *args,
                            root_field=None,
                            thing_field='users',
                            after_field='next',
                            **kwargs)
|
def set_format(self, format):
    """Pick the correct default format.

    A format given on the command line (``self.options.format``) takes
    precedence; otherwise fall back to the configured default for the
    given format family.
    """
    explicit = self.options.format
    self.format = explicit if explicit else self.config["default_format_" + format]
|
def _gather_from_files(self, config):
    """Gather command/parameter metadata from the cached help files.

    Builds, from the cached JSON help dump:
    - ``self.command_tree``: a tree of command words for completion,
    - ``self.completable`` / ``self.completable_param``: flat word lists,
    - ``self.descrip`` / ``self.param_descript``: wrapped help strings,
    - ``self.command_example`` / ``self.command_param_info``: examples and
      parameter alias groups.
    """
    command_file = config.get_help_files()
    cache_path = os.path.join(config.get_config_dir(), 'cache')
    # Current terminal width, used to wrap help text to fit the screen.
    cols = _get_window_columns()
    with open(os.path.join(cache_path, command_file), 'r') as help_file:
        data = json.load(help_file)
    self.add_exit()
    commands = data.keys()
    for command in commands:
        # Walk/extend the command tree one word at a time.
        branch = self.command_tree
        for word in command.split():
            if word not in self.completable:
                self.completable.append(word)
            if not branch.has_child(word):
                branch.add_child(CommandBranch(word))
            branch = branch.get_child(word)
        description = data[command]['help']
        self.descrip[command] = add_new_lines(description, line_min=int(cols) - 2 * TOLERANCE)
        if 'examples' in data[command]:
            # Each example is a [title, snippet] pair; wrap both parts.
            examples = []
            for example in data[command]['examples']:
                examples.append([add_new_lines(example[0], line_min=int(cols) - 2 * TOLERANCE),
                                 add_new_lines(example[1], line_min=int(cols) - 2 * TOLERANCE)])
            self.command_example[command] = examples
        command_params = data[command].get('parameters', {})
        for param in command_params:
            # Suppressed parameters are internal and never offered.
            if '==SUPPRESS==' not in command_params[param]['help']:
                param_aliases = set()
                for par in command_params[param]['name']:
                    param_aliases.add(par)
                    self.param_descript[command + " " + par] = \
                        add_new_lines(command_params[param]['required'] + " " + command_params[param]['help'],
                                      line_min=int(cols) - 2 * TOLERANCE)
                    if par not in self.completable_param:
                        self.completable_param.append(par)
                # Every alias maps to the full alias set so any spelling
                # reveals its alternatives.
                param_doubles = self.command_param_info.get(command, {})
                for alias in param_aliases:
                    param_doubles[alias] = param_aliases
                self.command_param_info[command] = param_doubles
|
def likelihood(self, outcomes, modelparams, expparams):
    """Calculates the likelihood function at the states specified
    by modelparams and measurement specified by expparams.

    This is given by the Born rule and is the probability of
    outcomes given the state and measurement operator.

    :param outcomes: measurement outcome
    :param expparams: Bloch vector of measurement axis (``expparams['axis']``)
        and visibility (``expparams['vis']``)
    :param modelparams: quantum state Bloch vector
    """
    # By calling the superclass implementation, we can consolidate
    # call counting there.
    super(QubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
    # Born rule for a projective measurement along the given axis:
    # pr0 = (1 + s . a) / 2, summed over the Bloch components.
    # Note that expparams['axis'] has shape (n_exp, 3).
    pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))
    # Imperfect visibility mixes the ideal outcome with a coin flip.
    # Note that expparams['vis'] has shape (n_exp,).
    pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5
    pr0 = pr0[:, np.newaxis]
    # Now we concatenate over outcomes.
    return Model.pr0_to_likelihood_array(outcomes, pr0)
|
def print_user(self, user):
    '''Print a single relational-database user as "id<TAB>token[status]"
    and return that string.'''
    token = user.token
    # Tokens in a terminal state double as the displayed status.
    status = token if token in ('finished', 'revoked') else "active"
    if token is None:
        token = ''
    subid = "%s\t%s[%s]" % (user.id, token, status)
    print(subid)
    return subid
|
def get_vhost(self, vname):
    """Returns the attributes of a single named vhost in a dict.

    :param string vname: Name of the vhost to get.
    :returns dict: Attribute dict for the named vhost
    """
    # URL-encode the name completely (no safe characters) so vhosts such
    # as "/" survive the path interpolation.
    encoded = quote(vname, '')
    return self._call(Client.urls['vhosts_by_name'] % encoded,
                      'GET',
                      headers=Client.json_headers)
|
def set_outlet(self, latitude, longitude, outslope):
    """Adds an outlet point (position and slope) to the project."""
    outlet_kwargs = {
        'latitude': latitude,
        'longitude': longitude,
        'outslope': outslope,
    }
    self.project_manager.setOutlet(**outlet_kwargs)
|
def get_blobstore(layout):
    """Return the Blobstore module for a given storage layout.

    Args:
        layout (StorageLayout): Target storage layout.

    Returns:
        module: the matching ``wal_e.blobstore`` backend.

    Raises:
        ValueError: if the layout matches no known backend. (Previously an
            unknown layout fell through every branch and crashed with
            ``UnboundLocalError`` on the return statement.)
    """
    # Imports are deferred so only the selected backend's dependencies load.
    if layout.is_s3:
        from wal_e.blobstore import s3 as blobstore
    elif layout.is_wabs:
        from wal_e.blobstore import wabs as blobstore
    elif layout.is_swift:
        from wal_e.blobstore import swift as blobstore
    elif layout.is_gs:
        from wal_e.blobstore import gs as blobstore
    elif layout.is_file:
        from wal_e.blobstore import file as blobstore
    else:
        raise ValueError('unknown storage layout: %r' % (layout,))
    return blobstore
|
def tohdf5(input_files, output_file, n_events, conv_times_to_jte, **kwargs):
    """Convert any supported file to HDF5.

    :param input_files: iterable of input paths; with more than one input,
        each output is written next to its input as ``<input>.h5`` and the
        ``output_file`` argument is ignored
    :param output_file: destination path (single-input case only)
    :param n_events: number of events to drain from the pipeline
    :param conv_times_to_jte: if true, attach an MCTimeCorrector to convert
        MC times to JTE times
    :param kwargs: forwarded to both GenericPump and HDF5Sink, and also
        recorded (plus ``origin``) as HDF5 metadata
    """
    if len(input_files) > 1:
        cprint("Preparing to convert {} files to HDF5.".format(len(input_files)))
    from km3pipe import Pipeline  # noqa
    from km3pipe.io import GenericPump, HDF5Sink, HDF5MetaData  # noqa
    for input_file in input_files:
        cprint("Converting '{}'...".format(input_file))
        # Multiple inputs: derive each output name from its input.
        if len(input_files) > 1:
            output_file = input_file + '.h5'
        meta_data = kwargs.copy()
        meta_data['origin'] = input_file
        pipe = Pipeline()
        pipe.attach(HDF5MetaData, data=meta_data)
        pipe.attach(GenericPump, filenames=input_file, **kwargs)
        pipe.attach(StatusBar, every=250)
        if conv_times_to_jte:
            from km3modules.mc import MCTimeCorrector
            pipe.attach(MCTimeCorrector)
        pipe.attach(HDF5Sink, filename=output_file, **kwargs)
        pipe.drain(n_events)
        cprint("File '{}' was converted.".format(input_file))
|
def _GetPqlService ( self ) :
"""Lazily initializes a PQL service client ."""
|
if not self . _pql_service :
self . _pql_service = self . _ad_manager_client . GetService ( 'PublisherQueryLanguageService' , self . _version , self . _server )
return self . _pql_service
|
def _delete_org ( self , org_name ) :
"""Send organization delete request to DCNM .
: param org _ name : name of organization to be deleted"""
|
url = self . _del_org_url % ( org_name )
return self . _send_request ( 'DELETE' , url , '' , 'organization' )
|
def get_cmor_fname_meta(fname):
    """Extract metadata from a CMOR-style (CMIP5 DRS) file name.

    The DRS layout is::

        <variable>_<mip_table>_<model>_<experiment>_<ensemble_member>
            [_<temporal_subset>][_<geographical_info>].nc

    A geographical-info token starts with ``g`` (``g-XXXX[-YYYY]``); any
    other optional token is treated as the temporal subset.

    Arguments:
        fname (str): A file name conforming to the DRS spec.

    Returns:
        dict: Metadata as extracted from the filename.

    Raises:
        PathError: when required fields are missing or too many optional
            fields are present.
    """
    # Strip any directory component and the extension before splitting.
    if '/' in fname:
        fname = os.path.split(fname)[1]
    fname = os.path.splitext(fname)[0]

    tokens = fname.split('_')
    res = {}
    try:
        for key in CMIP5_FNAME_REQUIRED_ATTS:
            res[key] = tokens.pop(0)
    except IndexError:
        # Fewer underscore-separated fields than required attributes.
        raise PathError(fname)

    # Determine presence and order of optional metadata.
    if len(tokens) > 2:
        raise PathError(fname)
    for token in tokens:
        if token[0] == 'g':
            res['geographical_info'] = token
        else:
            res['temporal_subset'] = token
    return res
|
def get_ctype(rtype, cfunc, *args):
    """Call a C function that fills a pointer passed as its last argument,
    and return the pointed-to value afterwards.

    :param rtype: C data type that the function fills
    :param cfunc: C function to call
    :param args: arguments to call the function with (output pointer appended)
    :return: the value the function wrote through the pointer
    """
    out_ptr = backend.ffi.new(rtype)
    cfunc(*(args + (out_ptr,)))
    return out_ptr[0]
|
def add_auth(self, user=None, password=None, pattern=None):
    """Add given authentication data.

    Both a user and a URL pattern are required; incomplete entries are
    logged and discarded.
    """
    if not user or not pattern:
        log.warn(LOG_CHECK, _("missing user or URL pattern in authentication data."))
        return
    self["authentication"].append(dict(
        user=user,
        password=password,
        pattern=re.compile(pattern),
    ))
|
def post(self, request, *args, **kwargs):
    """Accepts POST requests, and substitutes the submitted data in for the
    page's attributes before rendering."""
    posted = request.POST
    self.object = self.get_object()
    self.object.content = posted['content']
    self.object.title = posted['title']
    self.object = self._mark_html_fields_as_safe(self.object)
    ctx = self.get_context_data(object=self.object)
    return self.render_to_response(ctx, content_type=self.get_mimetype())
|
def _get_args_for_reloading ( ) :
"""Returns the executable . This contains a workaround for windows
if the executable is incorrectly reported to not have the . exe
extension which can cause bugs on reloading ."""
|
rv = [ sys . executable ]
py_script = sys . argv [ 0 ]
if os . name == 'nt' and not os . path . exists ( py_script ) and os . path . exists ( py_script + '.exe' ) :
py_script += '.exe'
rv . append ( py_script )
rv . extend ( sys . argv [ 1 : ] )
return rv
|
def is_searchable(self):
    """A bool value that indicates whether the person has enough data and
    can be sent as a query to the API.

    True when at least one of the person's names, emails, phones or
    usernames is itself searchable.

    Note: the previous implementation used ``bool(filter(...))``, which on
    Python 3 is always True because ``filter`` returns a lazy iterator
    regardless of whether it would yield anything.
    """
    def has_searchable(fields):
        # Any single searchable field makes the whole person searchable.
        return any(field.is_searchable for field in fields)

    return (has_searchable(self.names) or has_searchable(self.emails)
            or has_searchable(self.phones) or has_searchable(self.usernames))
|
def _ref_check(self, case):
    """Checks that there is exactly one reference bus.

    Returns a ``(valid, refs)`` tuple, where ``refs`` lists the indices of
    all reference buses found.
    """
    refs = [bus._i for bus in case.buses if bus.type == REFERENCE]
    valid = len(refs) == 1
    if not valid:
        logger.error("OPF requires a single reference bus.")
    return valid, refs
|
def unfederate(self, serverId):
    """This operation unfederates an ArcGIS Server from Portal for ArcGIS.

    :param serverId: id of the federated server to remove
    :return: the JSON response from the portal admin endpoint
    """
    url = self._url + "/servers/{serverid}/unfederate".format(serverid=serverId)
    params = {"f": "json"}
    # BUG FIX: was ``self._proxy_ur`` (missing trailing "l"), which raised
    # AttributeError on every call.
    return self._get(url=url,
                     param_dict=params,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
|
def _rescanSizes(self, force=True):
    """Zero and recalculate quota sizes so subvolume sizes will be correct.

    :param force: when False and no rescan is pending (status flags clear),
        return without triggering a new rescan.
    """
    # Enabling quotas (re)initialises accounting; status is a raw flag word.
    status = self.QUOTA_CTL(cmd=BTRFS_QUOTA_CTL_ENABLE).status
    logger.debug("CTL Status: %s", hex(status))
    status = self.QUOTA_RESCAN_STATUS()
    logger.debug("RESCAN Status: %s", status)
    # NOTE(review): indentation reconstructed from flattened source -- the
    # wait below is assumed to run whether or not a rescan was just started.
    if not status.flags:
        if not force:
            return
        # No rescan in progress: kick one off explicitly.
        self.QUOTA_RESCAN()
    logger.warn("Waiting for btrfs quota usage scan...")
    # Block until the kernel finishes the quota rescan.
    self.QUOTA_RESCAN_WAIT()
|
def remove_collisions(self, min_dist=0.5):
    """Remove vnodes that are too close to existing atoms in the structure.

    Args:
        min_dist (float): The minimum distance that a vertex needs to be
            from existing atoms.
    """
    vertex_fracs = [v.frac_coords for v in self.vnodes]
    site_fracs = self.structure.frac_coords
    # Distance from every vertex to every existing site (lattice-aware).
    dist_matrix = self.structure.lattice.get_all_distances(vertex_fracs, site_fracs)
    nearest = np.min(dist_matrix, axis=1)
    # Keep only vertices farther than min_dist from their nearest atom.
    self.vnodes = [v for v, d in zip(self.vnodes, nearest) if d > min_dist]
|
def dot(a, b):
    """Dot product of two TT-matrices or two TT-vectors.

    ``None`` acts as an identity for the left operand; anything else
    without a ``__dot__`` method is rejected.
    """
    if hasattr(a, '__dot__'):
        return a.__dot__(b)
    if a is not None:
        raise ValueError('Dot is waiting for two TT-vectors or two TT- matrices')
    return b
|
def list_exports(exports='/etc/exports'):
    '''List configured exports

    CLI Example:

    .. code-block:: bash

        salt '*' nfs.list_exports
    '''
    # Maps export path -> list of {'hosts': ..., 'options': [...]} entries.
    ret = {}
    with salt.utils.files.fopen(exports, 'r') as efl:
        for line in salt.utils.stringutils.to_unicode(efl.read()).splitlines():
            # Skip blanks and comment lines.
            if not line:
                continue
            if line.startswith('#'):
                continue
            comps = line.split()
            # Handle the case where the same path is given twice
            if not comps[0] in ret:
                ret[comps[0]] = []
            # Extra path tokens on the line become aliases of this export.
            newshares = []
            for perm in comps[1:]:
                if perm.startswith('/'):
                    newshares.append(perm)
                    continue
                # Each permission token looks like host(opt1,opt2).
                permcomps = perm.split('(')
                permcomps[1] = permcomps[1].replace(')', '')
                hosts = permcomps[0]
                if not isinstance(hosts, six.string_types):
                    # Lists, etc would silently mangle /etc/exports
                    raise TypeError('hosts argument must be a string')
                options = permcomps[1].split(',')
                ret[comps[0]].append({'hosts': hosts, 'options': options})
            # Aliases share the same entry list object as the primary path.
            for share in newshares:
                ret[share] = ret[comps[0]]
    return ret
|
def get_file_to_stream(self, share_name, directory_name, file_name, stream,
                       start_range=None, end_range=None, validate_content=False,
                       progress_callback=None, max_connections=2, timeout=None):
    '''Downloads a file to a stream, with automatic chunking and progress
    notifications. Returns an instance of :class:`File` with properties
    and metadata.

    :param str share_name: Name of existing share.
    :param str directory_name: The path to the directory.
    :param str file_name: Name of existing file.
    :param io.IOBase stream: Opened file/stream to write to.
    :param int start_range:
        Start of byte range to use for downloading a section of the file.
        If no end_range is given, all bytes after the start_range will be
        downloaded. Both bounds are inclusive; e.g. start_range=0,
        end_range=511 downloads the first 512 bytes.
    :param int end_range:
        End of byte range (inclusive). If given, start_range must be
        provided.
    :param bool validate_content:
        If true, validates an MD5 hash for each retrieved portion of the
        file (transactional MD5s are only returned for chunks of 4MB or
        less, so the first request is capped at self.MAX_CHUNK_GET_SIZE
        instead of self.MAX_SINGLE_GET_SIZE).
    :param progress_callback:
        Callback with signature func(current, total) where current is the
        number of bytes transferred so far.
    :param int max_connections:
        1 does a single large get; >=2 does an initial get of
        self.MAX_SINGLE_GET_SIZE and then parallel chunked downloads of
        self.MAX_CHUNK_GET_SIZE using this many threads.
    :param int timeout:
        Timeout in seconds, applied to each service call individually.
    :return: A File with properties and metadata.
    :rtype: :class:`~azure.storage.file.models.File`
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    _validate_not_none('stream', stream)

    # If the user explicitly sets max_connections to 1, do a single shot download
    if max_connections == 1:
        file = self._get_file(share_name, directory_name, file_name,
                              start_range=start_range, end_range=end_range,
                              validate_content=validate_content, timeout=timeout)
        # Set the download size
        download_size = file.properties.content_length
    # If max_connections is greater than 1, do the first get to establish the
    # size of the file and get the first segment of data
    else:
        # Parallel chunks are written at offsets, so the stream must seek.
        if sys.version_info >= (3,) and not stream.seekable():
            raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)

        # The service only provides transactional MD5s for chunks under 4MB.
        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the
        # first chunk so a transactional MD5 can be retrieved.
        first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE

        initial_request_start = start_range if start_range else 0
        if end_range and end_range - start_range < first_get_size:
            initial_request_end = end_range
        else:
            initial_request_end = initial_request_start + first_get_size - 1

        # Send a context object to make sure we always retry to the initial location
        operation_context = _OperationContext(location_lock=True)
        try:
            file = self._get_file(share_name, directory_name, file_name,
                                  start_range=initial_request_start,
                                  end_range=initial_request_end,
                                  validate_content=validate_content,
                                  timeout=timeout, _context=operation_context)

            # Parse the total file size and adjust the download size if ranges
            # were specified
            file_size = _parse_length_from_content_range(file.properties.content_range)
            if end_range:
                # Use the end_range unless it is over the end of the file
                download_size = min(file_size, end_range - start_range + 1)
            elif start_range:
                download_size = file_size - start_range
            else:
                download_size = file_size
        except AzureHttpError as ex:
            if not start_range and ex.status_code == 416:
                # Get range will fail on an empty file. If the user did not
                # request a range, do a regular get request in order to get
                # any properties.
                file = self._get_file(share_name, directory_name, file_name,
                                      validate_content=validate_content,
                                      timeout=timeout, _context=operation_context)
                # Set the download size to empty
                download_size = 0
            else:
                raise ex

    # Mark the first progress chunk. If the file is small or this is a single
    # shot download, this is the only call
    if progress_callback:
        progress_callback(file.properties.content_length, download_size)

    # Write the content to the user stream
    # Clear file content since output has been written to user stream
    if file.content is not None:
        stream.write(file.content)
        file.content = None

    # If the file is small or single shot download was used, the download is
    # complete at this point. If file size is large, use parallel download.
    if file.properties.content_length != download_size:
        # At this point would like to lock on something like the etag so that
        # if the file is modified, we dont get a corrupted download. However,
        # this feature is not yet available on the file service.
        end_file = file_size
        if end_range:
            # Use the end_range unless it is over the end of the file
            end_file = min(file_size, end_range + 1)
        _download_file_chunks(self, share_name, directory_name, file_name,
                              download_size, self.MAX_CHUNK_GET_SIZE, first_get_size,
                              initial_request_end + 1,  # start where the first download ended
                              end_file, stream, max_connections,
                              progress_callback, validate_content, timeout,
                              operation_context,)

        # Set the content length to the download size instead of the size of
        # the last range
        file.properties.content_length = download_size

        # Overwrite the content range to the user requested range
        file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size)

        # Overwrite the content MD5 as it is the MD5 for the last range instead
        # of the stored MD5
        # TODO: Set to the stored MD5 when the service returns this
        file.properties.content_md5 = None
    return file
|
def _init_loaders(self) -> None:
    """Instantiate every configured translation loader and subscribe to it.

    For each entry in ``settings.I18N_TRANSLATION_LOADERS`` the loader class
    is imported, constructed, wired to ``self.update`` and then loaded with
    its configured parameters.
    """
    for loader_conf in settings.I18N_TRANSLATION_LOADERS:
        loader = import_class(loader_conf['loader'])()
        loader.on_update(self.update)
        run(loader.load(**loader_conf['params']))
|
def get_diskinfo(opts, show_all=False, local_only=False):
    '''Return a list of DiskInfo records for the current drives,
    with usage figures scaled by the output unit taken from ``opts``.'''
    results = []
    unit = opts.outunit
    for letter in get_drives():
        dev = letter + ':\\'
        info = DiskInfo(dev=dev)
        try:
            usage = get_fs_usage(dev)
        except WindowsError:  # disk not ready, request aborted, etc.
            if not show_all:
                continue
            usage = _diskusage(0, 0, 0)
        info.ocap = usage.total
        info.cap = usage.total / unit
        info.used = usage.used / unit
        info.free = usage.free / unit
        info.label = get_vol_info(dev).name
        # Percentage used; zero-capacity drives (e.g. empty optical) report 0.
        info.pcnt = (float(usage.used) / usage.total * 100) if usage.total else 0
        info.mntp = ''
        info.ismntd = True
        # TODO needs work
        # type is not working on Win7 under VirtualBox?
        dtint, dtstr = get_drive_type(dev)
        setattr(info, *_drive_type_result[dtint])
        info.rw = os.access(dev, os.W_OK)
        # doesn't work on optical
        if usage.total:  # this not giving correct result on Win7 RTM either
            info.rw = stat.S_IMODE(os.stat(dev).st_mode) & stat.S_IWRITE
        else:
            info.rw = False
        results.append(info)
    if opts.debug:
        for info in results:
            print(info.dev, info, '\n')
    return results
|
def get_files_from_dir(path, recursive=True, depth=0, file_ext='.py'):
    """Retrieve the list of files from a folder.
    @param path: file or directory where to search files
    @param recursive: if True will search also sub-directories
    @param depth: if explore recursively, the depth of sub directories to follow
    @param file_ext: the files extension to get. Default is '.py'
    @return: the file list retrieved. if the input is a file then a one element list.
    """
    if os.path.isfile(path) or path == '-':
        return [path]
    if path[-1] != os.sep:
        path = path + os.sep
    file_list = []
    for entry in glob.glob(path + "*"):
        if os.path.isdir(entry):
            # Bug fixes: honor the ``recursive`` flag (it was previously
            # ignored) and propagate ``file_ext`` into the recursive call
            # (sub-directories used to silently fall back to '.py').
            if recursive and depth < MAX_DEPTH_RECUR:  # avoid infinite recursive loop
                file_list.extend(
                    get_files_from_dir(entry, recursive, depth + 1, file_ext))
        elif entry.endswith(file_ext):
            file_list.append(entry)
    return file_list
|
def rfind(self, bs, start=None, end=None, bytealigned=None):
    """Find final occurrence of substring bs.

    Returns a single item tuple with the bit position if found, or an
    empty tuple if not found. The bit position (pos property) will
    also be set to the start of the substring if it is found.

    bs -- The bitstring to find.
    start -- The bit position to end the reverse search. Defaults to 0.
    end -- The bit position one past the first bit to reverse search.
           Defaults to self.len.
    bytealigned -- If True the bitstring will only be found on byte
                   boundaries.

    Raises ValueError if bs is empty, if start < 0, if end > self.len or
    if end < start.
    """
    bs = Bits(bs)
    start, end = self._validate_slice(start, end)
    if bytealigned is None:
        # Fall back to the module-level default setting.
        bytealigned = globals()['bytealigned']
    if not bs.len:
        raise ValueError("Cannot find an empty bitstring.")
    # Search chunks starting near the end and then moving back
    # until we find bs.  The window overlaps by at least bs.len bits
    # (buffersize >= increment + bs.len) so a match straddling two
    # windows cannot be missed.
    increment = max(8192, bs.len * 80)
    buffersize = min(increment + bs.len, end - start)
    pos = max(start, end - buffersize)
    while True:
        found = list(self.findall(bs, start=pos, end=pos + buffersize, bytealigned=bytealigned))
        if not found:
            if pos == start:
                # The whole [start, end) range has been searched: no match.
                return ()
            # Slide the window towards the front of the bitstring.
            pos = max(start, pos - increment)
            continue
        # This window is the rearmost one containing any match, so its
        # last match is the final occurrence overall.
        return (found[-1],)
|
def insert_node(self, i, species, coords, validate_proximity=False, site_properties=None, edges=None):
    """A wrapper around Molecule.insert() which also incorporates the new
    site into the MoleculeGraph.

    :param i: Index at which to insert the new site
    :param species: Species for the new site
    :param coords: 3x1 array representing coordinates of the new site
    :param validate_proximity: For Molecule.insert(); if True (default
        False), distance will be checked to ensure that site can be safely
        added.
    :param site_properties: Site properties for Molecule
    :param edges: List of dicts representing edges to be added to the
        MoleculeGraph. These edges must include the index of the new site i,
        and all indices used for these edges should reflect the
        MoleculeGraph AFTER the insertion, NOT before. Each dict should at
        least have a "to_index" and "from_index" key, and can also have a
        "weight" and a "properties" key.
    :return:
    """
    self.molecule.insert(i, species, coords,
                         validate_proximity=validate_proximity,
                         properties=site_properties)
    # Shift the graph labels of every existing node at or above the
    # insertion point so that node index i becomes free.
    relabel = {old: (old if old < i else old + 1)
               for old in range(len(self.molecule) - 1)}
    nx.relabel_nodes(self.graph, relabel, copy=False)
    self.graph.add_node(i)
    self.set_node_attributes()
    if edges is None:
        return
    for edge in edges:
        try:
            self.add_edge(edge["from_index"], edge["to_index"],
                          weight=edge.get("weight", None),
                          edge_properties=edge.get("properties", None))
        except KeyError:
            raise RuntimeError("Some edges are invalid.")
|
def Register():
    """Adds all known parsers to the registry.

    Called once at startup to populate the parser factories
    (single/multi response, single/multi file) with every parser class,
    keyed by its artifact name.  Registration order follows the comment
    groups below and has no behavioral significance.
    """
    # pyformat: disable
    # Command parsers.
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Dpkg", linux_cmd_parser.DpkgCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Dmidecode", linux_cmd_parser.DmidecodeCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Mount", config_file.MountCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("OsxSpHardware", osx_file_parser.OSXSPHardwareDataTypeParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Ps", linux_cmd_parser.PsCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Rpm", linux_cmd_parser.RpmCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("SshdConfig", config_file.SshdConfigCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Sysctl", linux_sysctl_parser.SysctlCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("YumList", linux_cmd_parser.YumListCmdParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("YumRepolist", linux_cmd_parser.YumRepolistCmdParser)
    # Grep parsers.
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Passwd", linux_file_parser.PasswdBufferParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Netgroup", linux_file_parser.NetgroupBufferParser)
    # WMI query parsers.
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiEventConsumer", wmi_parser.WMIEventConsumerParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiInstalledSoftware", wmi_parser.WMIInstalledSoftwareParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiHotfixesSoftware", wmi_parser.WMIHotfixesSoftwareParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiUser", wmi_parser.WMIUserParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiLogicalDisks", wmi_parser.WMILogicalDisksParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiCsp", wmi_parser.WMIComputerSystemProductParser)
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WmiInterfaces", wmi_parser.WMIInterfacesParser)
    # Registry value parsers.
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinCcs", windows_registry_parser.CurrentControlSetKBParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinCodepage", windows_registry_parser.CodepageParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinEnvironment", windows_registry_parser.WinEnvironmentParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinServices", windows_registry_parser.WinServicesParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinSystemDrive", windows_registry_parser.WinSystemDriveParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinSystemRoot", windows_registry_parser.WinSystemRootParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinTimezone", windows_registry_parser.WinTimezoneParser)
    # Registry parsers.
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinAllUsersProfileEnvVar", windows_registry_parser.AllUsersProfileEnvironmentVariable)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinProfileDirEnvVar", windows_registry_parser.ProfilesDirectoryEnvironmentVariable)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WinUserSids", windows_registry_parser.WinUserSids)
    # Artifact file parsers.
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("DarwinPersistenceMechanism", osx_launchd.DarwinPersistenceMechanismsParser)
    parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("WindowsPersistenceMechanism", windows_persistence.WindowsPersistenceMechanismsParser)
    # Registry multi-parsers.
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("WinUserSpecialDirs", windows_registry_parser.WinUserSpecialDirs)
    # Artifact file multi-parsers.
    parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("OsxUsers", osx_file_parser.OSXUsersParser)
    # File parsers.
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("ChromeHistory", chrome_history.ChromeHistoryParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("CronAtAllAllowDeny", config_file.CronAtAllowDenyParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("CronTab", cron_file_parser.CronTabParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("FirefoxHistory", firefox3_history.FirefoxHistoryParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("IeHistory", ie_history.IEHistoryParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("LinuxWtmp", linux_file_parser.LinuxWtmpParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Mtab", config_file.MtabParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Netgroup", linux_file_parser.NetgroupParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("NfsExports", config_file.NfsExportsParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Ntpd", config_file.NtpdParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("PackageSource", config_file.PackageSourceParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Passwd", linux_file_parser.PasswdParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Path", linux_file_parser.PathParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("SshdConfigFile", config_file.SshdConfigParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("Sudoers", config_file.SudoersParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("OsxLaunchdPlist", osx_file_parser.OSXLaunchdPlistParser)
    parsers.SINGLE_FILE_PARSER_FACTORY.Register("OSXInstallHistoryPlist", osx_file_parser.OSXInstallHistoryPlistParser)
    try:
        # Optional dependency: only register the Debian package-status parser
        # when python-debian is installed.
        from debian import deb822  # pylint: disable=g-import-not-at-top
        parsers.SINGLE_FILE_PARSER_FACTORY.Register("DpkgStatusParser", lambda: linux_software_parser.DebianPackagesStatusParser(deb822))
    except ImportError:
        pass
    # File multi-parsers.
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxBaseShadow", linux_file_parser.LinuxBaseShadowParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxLsbInit", linux_service_parser.LinuxLSBInitParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxXinetd", linux_service_parser.LinuxXinetdParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxSysvInit", linux_service_parser.LinuxSysVInitParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxPam", linux_pam_parser.PAMParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("LinuxReleaseInfo", linux_release_parser.LinuxReleaseParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("PciDevicesInfo", linux_file_parser.PCIDevicesInfoParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("ProcSys", linux_sysctl_parser.ProcSysParser)
    parsers.MULTI_FILE_PARSER_FACTORY.Register("Rsyslog", config_file.RsyslogParser)
|
def filter_pypi(self, entry):
    """Return ``entry`` if its title refers to one of the tracked packages.

    The lower-cased feed title is matched against every name in
    ``self.packages``; non-matching entries are dropped (implicit ``None``).
    """
    if entry.title.lower().startswith(tuple(self.packages)):
        return entry
|
def check_if_needs_modeling(tomodir):
    """Check whether CRMod still needs to be run in the given tomodir.

    Returns True only when every required input file exists and no forward
    modeling output is present yet; missing inputs are reported on stdout.
    """
    print('check for modeling', tomodir)
    required_files = (
        'config' + os.sep + 'config.dat',
        'rho' + os.sep + 'rho.dat',
        'grid' + os.sep + 'elem.dat',
        'grid' + os.sep + 'elec.dat',
        'exe' + os.sep + 'crmod.cfg',
    )
    not_allowed = ('mod' + os.sep + 'volt.dat',)
    needs_modeling = True
    # An existing output file means the modeling already happened.
    for blocker in not_allowed:
        if os.path.isfile(tomodir + os.sep + blocker):
            needs_modeling = False
    # Every input file must be present before CRMod can run.
    for required in required_files:
        candidate = tomodir + os.sep + required
        if not os.path.isfile(candidate):
            print('does not exist: ', candidate)
            needs_modeling = False
    return needs_modeling
|
def minmax_candidates(self):
    '''Return the points where the derivative of the polynomial is zero.

    Useful for computing the extrema of the polynomial over an interval if
    the polynomial has real roots. In this case, the maximum is attained
    for one of the interval endpoints or a point from the result of this
    function that is contained in the interval.
    '''
    from numpy.polynomial import Polynomial
    poly = Polynomial.fromroots(self.roots)
    return poly.deriv(1).roots()
|
def normalize_mode(mode):
    """Returns a (Micro) QR Code mode constant which is equivalent to the
    provided `mode`.

    In case the provided `mode` is ``None``, this function returns ``None``.
    Otherwise a mode constant is returned unless the provided parameter cannot
    be mapped to a valid mode. In the latter case, a ModeError is raised.

    :param mode: An integer or string or ``None``.
    :raises: ModeError: In case the provided `mode` does not represent a valid
        QR Code mode.
    :rtype: int or None
    """
    if mode is None or (isinstance(mode, int) and mode in consts.MODE_MAPPING.values()):
        return mode
    try:
        return consts.MODE_MAPPING[mode.lower()]
    except (KeyError, AttributeError):
        # Bug fix: the previous bare ``except`` also swallowed SystemExit and
        # KeyboardInterrupt.  Only an unknown key (KeyError) or a ``mode``
        # without ``.lower()`` (AttributeError) should yield a ModeError.
        raise ModeError('Illegal mode "{0}". Supported values: {1}'.format(mode, ', '.join(sorted(consts.MODE_MAPPING.keys()))))
|
def countByValue(self):
    """Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.
    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]"""
    def count_partition(iterator):
        # Tally the values of a single partition locally.
        tally = defaultdict(int)
        for value in iterator:
            tally[value] += 1
        yield tally

    def merge_counts(left, right):
        # Fold the right-hand tally into the left-hand one.
        for value, count in right.items():
            left[value] += count
        return left

    return self.mapPartitions(count_partition).reduce(merge_counts)
|
def has_reg(value):
    """Return True if the given key exists in HKEY_LOCAL_MACHINE, False
    otherwise."""
    try:
        SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value)
    except SCons.Util.WinError:
        # Opening the key failed, so it does not exist.
        return False
    return True
|
def __trim_extensions_dot(exts):
    """Trim leading dots from extensions and drop any empty strings.

    Returns ``None`` when ``exts`` is ``None``; otherwise a new list, in the
    original order, with every empty-string entry removed and each remaining
    entry passed through ``__trim_extension_dot``.
    """
    if exts is None:
        return None
    # Idiomatic rewrite of the former index-based loop; behavior unchanged
    # (only the empty string "" is dropped, other falsy values are kept).
    return [__trim_extension_dot(ext) for ext in exts if ext != ""]
|
def parse_namespaces(source_dirs, search_dirs=None):
    """Use only this function to parse DSDL definitions.

    This function takes a list of root namespace directories (containing DSDL
    definition files to parse) and an optional list of search directories
    (containing DSDL definition files that can be referenced from the types
    that are going to be parsed).

    Returns the list of parsed type definitions, where type of each element
    is CompoundType.

    Args:
        source_dirs: List of root namespace directories to parse.
        search_dirs: List of root namespace directories with referenced types
            (optional). This list is automatically extended with source_dirs.

    Example:
        >>> import uavcan
        >>> a = uavcan.dsdl.parse_namespaces(['../dsdl/uavcan'])
        >>> len(a)
        77
        >>> a[0]
        uavcan.Timestamp
        >>> a[0].fields
        [truncated uint48 husec]
        >>> a[0].constants
        [saturated uint48 UNKNOWN = 0, saturated uint48 USEC_PER_LSB = 100]
    """
    # noinspection PyShadowingNames
    def walk():
        # Generator yielding the path of every '*.uavcan' file found under
        # any of the source directories; symlinks are followed, and any
        # OS-level walk error is converted into a DsdlException.
        import fnmatch
        from functools import partial
        def on_walk_error(directory, ex):
            raise DsdlException('OS error in [%s]: %s' % (directory, str(ex)))
        for source_dir in source_dirs:
            walker = os.walk(source_dir, onerror=partial(on_walk_error, source_dir), followlinks=True)
            for root, _dirnames, filenames in walker:
                for filename in fnmatch.filter(filenames, '*.uavcan'):
                    filename = os.path.join(root, filename)
                    yield filename
    # Maps (kind, dtid) -> filename of the type that first claimed that ID.
    all_default_dtid = {}
    # noinspection PyShadowingNames
    def ensure_unique_dtid(t, filename):
        # Types without a default data type ID cannot collide.
        if t.default_dtid is None:
            return
        key = t.kind, t.default_dtid
        if key in all_default_dtid:
            first = pretty_filename(all_default_dtid[key])
            second = pretty_filename(filename)
            error('Default data type ID collision: [%s] [%s]', first, second)
        all_default_dtid[key] = filename
    # Source directories are implicitly searchable for referenced types too.
    parser = Parser(source_dirs + (search_dirs or []))
    output_types = []
    for filename in walk():
        t = parser.parse(filename)
        ensure_unique_dtid(t, filename)
        output_types.append(t)
    return output_types
|
def get_metric_type(measure, aggregation):
    """Get the corresponding metric type for the given stats type.

    :type measure: (:class:'~opencensus.stats.measure.BaseMeasure')
    :param measure: the measure for which to find a metric type
    :type aggregation:
        (:class:'~opencensus.stats.aggregation.BaseAggregation')
    :param aggregation: the aggregation for which to find a metric type
    """
    agg_type = aggregation.aggregation_type
    if agg_type == aggregation_module.Type.NONE:
        raise ValueError("aggregation type must not be NONE")
    # Sanity check: the aggregation instance must match its declared type.
    assert isinstance(aggregation, AGGREGATION_TYPE_MAP[agg_type])
    if agg_type == aggregation_module.Type.COUNT:
        return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64
    if agg_type == aggregation_module.Type.DISTRIBUTION:
        return metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION
    if agg_type == aggregation_module.Type.SUM:
        # Sums are cumulative; the value type follows the measure type.
        if isinstance(measure, measure_module.MeasureInt):
            return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64
        if isinstance(measure, measure_module.MeasureFloat):
            return metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE
        raise ValueError
    if agg_type == aggregation_module.Type.LASTVALUE:
        # Last-value aggregations map to gauges.
        if isinstance(measure, measure_module.MeasureInt):
            return metric_descriptor.MetricDescriptorType.GAUGE_INT64
        if isinstance(measure, measure_module.MeasureFloat):
            return metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE
        raise ValueError
    raise AssertionError
|
def on_train_end(self, logs):
    """Report total wall-clock training time when training finishes."""
    elapsed = timeit.default_timer() - self.train_start
    print('done, took {:.3f} seconds'.format(elapsed))
|
def zsh_mode(self, delay_factor=1, prompt_terminator="$"):
    """Run the zsh command to unify the shell environment."""
    factor = self.select_delay_factor(delay_factor)
    self.clear_buffer()
    # Send an extra RETURN before and after so the command is executed
    # regardless of what was left on the line.
    self.write_channel(self.RETURN + "zsh" + self.RETURN)
    time.sleep(1 * factor)
    self.set_prompt()
    self.clear_buffer()
|
def from_dataframe(df, name='df', client=None):
    """convenience function to construct an ibis table from a DataFrame

    EXPERIMENTAL API

    Parameters
    ----------
    df : DataFrame
    name : str, default 'df'
    client : Client, default new PandasClient
        client dictionary will be mutated with the name of the DataFrame

    Returns
    -------
    Table
    """
    if client is None:
        # No client supplied: spin up a fresh pandas client seeded with df.
        return connect({name: df}).table(name)
    # Register the frame on the existing client (mutates its dictionary).
    client.dictionary[name] = df
    return client.table(name)
|
def data_to_list(self, sysbase=False):
    """Return the loaded model data as a list of dictionaries.

    Each dictionary contains the full parameters of one element.

    :param sysbase: use system base quantities where available
    :type sysbase: bool
    """
    # One dict per element; system-base values come from ``_store`` when
    # requested and available, otherwise the element's own attribute value.
    return [
        {key: (self._store[key][idx]
               if sysbase and key in self._store
               else self.__dict__[key][idx])
         for key in self.data_keys}
        for idx in range(self.n)
    ]
|
def copy_function(func, name=None):
    """Copy a function object, optionally giving the copy a different name.

    Args:
        func (function): Function to be copied.
        name (string, optional): Name of the new function.
            If not specified, the same name as `func` will be used.

    Returns:
        newfunc (function): New function with the requested name.
    """
    code = func.__code__
    newname = name or func.__name__
    if hasattr(code, 'replace'):
        # Python 3.8+: CodeType.replace handles every code field, including
        # co_posonlyargcount — the positional CodeType(...) call below would
        # shift all arguments by one slot on these versions and build a
        # corrupt code object.
        newcode = code.replace(co_name=newname)
    else:
        newcode = CodeType(
            code.co_argcount, code.co_kwonlyargcount, code.co_nlocals,
            code.co_stacksize, code.co_flags, code.co_code, code.co_consts,
            code.co_names, code.co_varnames, code.co_filename, newname,
            code.co_firstlineno, code.co_lnotab, code.co_freevars,
            code.co_cellvars,
        )
    newfunc = FunctionType(newcode, func.__globals__, newname,
                           func.__defaults__, func.__closure__)
    # Bug fix: keyword-only defaults were previously dropped from the copy.
    newfunc.__kwdefaults__ = func.__kwdefaults__
    newfunc.__dict__.update(func.__dict__)
    return newfunc
|
def configfilepopulator(self):
    """Populates an unpopulated config.xml file with run-specific values and creates
    the file in the appropriate location"""
    # Set the number of cycles for each read and index using the number of reads specified in the sample sheet
    self.forwardlength = self.metadata.header.forwardlength
    self.reverselength = self.metadata.header.reverselength
    # Create a list of lists containing [cycle start, cycle end, :runid] for each of forward reads, index 1,
    # index 2, and reverse reads.  The two 8-cycle spans are the index reads.
    cycles = [[1, self.forwardlength, self.runid], [self.forwardlength + 1, self.forwardlength + 8, self.runid], [self.forwardlength + 9, self.forwardlength + 16, self.runid], [self.forwardlength + 17, self.forwardlength + 16 + self.reverselength, self.runid]]
    # A dictionary of parameters (keys) and the values to use when repopulating the config file
    parameters = {'RunFolder': self.runid, 'RunFolderDate': self.metadata.date.replace("-", ""), 'RunFolderId': self.metadata.runnumber, 'RunFlowcellId': self.metadata.flowcell}
    # Load the template xml file using element tree
    # NOTE(review): assumes a config.xml template lives in self.homepath — confirm.
    config = ElementTree.parse("{}/config.xml".format(self.homepath))
    # Get the root of the tree
    configroot = config.getroot()
    # The run node is the only child node of the root
    for run in configroot:
        # Iterate through the child nodes. There are three node sections that must be populated
        for child in run:
            # Find the cycles tag
            if child.tag == 'Cycles':
                # Set the attributes with a dictionary containing the total reads
                child.attrib = {'Last': '{}'.format(self.forwardlength + 16 + self.reverselength), 'Number': '{}'.format(self.totalreads), 'First': '1'}
            elif child.tag == 'RunParameters':
                # Name the child as runparameters for easier coding
                runparameters = child
                for runparameter in runparameters:
                    # This replaces data in both 'ImagingReads' and 'Reads' nodes
                    if 'Reads' in runparameter.tag:
                        # Enumerate through the run parameters
                        for indexcount, reads in enumerate(runparameter):
                            # The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first
                            # list in cycles
                            index = int(runparameter.attrib['Index']) - 1
                            # Set the text value as the appropriate value from cycles
                            # (child order is assumed to match the [start, end, runid] layout).
                            reads.text = str(cycles[index][indexcount])
                    # Populate the instrument value
                    if runparameter.tag == 'Instrument':
                        runparameter.text = self.instrument
                    # Iterate through the parameters in the parameter dictionary
                    for parameter in parameters:
                        # If the key is encountered
                        if runparameter.tag == parameter:
                            # Replace the text with the value
                            runparameter.text = parameters[parameter]
                    if 'Barcode' in runparameter.tag:
                        for cycle, barcode in enumerate(runparameter):
                            # Add the barcode cycles. These are the number of forward reads (+1 as the barcode
                            # starts 1 cycle after the first run) plus the current iterator
                            barcode.text = str(self.forwardlength + 1 + cycle)
    # Write the modified config file to the desired location
    config.write('{}Data/Intensities/BaseCalls/config.xml'.format(self.miseqfolder))
|
def edit(self, index, name=None, priority=None, comment=None, done=None, parent=None):
    """Modifies :index: to specified data.

    Every argument which is not None will get changed.
    If parent is not None, the item will get reparented.
    Use parent=-1 or parent='' for reparenting to top-level.

    :index: Index of the item to edit.
    :name: New name.
    :priority: New priority.
    :comment: New comment.
    :done: Done mark.
    :parent: New parent.
    """
    if parent == -1:
        parent = ''
    parent = self._split(parent)
    index = self._split(index)
    # Walk down the tree to the item addressed by ``index``
    # (item layout: [name, priority, comment, done, children]).
    item = self.data
    for depth, component in enumerate(index):
        item = item[int(component) - 1]
        if depth + 1 != len(index):
            item = item[4]  # descend into the children list
    # Apply the requested field changes.
    if name is not None:
        item[0] = name
    if priority is not None:
        item[1] = priority
    if comment is not None:
        item[2] = comment
    if done is not None:
        item[3] = done
    # Reparent only when a new parent was given and it actually differs.
    if parent is not None and parent != index[:-1]:
        new_parent = self.data
        for component in parent:
            new_parent = new_parent[int(component) - 1][4]
        new_parent.append(item)
        old_parent = self.data
        for component in index[:-1]:
            old_parent = old_parent[int(component) - 1][4]
        old_parent.remove(item)
|
def get_all_tags_of_confirmation(self, confirmation_id):
    """Get all tags of a confirmation.

    This iterates over all pages until every element has been fetched, so if
    the rate limit is exceeded an exception is raised and nothing is returned.

    :param confirmation_id: the confirmation id
    :return: list
    """
    query = {'confirmation_id': confirmation_id}
    return self._iterate_through_pages(
        get_function=self.get_tags_of_confirmation_per_page,
        resource=CONFIRMATION_TAGS,
        **query)
|
def _op(self, operation, other, *allowed):
    """A basic operation operating on a single value.

    :param operation: the query operator (e.g. ``'$gt'``) to apply.
    :param other: the comparison value; converted to its foreign (database)
        representation unless it is ``None``.
    :param allowed: additional operators considered acceptable for this field
        by the safety check.
    :returns: a ``Filter`` document of the form ``{name: {operation: other}}``
        (or the combined filters when this is a compound fragment).
    """
    f = self._field
    if self._combining:
        # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz):
        # apply the operation to each contained fragment and fold the
        # resulting filters together with the combining operator.
        return reduce(self._combining, (q._op(operation, other, *allowed) for q in f))
    # pylint: disable=protected-access
    # Optimize this away in production; diagnosic aide.
    if __debug__ and _complex_safety_check(f, {operation} | set(allowed)):  # pragma: no cover
        raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
    if other is not None:
        # Convert the value to its database-side ("foreign") representation.
        other = f.transformer.foreign(other, (f, self._document))
    return Filter({self._name: {operation: other}})
|
def decode_schedule(string):
    """Decodes a string into a schedule tuple.

    Args:
        string: The string encoding of a schedule tuple.

    Returns:
        A schedule tuple, see encode_schedule for details.
    """
    tokens = string.split()
    # After the leading name token, '@'-prefixed entries are step counts
    # and all remaining entries are PMF values.
    steps = [int(tok[1:]) for tok in tokens[1:] if tok[0] == '@']
    pmf_values = [float(tok) for tok in tokens[1:] if tok[0] != '@']
    pmfs = np.reshape(pmf_values, [len(steps), -1])
    return tokens[0], tuplize(steps), tuplize(pmfs)
|
def _restricted_growth_notation ( l ) :
"""The clustering returned by the hcluster module gives group
membership without regard for numerical order This function preserves
the group membership , but sorts the labelling into numerical order"""
|
list_length = len ( l )
d = defaultdict ( list )
for ( i , element ) in enumerate ( l ) :
d [ element ] . append ( i )
l2 = [ None ] * list_length
for ( name , index_list ) in enumerate ( sorted ( d . values ( ) , key = min ) ) :
for index in index_list :
l2 [ index ] = name
return tuple ( l2 )
|
def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1):
    """:param first: word to calculate distance for
    :param second: word to calculate distance with
    :param winkler: same as winkler_ajustment
    :param winkler_ajustment: add an adjustment factor to the Jaro of the distance
    :param scaling: scaling factor for the Winkler adjustment
    :return: Jaro distance adjusted (or not)
    """
    if not first or not second:
        raise JaroDistanceException("Cannot calculate distance from NoneType ({0}, {1})".format(first.__class__.__name__, second.__class__.__name__))
    jaro = _score(first, second)
    # Common-prefix length is capped at 4 characters, per Winkler.
    prefix_len = min(len(_get_prefix(first, second)), 4)
    if winkler and winkler_ajustment:
        # Winkler-adjusted score, rounded to two decimal places.
        return round((jaro + scaling * prefix_len * (1.0 - jaro)) * 100.0) / 100.0
    return jaro
|
def compliance_tensor(self):
    """Return the Voigt-notation compliance tensor, i.e. the matrix
    inverse of this tensor's Voigt-notation elastic tensor."""
    return ComplianceTensor.from_voigt(np.linalg.inv(self.voigt))
|
def header_canonical(self, header_name):
    """Translate an HTTP header name to its Django ``request.META`` key.

    Per the Django docs
    (https://docs.djangoproject.com/en/1.6/ref/request-response/#django.http.HttpRequest.META)
    every header is upper-cased with hyphens replaced by underscores and
    prefixed with ``HTTP_`` — except Content-Type and Content-Length, which
    appear without the prefix as ``CONTENT_TYPE`` / ``CONTENT_LENGTH``.
    """
    header_name = header_name.lower()
    if header_name == 'content-type':
        # Bug fix: Django's META key uses an underscore, not a hyphen.
        return 'CONTENT_TYPE'
    elif header_name == 'content-length':
        return 'CONTENT_LENGTH'
    return 'HTTP_%s' % header_name.replace('-', '_').upper()
|
def hpai_body(self):
    """Create a body with HPAI information.
    This is used for disconnect and connection state requests."""
    sock_name = self.control_socket.getsockname()
    # ===== IP body =====
    body = [
        self.channel,  # communication channel id
        0x00,          # reserved
        # ===== client HPAI =====
        0x08,          # HPAI length
        0x01,          # host protocol
    ]
    # Tunnel client socket IP, then port.
    body.extend(ip_to_array(sock_name[0]))
    body.extend(int_to_array(sock_name[1]))
    return body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.