signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_token_credentials(cls, username, request):
    """Return the API token for the user named ``username``.

    Used by token-based auth as the ``credentials_callback`` kwarg.
    Returns ``None`` when the user is missing or the lookup fails; on
    failure the error is logged and the request session is forgotten.
    """
    try:
        user = cls.get_item(username=username)
    except Exception as ex:
        log.error(str(ex))
        forget(request)
        return None
    if user:
        return user.api_key.token
|
def get_bool(_bytearray, byte_index, bool_index):
    """Return the boolean stored at bit ``bool_index`` of the byte at
    ``byte_index`` in ``_bytearray``."""
    mask = 1 << bool_index
    return bool(_bytearray[byte_index] & mask)
|
def _pfp__handle_updated(self, watched_field):
    """Handle a change notification from ``watched_field``.

    Suppresses re-entrant notifications while the update runs, then either
    repacks this field's nested data (when the changed field is the packed
    representation ``self._``) or invokes the configured update function.

    :param watched_field: the field instance that was modified
    """
    # Guard against notification loops while we mutate ourselves.
    self._pfp__no_notify = True
    # nested data has been changed, so rebuild the
    # nested data to update the field
    # TODO a global setting to determine this behavior?
    # could slow things down a bit for large nested structures
    # notice the use of _is_ here - 'is' != '=='. '==' uses
    # the __eq__ operator, while is compares id(object) results
    if watched_field is self._:
        self._pfp__pack_data()
    elif self._pfp__update_func is not None:
        self._pfp__update_func.call([self] + self._pfp__watch_fields, *self._pfp__update_func_call_info)
    self._pfp__no_notify = False
|
def _get_cromwell_execution_dir(base_dir, target_glob):
    """Retrieve the baseline directory with cromwell output files.

    Handles Cromwell restarts where there are multiple work directories and
    we traverse symlinks back to the original.

    :param base_dir: candidate execution directory to inspect
    :param target_glob: glob pattern, relative to ``base_dir``, locating a
        task work directory that contains a ``script`` file
    :return: the directory whose work dir holds ``cwl.output.json``
    """
    # First matching work directory; raises IndexError if the glob matches
    # nothing (assumed not to happen for a valid run -- TODO confirm).
    cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0]
    if os.path.exists(os.path.join(cur_dir, "cwl.output.json")):
        return base_dir
    else:
        # Follow the 'script' symlink back into the original (pre-restart)
        # work directory, pull out that run's directory-name component just
        # under the parent of base_dir, and recurse into it.
        symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script")))
        ref_base = os.path.dirname(base_dir)
        new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0]
        return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
|
def _force_read(self, element, value, text_prefix_before, text_suffix_before, text_prefix_after, text_suffix_after, data_of):
    """Force the screen reader to present ``value`` for an element, wrapped
    in the given prefixes/suffixes.

    :param element: The reference element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param value: The value to be shown.
    :type value: str
    :param text_prefix_before: The prefix of value to show before the element.
    :type text_prefix_before: str
    :param text_suffix_before: The suffix of value to show before the element.
    :type text_suffix_before: str
    :param text_prefix_after: The prefix of value to show after the element.
    :type text_prefix_after: str
    :param text_suffix_after: The suffix of value to show after the element.
    :type text_suffix_after: str
    :param data_of: The name of attribute that links the content with element.
    :type data_of: str
    """
    # An empty wrapper pair means no text on that side at all.
    text_before = ''
    if text_prefix_before or text_suffix_before:
        text_before = text_prefix_before + value + text_suffix_before
    text_after = ''
    if text_prefix_after or text_suffix_after:
        text_after = text_prefix_after + value + text_suffix_after
    self._force_read_simple(element, text_before, text_after, data_of)
|
def persist(self, storageLevel):
    """Persist the underlying RDD with the specified storage level.

    :param storageLevel: a ``StorageLevel`` instance
    :return: ``self``, for chaining
    :raises TypeError: if ``storageLevel`` is not a ``StorageLevel``
    """
    if not isinstance(storageLevel, StorageLevel):
        raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
    # Translate the Python storage level into its JVM counterpart before
    # forwarding the call through the Java wrapper.
    java_level = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
    self._java_matrix_wrapper.call("persist", java_level)
    return self
|
def from_clause(cls, clause):
    """Factory method: build the constraint object matching a parsed clause.

    Dispatches on the clause's grammar name; raises SyntaxError for any
    unrecognized clause type.
    """
    name = clause.getName()
    if name == "not":
        return Invert(cls.from_clause(clause[1]))
    if name == "operator":
        return OperatorConstraint.from_clause(clause)
    if name == "conjunction" or clause.conjunction:
        return Conjunction.from_clause(clause)
    if name == "function":
        return FunctionConstraint.from_clause(clause)
    if name == "between":
        return BetweenConstraint.from_clause(clause)
    if name == "in":
        return InConstraint.from_clause(clause)
    raise SyntaxError("Unknown constraint type %r" % name)
|
def project_remove_tags(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /project-xxxx/removeTags API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FremoveTags

    :param object_id: the project ID, e.g. ``project-xxxx``
    :param input_params: JSON-serializable request body (defaults to an
        empty dict)
    :param always_retry: whether the request is safe to always retry
    :return: the decoded response from ``DXHTTPRequest``
    """
    # Fix: the original used a mutable default argument (`input_params={}`),
    # which is shared across calls; use a None sentinel instead.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/removeTags' % object_id, input_params, always_retry=always_retry, **kwargs)
|
def prepare_info(self, ts=None):
    """Return all session unique ids recorded in prepare phase.

    :param ts: timestamp, default to current timestamp
    :return: set of session unique ids
    """
    timestamp = ts or int(time.time())
    sp_key = "%s:session_prepare" % self.namespace(timestamp)
    return {s(member) for member in self.r.smembers(sp_key)}
|
def _folder_get_content_iter(self, folder_key=None):
    """Iterator for api.folder_get_content.

    Yields each folder resource, then each file resource, paging through
    the API chunk by chunk until an empty page or the last chunk.
    """
    lookups = (
        {'content_type': 'folders', 'node': 'folders'},
        {'content_type': 'files', 'node': 'files'},
    )
    for param in lookups:
        chunk = 0
        while True:
            chunk += 1
            content = self.api.folder_get_content(
                content_type=param['content_type'],
                chunk=chunk,
                folder_key=folder_key,
            )['folder_content']
            items = content[param['node']]
            # Empty folder/file list: nothing (more) of this type.
            if not items:
                break
            for resource_info in items:
                yield resource_info
            # No next page for this content type.
            if content['more_chunks'] == 'no':
                break
|
def mill(self):
    """Process the variables collected from agents using ``millRule``,
    storing the results in the attributes named in ``sow_vars``.

    Parameters
    ----------
    none

    Returns
    -------
    none
    """
    # Fix: the original built the input dict by concatenating source text
    # and calling eval(); gather the attributes directly with getattr
    # instead (same resulting dict, no code execution).
    mill_dict = {name: getattr(self, name) for name in self.reap_vars}
    for name in self.const_vars:
        mill_dict[name] = getattr(self, name)
    # Run the millRule and store its output in self.
    product = self.millRule(**mill_dict)
    for var_name in self.sow_vars:
        setattr(self, var_name, getattr(product, var_name))
|
def cutadapt(job, inputs, r1_id, r2_id):
    """Filters out adapters that may be left in the RNA-seq files.

    Runs CutAdapt in a docker container over the paired fastqs, writes the
    trimmed files back to the file store, and schedules STAR as a child job.

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str r1_id: FileStore ID of read 1 fastq
    :param str r2_id: FileStore ID of read 2 fastq
    """
    job.fileStore.logToMaster('Running CutAdapt: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    inputs.improper_pair = None
    # Retrieve files
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
    # Cutadapt parameters: /data is work_dir mounted inside the container.
    parameters = ['-a', inputs.fwd_3pr_adapter, '-m', '35', '-A', inputs.rev_3pr_adapter, '-o', '/data/R1_cutadapt.fastq', '-p', '/data/R2_cutadapt.fastq', '/data/R1.fastq', '/data/R2.fastq']
    # Call: CutAdapt
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if inputs.sudo:
        base_docker_call = ['sudo'] + base_docker_call
    tool = 'quay.io/ucsc_cgl/cutadapt:1.9--6bd44edd2b8f8f17e25c5a268fedaab65fa851d2'
    p = subprocess.Popen(base_docker_call + [tool] + parameters, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        # On an improper-pair failure, fall back to the original (uncut)
        # fastqs by renaming them to the expected output names.
        # NOTE(review): `stderr` is bytes on Python 3, so this substring
        # test only works on Python 2 (or needs stderr.decode()) — confirm
        # the target interpreter.
        if 'improperly paired' in stderr:
            inputs.improper_pair = True
            shutil.move(os.path.join(work_dir, 'R1.fastq'), os.path.join(work_dir, 'R1_cutadapt.fastq'))
            shutil.move(os.path.join(work_dir, 'R2.fastq'), os.path.join(work_dir, 'R2_cutadapt.fastq'))
    # Write to fileStore; keep the original file IDs when the pair was
    # improper, otherwise upload the trimmed files and delete the originals.
    if inputs.improper_pair:
        r1_cutadapt = r1_id
        r2_cutadapt = r2_id
    else:
        r1_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1_cutadapt.fastq'))
        r2_cutadapt = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2_cutadapt.fastq'))
        job.fileStore.deleteGlobalFile(r1_id)
        job.fileStore.deleteGlobalFile(r2_id)
    # start STAR
    cores = min(inputs.cores, 16)
    job.addChildJobFn(star, inputs, r1_cutadapt, r2_cutadapt, cores=cores, disk='100G', memory='40G').rv()
|
def name_resolve(self, name=None, recursive=False, nocache=False, **kwargs):
    """Get the value currently published at an IPNS name.

    IPNS is a PKI namespace, where names are the hashes of public keys, and
    the private key enables publishing new (signed) values. In resolve, the
    default value of ``name`` is your own identity public key.

    .. code-block:: python

        >>> c.name_resolve()
        {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}

    Parameters
    ----------
    name : str
        The IPNS name to resolve (defaults to the connected node)
    recursive : bool
        Resolve until the result is not an IPFS name (default: false)
    nocache : bool
        Do not use cached entries (default: false)

    Returns
    -------
    dict : The IPFS path the IPNS hash points at
    """
    # Only fill in opts when the caller did not supply their own.
    kwargs.setdefault("opts", {"recursive": recursive, "nocache": nocache})
    if name is None:
        args = ()
    else:
        args = (name,)
    return self._client.request('/name/resolve', args, decoder='json', **kwargs)
|
def parse_tablature(lines):
    '''Parse a list of lines into a `Tablature`.'''
    parsed_lines = list(map(parse_line, lines))
    return Tablature(lines=parsed_lines)
|
def serialize(self):
    """Serialize to a JSON document accepted by the X-Ray backend service.

    Uses jsonpickle to perform serialization; on failure the exception is
    logged and None is returned instead of raising.
    """
    try:
        return jsonpickle.encode(self, unpicklable=False)
    except Exception:
        log.exception("got an exception during serialization")
        return None
|
def _bytes_from_json(value, field):
    """Base64-decode ``value``; returns None when the value is null."""
    if not _not_null(value, field):
        return None
    return base64.standard_b64decode(_to_bytes(value))
|
def get_rec_dtype(self, **keys):
    """Get the record dtype for the specified columns.

    parameters
    ----------
    colnums: integer array, optional
        The column numbers, 0 offset.  Defaults to all columns.
    vstorage: string, optional
        See docs in read_columns.

    returns
    -------
    (dtype, offsets, isvararray): the numpy record dtype, the byte offset
        of each field, and a boolean array flagging variable-length columns.
    """
    colnums = keys.get('colnums', None)
    vstorage = keys.get('vstorage', self._vstorage)
    if colnums is None:
        colnums = self._extract_colnums()
    descr = []
    # Fix: `numpy.bool` was removed from NumPy's namespace (1.24+);
    # the builtin `bool` is the equivalent dtype.
    isvararray = numpy.zeros(len(colnums), dtype=bool)
    for i, colnum in enumerate(colnums):
        dt, isvar = self.get_rec_column_descr(colnum, vstorage)
        descr.append(dt)
        isvararray[i] = isvar
    dtype = numpy.dtype(descr)
    # Record the byte offset of each field within the compound dtype.
    offsets = numpy.zeros(len(colnums), dtype='i8')
    for i, field_name in enumerate(dtype.names):
        offsets[i] = dtype.fields[field_name][1]
    return dtype, offsets, isvararray
|
def create_key_filter(properties: Dict[str, list]) -> List[Tuple]:
    """Generate combinations of (key, value) pairs for each key in properties.

    Examples
    --------
    properties = {'ent': ['geo_rev', 'supply_chain'], 'own': ['fi']}
    >>> create_key_filter(properties)
    [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
    """
    pairs = (product([key], values) for key, values in properties.items())
    # Fix: materialize the result so it matches the declared List[Tuple]
    # return type; the previous implementation returned a one-shot
    # itertools.chain iterator.
    return list(chain.from_iterable(pairs))
|
def load_plugin_widgets(self):
    """Pull widgets added via plugins using the `enaml_native_widgets`
    entry point.

    The entry point function must return a dictionary of Widget
    declarations to add to the core api, e.g.::

        def install():
            from charts.widgets.chart_view import BarChart, LineChart
            return {
                'BarChart': BarChart,
                'LineChart': LineChart,
            }
    """
    from enamlnative.widgets import api
    for plugin in self.get_plugins(group='enaml_native_widgets'):
        get_widgets = plugin.load()
        # Update the core api with these widgets.  Fix: get_widgets()
        # returns a dict (per the documented contract), so iterate items()
        # for (name, widget) pairs; iterating the dict directly yields only
        # the names and the tuple unpacking would fail.
        for name, widget in get_widgets().items():
            setattr(api, name, widget)
|
def _set_vrf(self, v, load=False):
    """Setter method for vrf, mapped from YANG variable /vrf (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_vrf is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_vrf()
    directly.

    :param v: the value to assign; coerced into the generated YANG list type
    :param load: unused here; conventionally True when loading from config
    :raises ValueError: if ``v`` cannot be coerced into the list type
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v . _utype ( v )
    try :
        t = YANGDynClass ( v , base = YANGListType ( "vrf_name" , vrf . vrf , yang_name = "vrf" , rest_name = "vrf" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'vrf-name' , extensions = { u'tailf-common' : { u'info' : u'VRF configurations' , u'cli-no-key-completion' : None , u'cli-full-no' : None , u'sort-priority' : u'RUNNCFG_LEVEL_SYSTEM_VRF_NAME' , u'cli-suppress-list-no' : None , u'cli-suppress-key-abbreviation' : None , u'cli-full-command' : None , u'callpoint' : u'vrfCallpoint' } } ) , is_container = 'list' , yang_name = "vrf" , rest_name = "vrf" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'VRF configurations' , u'cli-no-key-completion' : None , u'cli-full-no' : None , u'sort-priority' : u'RUNNCFG_LEVEL_SYSTEM_VRF_NAME' , u'cli-suppress-list-no' : None , u'cli-suppress-key-abbreviation' : None , u'cli-full-command' : None , u'callpoint' : u'vrfCallpoint' } } , namespace = 'urn:brocade.com:mgmt:brocade-vrf' , defining_module = 'brocade-vrf' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        # Surface a structured error describing the expected generated type.
        raise ValueError ( { 'error-string' : """vrf must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("vrf_name",vrf.vrf, yang_name="vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf-name', extensions={u'tailf-common': {u'info': u'VRF configurations', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'RUNNCFG_LEVEL_SYSTEM_VRF_NAME', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'vrfCallpoint'}}), is_container='list', yang_name="vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF configurations', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'RUNNCFG_LEVEL_SYSTEM_VRF_NAME', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'vrfCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)""" , } )
    self.__vrf = t
    # Notify any registered change hook, when present.
    if hasattr(self, '_set'):
        self._set()
|
def fetch_pcr(*args, **kwargs):
    """Wrapper for fetch to automatically parse results from the PCR API.

    The auth token is loaded from the ``PCR_AUTH_TOKEN`` environment
    variable, falling back to the public token when unset.
    """
    kwargs['token'] = os.getenv("PCR_AUTH_TOKEN", "public")
    response = fetch(DOMAIN, *args, **kwargs)
    return response['result']
|
def process_edge_flow(self, source, sink, i, j, algo, q):
    '''API: process_edge_flow(self, source, sink, i, j, algo, q)

    Description:
        Used by max_flow_preflowpush() method. Processes edges along
        preflow push.

    Input:
        source: Source node name of flow graph.
        sink: Sink node name of flow graph.
        i: Source node in the processed edge (tail of arc).
        j: Sink node in the processed edge (head of arc).

    Post:
        The 'flow' and 'excess' attributes of nodes may get updated.

    Return:
        Returns False if residual capacity is 0, True otherwise.
    '''
    # Only admissible edges are processed: j must be exactly one distance
    # unit closer (to the sink) than i.
    if (self.get_node_attr(i, 'distance') != self.get_node_attr(j, 'distance') + 1):
        return False
    # Determine orientation: a forward arc (i, j) uses its capacity; a
    # reverse arc (j, i) has zero capacity here and its flow counts
    # negatively (we push along the residual direction).
    if (i, j) in self.edge_attr:
        edge = (i, j)
        capacity = self.get_edge_attr(i, j, 'capacity')
        mult = 1
    else:
        edge = (j, i)
        capacity = 0
        mult = -1
    flow = mult * self.edge_attr[edge]['flow']
    residual_capacity = capacity - flow
    if residual_capacity == 0:
        return False
    excess_i = self.get_node_attr(i, 'excess')
    excess_j = self.get_node_attr(j, 'excess')
    # Push as much of i's excess as the residual capacity allows.
    push_amount = min(excess_i, residual_capacity)
    self.edge_attr[edge]['flow'] = mult * (flow + push_amount)
    self.set_node_attr(i, 'excess', excess_i - push_amount)
    self.set_node_attr(j, 'excess', excess_j + push_amount)
    return True
|
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or
            string, it must be the public key certificate in PEM format.
            If a mapping, it must be a mapping of key IDs to public key
            certificates in PEM format. The mapping must contain the same
            key ID that's specified in the token's header.
        verify (bool): Whether to perform signature and claim validation.
            Verification is done by default.
        audience (str): The audience claim, 'aud', that this JWT should
            contain. If None then the JWT's 'aud' parameter is not verified.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: if any verification checks failed.
    """
    # Fix: `collections.Mapping` was removed in Python 3.10; the ABC lives
    # in `collections.abc`.
    from collections.abc import Mapping

    header, payload, signed_section, signature = _unverified_decode(token)
    if not verify:
        return payload
    # If certs is specified as a dictionary of key IDs to certificates, then
    # use the certificate identified by the key ID in the token header.
    if isinstance(certs, Mapping):
        key_id = header.get('kid')
        if key_id:
            if key_id not in certs:
                raise ValueError('Certificate for key id {} not found.'.format(key_id))
            certs_to_check = [certs[key_id]]
        else:
            # If there's no key id in the header, check against all certs.
            certs_to_check = certs.values()
    else:
        certs_to_check = certs
    # Verify that the signature matches the message.
    if not crypt.verify_signature(signed_section, signature, certs_to_check):
        raise ValueError('Could not verify token signature.')
    # Verify the issued at and expiry times in the payload.
    _verify_iat_and_exp(payload)
    # Check audience.
    if audience is not None:
        claim_audience = payload.get('aud')
        if audience != claim_audience:
            raise ValueError('Token has wrong audience {}, expected {}'.format(claim_audience, audience))
    return payload
|
def use_absl_handler():
    """Use the ABSL logging handler for logging if not yet configured.

    The absl handler is already attached to root if there are no other
    handlers attached when importing this module. Otherwise, this method is
    called in app.run() so the absl handler is used.
    """
    handler = get_absl_handler()
    if handler not in logging.root.handlers:
        logging.root.addHandler(handler)
        # Re-apply verbosity so levels match the flag after attaching.
        FLAGS['verbosity']._update_logging_levels()
|
def update(d, e):
    """Return a shallow copy of dict `d` updated with dict `e`.

    Neither input is modified.
    """
    merged = copy.copy(d)
    merged.update(e)
    return merged
|
def lorem(anon, obj, field, val):
    """Generate a paragraph of lorem ipsum text."""
    sentences = anon.faker.sentences(field=field)
    return ' '.join(sentences)
|
def update_variables(X, Z, U, prox_f, step_f, prox_g, step_g, L):
    """Update the primal and dual variables (one ADMM-style iteration).

    Note: X, Z, U are updated inline.

    Returns: LX, R, S — the transformed primal variable, primal residual,
    and dual residual (lists of per-constraint values when prox_g is
    iterable).
    """
    if not hasattr(prox_g, '__iter__'):
        if prox_g is not None:
            # Single constraint: take a scaled gradient-like step toward
            # satisfying L.X = Z, then apply the proximal operator of f.
            dX = step_f / step_g * L.T.dot(L.dot(X) - Z + U)
            X[:] = prox_f(X - dX, step_f)
            LX, R, S = do_the_mm(X, step_f, Z, U, prox_g, step_g, L)
        else:
            # fall back to simple fixed-point method for f
            # see do_the_mm for normal definitions of LX, Z, R, S
            S = -X.copy()
            X[:] = prox_f(X, step_f)
            LX = X
            Z[:] = X[:]
            R = np.zeros(X.shape, dtype=X.dtype)
            # S ends up as (new X - old X), the change over this step.
            S += X
    else:
        # Multiple constraints: accumulate the step over all (L_i, Z_i, U_i)
        # triples, then update each constraint's variables independently.
        M = len(prox_g)
        dX = np.sum([step_f / step_g[i] * L[i].T.dot(L[i].dot(X) - Z[i] + U[i]) for i in range(M)], axis=0)
        X[:] = prox_f(X - dX, step_f)
        LX = [None] * M
        R = [None] * M
        S = [None] * M
        for i in range(M):
            LX[i], R[i], S[i] = do_the_mm(X, step_f, Z[i], U[i], prox_g[i], step_g[i], L[i])
    return LX, R, S
|
def __response_url(self, message_id):
    """URL for responding to agent requests.

    Returns None when the agent pid is 0 (no agent to respond to).
    """
    if self.from_.pid == 0:
        return None
    path = AGENT_RESPONSE_PATH % (self.from_.pid, message_id)
    return "http://%s:%s/%s" % (self.host, self.port, path)
|
def close_db_connections(self, instance, db_key, db_name=None):
    """Explicitly close the cached db connection for this instance/db.

    We close the db connections explicitly b/c when we don't they keep
    locks on the db. This presents as issues such as the SQL Server Agent
    being unable to stop.
    """
    conn_key = self._conn_key(instance, db_key, db_name)
    if conn_key not in self.connections:
        return
    try:
        self.connections[conn_key]['conn'].close()
        del self.connections[conn_key]
    except Exception as e:
        # Best-effort close: log and continue rather than raising.
        self.log.warning("Could not close adodbapi db connection\n{0}".format(e))
|
def from_array(arr, name=None):  # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
    """Converts a numpy array to a tensor def.

    Inputs:
        arr: a numpy array.
        name: (optional) the name of the tensor.
    Returns:
        tensor_def: the converted tensor def.
    """
    tensor = TensorProto()
    tensor.dims.extend(arr.shape)
    if name:
        tensor.name = name
    # Fix: the bare `np.object` alias was removed from NumPy (1.24+);
    # `np.object_` is the supported spelling and compares identically.
    if arr.dtype == np.object_:
        # Special care for strings.
        tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
        # TODO: Introduce full string support.
        # We flatten the array in case there are 2-D arrays are specified
        # We throw the error below if we have a 3-D array or some kind of other
        # object. If you want more complex shapes then follow the below instructions.
        # Unlike other types where the shape is automatically inferred from
        # nested arrays of values, the only reliable way now to feed strings
        # is to put them into a flat array then specify type astype(np.object)
        # (otherwise all strings may have different types depending on their length)
        # and then specify shape .reshape([x, y, z])
        flat_array = arr.flatten()
        for e in flat_array:
            if isinstance(e, text_type):
                tensor.string_data.append(e.encode('utf-8'))
            elif isinstance(e, np.ndarray):
                for s in e:
                    if isinstance(s, text_type):
                        tensor.string_data.append(s.encode('utf-8'))
            else:
                raise NotImplementedError("Unrecognized object in the object array, expect a string, or array of bytes: ", str(type(e)))
        return tensor
    # For numerical types, directly use numpy raw bytes.
    try:
        dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
    except KeyError:
        raise RuntimeError("Numpy data type not understood yet: {}".format(str(arr.dtype)))
    tensor.data_type = dtype
    tensor.raw_data = arr.tobytes()
    # note: tobytes() is only after 1.9.
    return tensor
|
def get_form_value(self, form_key, object_brain_uid, default=None):
    """Returns a value from the request's form for the given uid, if any."""
    form = self.request.form
    if form_key not in form:
        return default
    uid = object_brain_uid
    if not api.is_uid(uid):
        uid = api.get_uid(object_brain_uid)
    values = form.get(form_key)
    if isinstance(values, list):
        if not values:
            return default
        if len(values) > 1:
            # More than one submitted set: keep the first, but record it.
            logger.warn("Multiple set of values for {}".format(form_key))
        values = values[0]
    return values.get(uid, default)
|
def run():
    """Compare two or more sets of GO IDs. Best done using sections."""
    cli = CompareGOsCli()
    kws = cli.kws
    cli.write(kws.get('xlsx'), kws.get('ofile'), kws.get('verbose', False))
|
def pytype_to_deps_hpp(t):
    """python -> pythonic type hpp filename(s), as a set.

    Recursively collects the headers needed by `t` and all its type
    arguments; raises NotImplementedError for unsupported types.
    """
    if isinstance(t, List):
        return {'list.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
    if isinstance(t, Set):
        return {'set.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
    if isinstance(t, Dict):
        key_type, value_type = t.__args__
        return {'dict.hpp'}.union(pytype_to_deps_hpp(key_type), pytype_to_deps_hpp(value_type))
    if isinstance(t, Tuple):
        element_deps = [pytype_to_deps_hpp(elt) for elt in t.__args__]
        return {'tuple.hpp'}.union(*element_deps)
    if isinstance(t, NDArray):
        out = {'ndarray.hpp'}
        # A slice start of -1 marks a transposed expression.
        if t.__args__[1].start == -1:
            out.add('numpy_texpr.hpp')
        return out.union(pytype_to_deps_hpp(t.__args__[0]))
    if isinstance(t, Pointer):
        return {'pointer.hpp'}.union(pytype_to_deps_hpp(t.__args__[0]))
    if isinstance(t, Fun):
        return {'cfun.hpp'}.union(*[pytype_to_deps_hpp(a) for a in t.__args__])
    if t in PYTYPE_TO_CTYPE_TABLE:
        return {'{}.hpp'.format(t.__name__)}
    raise NotImplementedError("{0}:{1}".format(type(t), t))
|
def identifier_simple(mesh):
    """Return a basic identifier for a mesh, consisting of properties
    that have been hand tuned to be somewhat robust to rigid
    transformations and different tesselations.

    Parameters
    ----------
    mesh : Trimesh object
        Source geometry

    Returns
    -------
    identifier : (6,) float
        Identifying values of the mesh
    """
    # verify the cache once
    mesh._cache.verify()
    # don't check hashes during identifier as we aren't
    # changing any data values of the mesh inside block
    # if we did change values in cache block things would break
    with mesh._cache:
        # pre-allocate identifier so indexes of values can't move around
        # like they might if we used hstack or something else
        identifier = np.zeros(6, dtype=np.float64)
        # avoid thrashing the cache unnecessarily
        mesh_area = mesh.area
        # start with properties that are valid regardless of watertightness
        # note that we're going to try to make all parameters relative
        # to area so other values don't get blown up at weird scales
        identifier[0] = mesh_area
        # topological constant and the only thing we can really
        # trust in this fallen world
        identifier[1] = mesh.euler_number
        # if we have a watertight mesh include volume and inertia
        if mesh.is_volume:
            # side length of a cube ratio
            # 1.0 for cubes, different values for other things
            identifier[2] = (((mesh_area / 6.0) ** (1.0 / 2.0)) / (mesh.volume ** (1.0 / 3.0)))
            # save vertices for radius calculation
            vertices = mesh.vertices - mesh.center_mass
            # we are going to special case radially symmetric meshes
            # to replace their surface area with ratio of their
            # surface area to a primitive sphere or cylinder surface area
            # this is because tessellated curved surfaces are really rough
            # to reliably hash as they are very sensitive to floating point
            # and tessellation error. By making area proportionate to a fit
            # primitive area we are able to reliably hash at more sigfigs
            if mesh.symmetry == 'radial':
                # cylinder height
                h = np.dot(vertices, mesh.symmetry_axis).ptp()
                # section radius
                R2 = (np.dot(vertices, mesh.symmetry_section.T) ** 2).sum(axis=1).max()
                # area of a cylinder primitive
                area = (2 * np.pi * (R2 ** .5) * h) + (2 * np.pi * R2)
                # replace area in this case with area ratio
                identifier[0] = mesh_area / area
            elif mesh.symmetry == 'spherical':
                # handle a spherically symmetric mesh
                R2 = (vertices ** 2).sum(axis=1).max()
                area = 4 * np.pi * R2
                identifier[0] = mesh_area / area
        else:
            # if we don't have a watertight mesh add information about the
            # convex hull, which is slow to compute and unreliable
            # just what we're looking for in a hash but hey
            identifier[3] = mesh_area / mesh.convex_hull.area
            # cube side length ratio for the hull
            identifier[4] = (((mesh.convex_hull.area / 6.0) ** (1.0 / 2.0)) / (mesh.convex_hull.volume ** (1.0 / 3.0)))
            vertices = mesh.vertices - mesh.centroid
            # add in max radius^2 to area ratio
            R2 = (vertices ** 2).sum(axis=1).max()
            identifier[5] = R2 / mesh_area
    return identifier
|
def cli(env, volume_id, notes):
    """Creates a snapshot on a given volume"""
    manager = SoftLayer.BlockStorageManager(env.client)
    snapshot = manager.create_snapshot(volume_id, notes=notes)
    if 'id' not in snapshot:
        click.echo('Error occurred while creating snapshot.\n' 'Ensure volume is not failed over or in another ' 'state which prevents taking snapshots.')
    else:
        click.echo('New snapshot created with id: %s' % snapshot['id'])
|
def get_power_state(self, userid):
    """Get power status of a z/VM instance.

    Returns the status text following ': ' in the first response line.
    """
    LOG.debug('Querying power stat of %s' % userid)
    request_data = "PowerVM " + userid + " status"
    action = "query power state of '%s'" % userid
    # Both context managers re-raise request/response failures with context.
    with zvmutils.log_and_reraise_smt_request_failed(action):
        results = self._request(request_data)
    with zvmutils.expect_invalid_resp_data(results):
        return results['response'][0].partition(': ')[2]
|
def linreg_ols_lu(y, X):
    """Linear Regression, OLS, by solving linear equations and LU decomposition.

    Properties
    ----------
    * based on LAPACK's _gesv which applies LU decomposition
    * avoids using python's inverse functions
    * should be stable
    * no overhead or other computations

    Example:
        beta = linreg_ols_lu(y, X)

    Links:
        * http://oxyba.de/docs/linreg_ols_lu
    """
    import numpy as np
    try:
        # Solve the normal equations (X'X) beta = X'y.
        gram = np.dot(X.T, X)
        moment = np.dot(X.T, y)
        return np.linalg.solve(gram, moment)
    except np.linalg.LinAlgError:
        print("LinAlgError: X*X' is singular or not square.")
        return None
|
def quantile(expr, prob=None, **kw):
    """Percentile value.

    :param expr:
    :param prob: probability or list of probabilities, in [0, 1]
    :return:
    """
    prob = kw.get('_prob', prob)
    output_type = _stats_type(expr)
    # A collection of probabilities yields a list-typed result, except for
    # grouped expressions.
    wants_list = isinstance(prob, (list, set))
    if wants_list and not isinstance(expr, GroupBy):
        output_type = types.List(output_type)
    return _reduction(expr, Quantile, output_type, _prob=prob)
|
def complete_sum(self):
    """Return an equivalent DNF expression that includes all prime
    implicants."""
    summed = self.node.complete_sum()
    # Reuse this expression object when the node was already complete.
    return self if summed is self.node else _expr(summed)
|
def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False):
    """Args:
        feature (any shape):
        num_classes (int): num_category + 1
        class_agnostic_regression (bool): if True, regression to N x 1 x 4

    Returns:
        cls_logits: N x num_class classification logits
        reg_logits: N x num_classx4 or Nx2x4 if class agnostic
    """
    classification = FullyConnected(
        'class', feature, num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    if class_agnostic_regression:
        num_box_classes = 1
    else:
        num_box_classes = num_classes
    box_regression = FullyConnected(
        'box', feature, num_box_classes * 4,
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    box_regression = tf.reshape(box_regression, (-1, num_box_classes, 4), name='output_box')
    return classification, box_regression
|
def data(self):
    """Return the examples in the dataset in order, sorted, or shuffled."""
    # Sorting wins over shuffling when both flags are set, matching the
    # original precedence.
    if self.sort:
        return sorted(self.dataset, key=self.sort_key)
    if self.shuffle:
        order = self.random_shuffler(range(len(self.dataset)))
        return [self.dataset[i] for i in order]
    return self.dataset
|
def flavor_access_list(name, projects, **kwargs):
    '''Grants access of the flavor to a project. Flavor must be private.

    :param name: non-public flavor name
    :param projects: list of projects which should have the access to the flavor

    .. code-block:: yaml

        nova-flavor-share:
          nova.flavor_project_access:
            - name: myflavor
            - project:
              - project1
              - project2

    To remove all project from access list:

    .. code-block:: yaml

        - project: []
    '''
    # Standard salt state bookkeeping: honour test mode and build the
    # conventional result dictionary.
    dry_run = __opts__['test']
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    # Only private flavors can have an access list.
    kwargs.update({'filter': {'is_public': False}})
    try:
        flavor_list = __salt__['nova.flavor_list'](**kwargs)
        flavor_id = flavor_list[name]['id']
    except KeyError:
        # Flavor not found (or not private): let the KeyError propagate
        # so the state fails loudly rather than acting on a wrong flavor.
        raise
    project_list = __salt__['keystone.project_list'](**kwargs)
    access_list = __salt__['nova.flavor_access_list'](flavor_id, **kwargs)
    # Project *names* currently granted access vs. the names the state defines.
    existing_list = [six.text_type(pname) for pname in project_list if project_list[pname]['id'] in access_list[flavor_id]]
    defined_list = [six.text_type(project) for project in projects]
    add_list = set(defined_list) - set(existing_list)
    remove_list = set(existing_list) - set(defined_list)
    if not add_list and not remove_list:
        # Already converged; nothing to do.
        ret['result'] = True
        ret['comment'] = 'Flavor "{0}" access list corresponds to defined one.'.format(name)
    else:
        if dry_run:
            # Test mode: report the pending change without applying it.
            ret['result'] = None
            ret['comment'] = 'Flavor "{0}" access list would be corrected.'.format(name)
            ret['changes'] = {name: {'new': defined_list, 'old': existing_list}}
        else:
            added = []
            removed = []
            if add_list:
                for project in add_list:
                    added.append(__salt__['nova.flavor_access_add'](flavor_id, project_list[project]['id'], **kwargs))
            if remove_list:
                for project in remove_list:
                    removed.append(__salt__['nova.flavor_access_remove'](flavor_id, project_list[project]['id'], **kwargs))
            if any(add_list) or any(remove_list):
                ret['result'] = True
                ret['comment'] = 'Flavor "{0}" access list corrected.'.format(name)
                ret['changes'] = {name: {'new': defined_list, 'old': existing_list}}
    return ret
|
def terminate(self, unique_id, configs=None):
    """Issues a kill -15 (SIGTERM) to the specified process.

    :Parameter unique_id: the name of the process
    :Parameter configs: optional configuration passed through to the
        signal-sending helper
    """
    self._send_signal(unique_id, signal.SIGTERM, configs)
|
def change_scroll(self, position):
    """Change the terminal scroll window to end at *position*.

    Args:
        position (int): Vertical location to end scroll window
    """
    out = self.stream
    out.write(self.hide_cursor)        # hide the cursor while repositioning
    out.write(self.csr(0, position))   # set scroll region to rows 0..position
    out.write(self.move(position, 0))  # park the cursor at the region bottom
|
def focusOutEvent(self, event):
    """Updates the focus-in state for this edit when focus leaves it.

    :param event: <QFocusEvent>
    """
    # Let Qt perform its normal focus-out handling first.
    super(XLineEdit, self).focusOutEvent(event)
    self._focusedIn = False
|
def _route_flags ( rflags ) :
'''https : / / github . com / torvalds / linux / blob / master / include / uapi / linux / route . h
https : / / github . com / torvalds / linux / blob / master / include / uapi / linux / ipv6 _ route . h'''
|
flags = ''
fmap = { 0x0001 : 'U' , # RTF _ UP , route is up
0x0002 : 'G' , # RTF _ GATEWAY , use gateway
0x0004 : 'H' , # RTF _ HOST , target is a host
0x0008 : 'R' , # RET _ REINSTATE , reinstate route for dynamic routing
0x0010 : 'D' , # RTF _ DYNAMIC , dynamically installed by daemon or redirect
0x0020 : 'M' , # RTF _ MODIFIED , modified from routing daemon or redirect
0x00040000 : 'A' , # RTF _ ADDRCONF , installed by addrconf
0x01000000 : 'C' , # RTF _ CACHE , cache entry
0x0200 : '!' , # RTF _ REJECT , reject route
}
for item in fmap :
if rflags & item :
flags += fmap [ item ]
return flags
|
def install_cache(expire_after=12 * 3600, cache_post=False):
    """Patches the requests library with requests_cache.

    :param expire_after: cache lifetime in seconds (default 12 hours)
    :param cache_post: when True, POST requests are cached as well as GET
    """
    methods = ['GET'] + (['POST'] if cache_post else [])
    requests_cache.install_cache(expire_after=expire_after, allowable_methods=methods)
|
def pop_all(self):
    """Preserve the context stack by transferring it to a new instance.

    Pops the current context list off this instance onto a fresh
    ``ExitStack`` and leaves this instance with a new, empty context
    list, so the transferred contexts are no longer owned by ``self``.

    :return: the new ``ExitStack`` that now owns the popped contexts
    """
    transferred = ExitStack()
    transferred._context_stack.append(self._context_stack.pop())
    self._context_stack.append([])
    # Bug fix: the new stack must be returned to the caller, otherwise the
    # popped contexts are simply lost (mirrors contextlib.ExitStack.pop_all,
    # which returns the new instance).
    return transferred
|
def _parse_on_demand_syllabus(self, course_name, page, reverse=False, unrestricted_filenames=False, subtitle_language='en', video_resolution=None, download_quizzes=False, mathjax_cdn_url=None, download_notebooks=False):
    """Parse a Coursera on-demand course listing/syllabus page.

    @return: Tuple of (bool, list), where bool indicates whether
        there was at least one error while parsing syllabus, the list
        is a list of parsed modules.
    @rtype: (bool, list)
    """
    dom = json.loads(page)
    class_id = dom['elements'][0]['id']
    logging.info('Parsing syllabus of on-demand course (id=%s). ' 'This may take some time, please be patient ...', class_id)
    modules = []
    json_modules = dom['linked']['onDemandCourseMaterialItems.v2']
    course = CourseraOnDemand(session=self._session, course_id=class_id, course_name=course_name, unrestricted_filenames=unrestricted_filenames, mathjax_cdn_url=mathjax_cdn_url)
    course.obtain_user_id()
    ondemand_material_items = OnDemandCourseMaterialItemsV1.create(session=self._session, course_name=course_name)
    if is_debug_run():
        # Dump the raw API payloads for offline inspection in debug mode.
        spit_json(dom, '%s-syllabus-raw.json' % course_name)
        spit_json(json_modules, '%s-material-items-v2.json' % course_name)
        spit_json(ondemand_material_items._items, '%s-course-material-items.json' % course_name)
    error_occurred = False
    all_modules = ModulesV1.from_json(dom['linked']['onDemandCourseMaterialModules.v1'])
    all_lessons = LessonsV1.from_json(dom['linked']['onDemandCourseMaterialLessons.v1'])
    all_items = ItemsV2.from_json(dom['linked']['onDemandCourseMaterialItems.v2'])
    # Walk the module -> lesson (section) -> lecture (item) hierarchy.
    for module in all_modules:
        logging.info('Processing module %s', module.slug)
        lessons = []
        for section in module.children(all_lessons):
            logging.info('Processing section %s', section.slug)
            lectures = []
            available_lectures = section.children(all_items)
            # Certain modules may be empty-looking programming assignments
            # e.g. in data-structures, algorithms-on-graphs ondemand
            # courses; fall back to the course-material-items lookup.
            if not available_lectures:
                lecture = ondemand_material_items.get(section.id)
                if lecture is not None:
                    available_lectures = [lecture]
            for lecture in available_lectures:
                typename = lecture.type_name
                logging.info('Processing lecture %s (%s)', lecture.slug, typename)
                # ``links`` semantics: empty dict means there were no data,
                # None means an error occurred during extraction.
                links = {}
                if typename == 'lecture':
                    lecture_video_id = lecture.id
                    links = course.extract_links_from_lecture(class_id, lecture_video_id, subtitle_language, video_resolution)
                elif typename == 'supplement':
                    links = course.extract_links_from_supplement(lecture.id)
                elif typename == 'phasedPeer':
                    links = course.extract_links_from_peer_assignment(lecture.id)
                elif typename in ('gradedProgramming', 'ungradedProgramming'):
                    links = course.extract_links_from_programming(lecture.id)
                elif typename == 'quiz':
                    if download_quizzes:
                        links = course.extract_links_from_quiz(lecture.id)
                elif typename == 'exam':
                    if download_quizzes:
                        links = course.extract_links_from_exam(lecture.id)
                elif typename == 'programming':
                    if download_quizzes:
                        links = course.extract_links_from_programming_immediate_instructions(lecture.id)
                elif typename == 'notebook':
                    # Notebooks are fetched once for the whole course.
                    if download_notebooks and not self._notebook_downloaded:
                        logging.warning('According to notebooks platform, content will be downloaded first')
                        links = course.extract_links_from_notebook(lecture.id)
                        self._notebook_downloaded = True
                else:
                    logging.info('Unsupported typename "%s" in lecture "%s" (lecture id "%s")', typename, lecture.slug, lecture.id)
                    continue
                if links is None:
                    error_occurred = True
                elif links:
                    lectures.append((lecture.slug, links))
            if lectures:
                lessons.append((section.slug, lectures))
        if lessons:
            modules.append((module.slug, lessons))
    if modules and reverse:
        modules.reverse()
    # Processing resources section: course-level reference material is
    # appended as a synthetic "Resources" module.
    json_references = course.extract_references_poll()
    references = []
    if json_references:
        logging.info('Processing resources')
        for json_reference in json_references:
            reference = []
            reference_slug = json_reference['slug']
            logging.info('Processing resource %s', reference_slug)
            links = course.extract_links_from_reference(json_reference['shortId'])
            if links is None:
                error_occurred = True
            elif links:
                reference.append(('', links))
            if reference:
                references.append((reference_slug, reference))
    if references:
        modules.append(("Resources", references))
    return error_occurred, modules
|
def build_filename(self, binary):
    """Return the proposed filename with extension for the binary."""
    # Layout: APP-VERSION.LOCALE.PLATFORM[-stub].EXT
    stub_suffix = '-stub' if self.is_stub_installer else ''
    parts = (self.application, self.version, self.locale,
             self.platform, stub_suffix, self.extension)
    return '%s-%s.%s.%s%s.%s' % parts
|
def gen_etree(self):
    """convert an RST tree (DGParentedTree -> lxml etree)"""
    # <header> carries the relation definitions.
    relations_elem = self.gen_relations()
    header = E('header')
    header.append(relations_elem)
    # Populates self.body['segments'] and self.body['groups'].
    self.gen_body()
    tree = E('rst')
    tree.append(header)
    # The <body> contains both <segment>, as well as <group> elements.
    # While the order of the elements should theoretically be irrelevant,
    # rs3 files usually list the segments before the groups.
    body = E('body')
    for segment in self.body['segments']:
        body.append(segment)
    for group in self.body['groups']:
        body.append(group)
    tree.append(body)
    return tree
|
def _run_seq(self, size):
    """Send the contents of self['SEQ'] to the chip and wait until it finishes."""
    # Write the pattern to the sequence generator memory (hw driver).
    self['SEQ'].write(size)
    # Configure how much of the pattern to play back.
    self['SEQ'].set_size(size)
    # Play the pattern once.
    self['SEQ'].set_repeat(1)
    for _ in range(1):
        self['SEQ'].start()
        # Busy-wait until the sequencer reports completion.
        while not self['SEQ'].get_done():  # time.sleep(0.1)
            print("Wait for done...")
|
def participants(self):
    """Access the participants

    :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantList
    :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantList
    """
    # Lazily construct and memoize the participant list on first access.
    cached = self._participants
    if cached is None:
        cached = ParticipantList(
            self._version,
            account_sid=self._solution['account_sid'],
            conference_sid=self._solution['sid'],
        )
        self._participants = cached
    return cached
|
def target_power(self):
    """Query the current state of the switchable power pins (4 and 6).

    NOTE(review): the original docstring described the *setter* behavior
    ("Setting this to `True` will activate the power pins..."); this body
    only issues a TARGET_POWER_QUERY and returns the result — presumably
    the getter half of a property pair. Confirm against the full class.

    Raises an :exc:`IOError` if the hardware adapter does not support
    the switchable power pins.
    """
    ret = api.py_aa_target_power(self.handle, TARGET_POWER_QUERY)
    # Negative return codes from the C API are translated into exceptions.
    _raise_error_if_negative(ret)
    return ret
|
def __select_nearest_ws(jsondata, latitude, longitude):
    """Select the nearest weatherstation.

    :param jsondata: parsed Buienradar payload containing the actual
        station measurements
    :param latitude: reference latitude
    :param longitude: reference longitude
    :return: the measurement dict of the closest station, or None when
        no station data is available
    """
    log.debug("__select_nearest_ws: latitude: %s, longitude: %s", latitude, longitude)
    dist = 0
    dist2 = 0
    loc_data = None
    try:
        ws_json = jsondata[__ACTUAL]
        ws_json = ws_json[__STATIONMEASUREMENTS]
    except (KeyError, TypeError):
        # The measurement section is occasionally absent around midnight.
        log.warning("Missing section in Buienradar xmldata (%s)." "Can happen 00:00-01:00 CE(S)T", __STATIONMEASUREMENTS)
        return None
    # Linear scan for the minimum distance; stations without a computable
    # distance (None) are skipped.
    for wstation in ws_json:
        dist2 = __get_ws_distance(wstation, latitude, longitude)
        if dist2 is not None:
            if ((loc_data is None) or (dist2 < dist)):
                dist = dist2
                loc_data = wstation
    if loc_data is None:
        log.warning("No weatherstation selected; aborting...")
        return None
    else:
        try:
            log.debug("Selected weatherstation: code='%s', " "name='%s', lat='%s', lon='%s'.", loc_data[__STATIONID], loc_data[__STATIONNAME], loc_data[__LAT], loc_data[__LON])
        except KeyError:
            # Logging is best-effort; missing descriptive keys are tolerated.
            log.debug("Selected weatherstation")
        return loc_data
|
def _model(self, beta):
    """Creates the structure of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    -------
    lambda : np.array
        Contains the values for the conditional volatility series
    Y : np.array
        Contains the length-adjusted time series (accounting for lags)
    scores : np.array
        Contains the score terms for the time series
    """
    Y = np.array(self.data[self.max_lag:self.data.shape[0]])
    X = np.ones(Y.shape[0])
    scores = np.zeros(Y.shape[0])
    # Transform latent variables from their unconstrained space.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    lmda = np.ones(Y.shape[0]) * parm[0]
    theta = np.ones(Y.shape[0]) * parm[-1]
    # Loop over time series.
    # Parameter vector layout (inferred from the indexing below — confirm
    # against the class constructor): parm[0] constant, parm[1:p+1] GARCH
    # terms, parm[1+p:1+p+q] score terms, parm[-4] leverage (if enabled),
    # parm[-3] skewness, parm[-2] degrees of freedom, parm[-1] location.
    for t in range(0, Y.shape[0]):
        if t < self.max_lag:
            # Pre-sample values: use the unconditional level.
            lmda[t] = parm[0] / (1 - np.sum(parm[1:(self.p + 1)]))
            theta[t] += (parm[-3] - (1.0 / parm[-3])) * np.exp(lmda[t]) * (np.sqrt(parm[-2]) * sp.gamma((parm[-2] - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(parm[-2] / 2.0))
        else:
            # Loop over GARCH terms.
            for p_term in range(0, self.p):
                lmda[t] += parm[1 + p_term] * lmda[t - p_term - 1]
            # Loop over Score terms.
            for q_term in range(0, self.q):
                lmda[t] += parm[1 + self.p + q_term] * scores[t - q_term - 1]
            if self.leverage is True:
                lmda[t] += parm[-4] * np.sign(-(Y[t - 1] - theta[t - 1])) * (scores[t - 1] + 1)
            theta[t] += (parm[-3] - (1.0 / parm[-3])) * np.exp(lmda[t] / 2.0) * (np.sqrt(parm[-2]) * sp.gamma((parm[-2] - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(parm[-2] / 2.0))
        # Score of the skew-t likelihood; the skewness parameter enters with
        # opposite sign depending on which side of the mode Y[t] falls.
        if (Y[t] - theta[t]) >= 0:
            scores[t] = (((parm[-2] + 1.0) * np.power(Y[t] - theta[t], 2)) / float(np.power(parm[-3], 2) * parm[-2] * np.exp(lmda[t]) + np.power(Y[t] - theta[t], 2))) - 1.0
        else:
            scores[t] = (((parm[-2] + 1.0) * np.power(Y[t] - theta[t], 2)) / float(np.power(parm[-3], -2) * parm[-2] * np.exp(lmda[t]) + np.power(Y[t] - theta[t], 2))) - 1.0
    return lmda, Y, scores, theta
|
def has_ndarray_int_columns(features, X):
    """Checks if numeric feature columns exist in ndarray.

    :param features: iterable of column identifiers; entries that are
        strings must be decimal digits (e.g. '0', '3'), others are
        assumed int-convertible
    :param X: candidate 2-D numpy array
    :return: True when X is a 2-D ndarray and every feature names a valid
        column index of X, otherwise False
    """
    # Bug fix: validate X *before* touching X.shape — the original
    # unpacked ``_, ncols = X.shape`` first, so non-arrays raised
    # AttributeError (and non-2-D arrays raised ValueError) instead of
    # returning False.
    if not isinstance(X, np.ndarray) or X.ndim != 2:
        return False
    if not all(d.isdigit() for d in features if isinstance(d, str)):
        return False
    _, ncols = X.shape
    ndarray_columns = np.arange(0, ncols)
    feature_cols = np.unique([int(d) for d in features])
    return all(np.in1d(feature_cols, ndarray_columns))
|
def strings_equal(s1, s2):
    """Timing-attack resistant string comparison.

    Normal comparison using == will short-circuit on the first mismatching
    character. This avoids that by scanning the whole string, though we
    still reveal to a timing attack whether the strings are the same
    length.

    :param s1: first string
    :param s2: second string
    :return: True when the NFKC-normalized strings compare equal
    """
    try:
        s1 = unicodedata.normalize('NFKC', str(s1))
        s2 = unicodedata.normalize('NFKC', str(s2))
    except UnicodeError:
        # Python 2 only: str() raises UnicodeEncodeError on non-ASCII
        # unicode input, so normalize the unicode objects directly.
        # (Was a bare ``except:``, which also swallowed unrelated bugs.)
        s1 = unicodedata.normalize('NFKC', unicode(s1))
        s2 = unicodedata.normalize('NFKC', unicode(s2))
    return compare_digest(s1, s2)
|
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs):
    """Overridden to only call `notify` if model matches.

    :param force_notify: passed through to the parent ``notify``
    :param use_email: passed through to the parent ``notify``
    :param use_sms: passed through to the parent ``notify``
    :return: the parent's result when the instance's model label matches
        ``self.model``, otherwise False
    """
    notified = False
    instance = kwargs.get("instance")
    # Only dispatch when the signal's instance is of the configured model.
    if instance._meta.label_lower == self.model:
        notified = super().notify(force_notify=force_notify, use_email=use_email, use_sms=use_sms, **kwargs, )
    return notified
|
def _process_pod_rate(self, metric_name, metric, scraper_config):
    """Takes a simple metric about a pod, reports it as a rate.

    If several series are found for a given pod, values are summed before
    submission.

    :param metric_name: name under which the rate is submitted
    :param metric: parsed Prometheus metric family
    :param scraper_config: scraper configuration (supplies custom_tags)
    """
    if metric.type not in METRIC_TYPES:
        # Lazy %-args: the message is only interpolated if the record is
        # emitted (the original pre-formatted the string with ``%``).
        self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
        return
    samples = self._sum_values_by_context(metric, self._get_pod_uid_if_pod_metric)
    for pod_uid, sample in iteritems(samples):
        # Skip network metrics for host-networked pods — they would report
        # the node's traffic, not the pod's.
        if '.network.' in metric_name and self._is_pod_host_networked(pod_uid):
            continue
        tags = tagger.tag('kubernetes_pod://%s' % pod_uid, tagger.HIGH)
        tags += scraper_config['custom_tags']
        val = sample[self.SAMPLE_VALUE]
        self.rate(metric_name, val, tags)
|
def unregister_layout(self, name):
    """Unregisters given layout.

    :param name: Layout name.
    :type name: unicode
    :return: Method success.
    :rtype: bool
    :raise umbra.exceptions.LayoutRegistrationError: if ``name`` is not
        registered.
    """
    # Docstring fix: the stale ``:param layout:`` entry documented a
    # parameter this method never had. ``name not in self`` replaces the
    # less idiomatic ``not name in self``.
    if name not in self:
        raise umbra.exceptions.LayoutRegistrationError(
            "{0} | '{1}' layout is not registered!".format(self.__class__.__name__, name))
    del self.__layouts[name]
    return True
|
def run_loop(self):
    """Main entry point for running in interactive mode.

    Shows the intro banner, runs the command loop, and persists readline
    history on the way out (even when the loop raises).
    """
    # Interactive prompts have no program prefix.
    self.root_command.prog = ''
    history_file = self.load_history()
    rendering.vtmlprint(self.intro)
    try:
        self.loop()
    finally:
        # Always save history, even on Ctrl-C / exceptions from the loop.
        readline.write_history_file(history_file)
|
def GetEnabledInterfaces():
    """Gives a list of enabled interfaces. Should work on all windows versions.

    Returns:
        interfaces: Names of interfaces found enabled.
    """
    show_args = ['/c', 'netsh', 'show', 'interface']
    # pylint: disable=undefined-variable
    res = client_utils_common.Execute('cmd', show_args, time_limit=-1, bypass_whitelist=True)
    splitter = re.compile(r'\s*')
    enabled = []
    for line in res[0].split('\r\n'):  # res[0] is stdout.
        columns = splitter.split(line)
        if 'Enabled' in columns:
            # The interface name is the last whitespace-separated column.
            enabled.extend(columns[-1:])
    return enabled
|
def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
    """Generate mass random date.

    :param size: int, number of dates to generate
    :param start: date similar object, int/str/date/datetime
    :param end: date similar object, int/str/date/datetime, default today's date
    :param kwargs: args placeholder
    :return: list of datetime.date
    """
    if end is None:
        end = date.today()
    # Work in proleptic-Gregorian ordinal days so sampling is pure integers.
    start_days = to_ordinal(parser.parse_datetime(start))
    end_days = to_ordinal(parser.parse_datetime(end))
    _assert_correct_start_end(start_days, end_days)
    if has_np:  # pragma: no cover
        # Vectorized sampling via numpy when it is available — this is the
        # "high performance" path the function is named for.
        return [from_ordinal(days) for days in np.random.randint(start_days, end_days, size)]
    else:
        return [from_ordinal(random.randint(start_days, end_days)) for _ in range(size)]
|
def data_from_query(self, cmd):
    """Callback for .execute_command() for DELETE/GET/HEAD requests.

    Resolves ``cmd`` to a registered command (exact-name table ``_NCMD``
    first, then regex table ``_RCMD``), applies logging/auth/static-file
    handling, and returns the handler's response.
    """
    res = None
    # Exact-match lookup key: "<HTTP method> /<command>".
    ckey = "%s /%s" % (self.command, cmd)
    if not isinstance(self._query_params, dict):
        self._query_params = {}
    if ckey in _NCMD:
        self._cmd = _NCMD[ckey]
    else:
        # Regex commands: longest patterns first so the most specific wins;
        # the match's named groups become query parameters.
        for key in sorted(_RCMD, key=len, reverse=True):
            if not key.startswith("%s " % self.command):
                continue
            m = _RCMD[key].name.match(cmd)
            if m:
                self._cmd = _RCMD[key]
                self._query_params.update(m.groupdict())
                break
    try:
        if not self._cmd:
            raise self.req_error(404)
        if not self._cmd.to_log:
            self._to_log = False
        if self._cmd.to_auth:
            self.authenticate(self._cmd.auth_users)
        if self._cmd.static:
            # Static commands may run an optional handler before serving
            # the file itself.
            if self._cmd.handler:
                res = self._cmd.handler(self)
            return self.static_file(cmd, res)
        res = self._cmd.handler(self)
        # Non-HttpResponse results are serialized before being returned.
        if not isinstance(res, HttpResponse):
            return self.response_dumps(res)
        return res
    finally:
        # Query params are per-request state; always reset them.
        self._query_params = {}
|
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
    """Obtain an array, creating if it doesn't exist. Other `kwargs` are
    as per :func:`zarr.hierarchy.Group.create_dataset`.

    Parameters
    ----------
    name : string
        Array name.
    shape : int or tuple of ints
        Array shape.
    dtype : string or dtype, optional
        NumPy dtype.
    exact : bool, optional
        If True, require `dtype` to match exactly. If false, require
        `dtype` can be cast from array dtype.

    Returns
    -------
    The array, as produced by the underlying ``_require_dataset_nosync``.
    """
    # Delegate through _write_op so the operation is serialized with other
    # writes; the _nosync variant does the actual work.
    return self._write_op(self._require_dataset_nosync, name, shape=shape, dtype=dtype, exact=exact, **kwargs)
|
def convert_to_string(self, block):
    """Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code.

    Override this function if the dataset block needs to be different
    due to file format.

    Only a FASTA dataset partitioned as '1st-2nd, 3rd' requires the
    codon-position-aware layout (the block must be split further); every
    other combination uses the plain per-gene block, since we cannot
    translate to aminoacids or degenerate the sequences once split.
    """
    needs_codon_split = (self.partitioning == '1st-2nd, 3rd' and self.format == 'FASTA')
    if needs_codon_split:
        return self.make_datablock_considering_codon_positions_as_fasta_format(block)
    return self.make_datablock_by_gene(block)
|
def plot_ratio_return(self, isox, isoy, deltax=True, deltay=True):
    '''This routine returns data isotopic data to plot from the
    filtered list of data.

    Parameters
    ----------
    isox : list
        Isotopes for x axis in standard format ['Si-28', 'Si-30'].
    isoy : list
        Same as isox but for y axis.
    deltax : boolean, optional
        If true then x-axis values are in delta format. The default
        is True.
    deltay : boolean, optional
        Same as for x-axis but for y-axis. The default is True.

    Returns
    -------
    grpl_xdata
        grain plot x-axis data.
    grpl_xerr
        x-axis error bars.
    grpl_ydata
        grain plot y-axis data.
    grpl_yerr
        y-axis error bars.
    grpl_style
        style data for the different symbols.
    '''
    # check availability of the requested ratios in the database
    index_x, delta_b_x, ratio_b_x = self.check_availability(isox)
    index_y, delta_b_y, ratio_b_y = self.check_availability(isoy)
    if index_x == -1 or index_y == -1:
        print('Following input data are not available in the database. Revise your input.')
        if index_x == -1:
            print('x axis data not available')
        if index_y == -1:
            print('y axis data not available')
        return None
    # create x and y data as 1d vectors, also error bars.
    # NOTE(review): errors are assumed to sit in the column directly after
    # the value (index + 1) — confirm against the data layout.
    xdata_vec = np.zeros((len(self.data)))
    ydata_vec = np.zeros((len(self.data)))
    xdata_err = np.zeros((len(self.data)))
    ydata_err = np.zeros((len(self.data)))
    for it in range(len(self.data)):
        xdata_vec[it] = self.data[it][index_x]
        ydata_vec[it] = self.data[it][index_y]
        xdata_err[it] = self.data[it][index_x + 1]
        ydata_err[it] = self.data[it][index_y + 1]
    # index data that are nan on either axis
    index_nan = []
    for it in range(len(xdata_vec)):
        if np.isnan(xdata_vec[it]) or np.isnan(ydata_vec[it]):
            index_nan.append(it)
    # make range of all indices, then drop the nan ones
    index_filtered = list(range(len(xdata_vec)))
    for it in range(len(index_nan)):
        index_filtered.remove(index_nan[it])
    xdata_tmp = np.zeros((len(index_filtered)))
    ydata_tmp = np.zeros((len(index_filtered)))
    xerr_tmp = np.zeros((len(index_filtered)))
    yerr_tmp = np.zeros((len(index_filtered)))
    # per-point style strings are carried along with the filtered data
    style_plt = np.zeros((len(index_filtered), len(self.header_style)), dtype='|S1024')
    for i in range(len(index_filtered)):
        xdata_tmp[i] = xdata_vec[index_filtered[i]]
        ydata_tmp[i] = ydata_vec[index_filtered[i]]
        xerr_tmp[i] = xdata_err[index_filtered[i]]
        yerr_tmp[i] = ydata_err[index_filtered[i]]
        for j in range(len(style_plt[i])):
            style_plt[i][j] = self.style[index_filtered[i]][j]
    xdata_vec = xdata_tmp
    ydata_vec = ydata_tmp
    xdata_err = xerr_tmp
    ydata_err = yerr_tmp
    # loop through error and set nans to 0 (a point may have a value but
    # no measured uncertainty)
    for i in range(len(xdata_err)):
        if np.isnan(xdata_err[i]):
            xdata_err[i] = 0.
        if np.isnan(ydata_err[i]):
            ydata_err[i] = 0.
    # make start stop index for groups: consecutive points with identical
    # style rows are grouped so each group plots with one symbol
    start_stop = []
    start = 0
    for it in range(len(xdata_vec) - 1):
        if (style_plt[it] == style_plt[it + 1]).all():
            continue
        else:
            stop = it + 1
            start_stop.append([start, stop])
            start = stop
    # last entry: either everything is one group, or close the final group
    if start_stop == []:
        start_stop.append([0, len(xdata_vec)])
    else:
        start_stop.append([start_stop[len(start_stop) - 1][1], len(xdata_vec) + 1])
    # now append things to return variables, one slice per style group
    grain_plt_xdata = []
    grain_plt_ydata = []
    grain_plt_xerr = []
    grain_plt_yerr = []
    grain_plt_style = []
    for i in range(len(start_stop)):
        grain_plt_xdata.append(xdata_vec[start_stop[i][0]:start_stop[i][1]])
        grain_plt_ydata.append(ydata_vec[start_stop[i][0]:start_stop[i][1]])
        grain_plt_xerr.append(xdata_err[start_stop[i][0]:start_stop[i][1]])
        grain_plt_yerr.append(ydata_err[start_stop[i][0]:start_stop[i][1]])
        grain_plt_style.append(style_plt[start_stop[i][0]])
    return [grain_plt_xdata, grain_plt_xerr, grain_plt_ydata, grain_plt_yerr, grain_plt_style]
|
def is_Type(tp):
    """Python version independent check if an object is a type.

    For Python 3.7 onwards (?) this is not equivalent to
    ``isinstance(tp, type)`` any more, as that call would return
    ``False`` for PEP 484 types.
    Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
    """
    if isinstance(tp, type):
        return True
    # Fall back to the typing module's own validator, which raises
    # TypeError for anything that is not a valid type expression.
    try:
        typing._type_check(tp, '')
    except TypeError:
        return False
    return True
|
def keyPressEvent(self, event):
    """Listens for the escape key to cancel out from this snapshot.

    :param event: <QKeyPressEvent>
    """
    # reject on a cancel (Escape), then let the base class handle the key
    # as usual.
    if event.key() == Qt.Key_Escape:
        self.reject()
    super(XSnapshotWidget, self).keyPressEvent(event)
|
def make_header(self):
    """Builds and returns FITS header for this HEALPix map"""
    # Base card set shared by every HEALPix convention.
    cards = [fits.Card("TELESCOP", "GLAST"), fits.Card("INSTRUME", "LAT"), fits.Card(self._conv.coordsys, self._coordsys), fits.Card("PIXTYPE", "HEALPIX"), fits.Card("ORDERING", self.ordering), fits.Card("ORDER", self._order), fits.Card("NSIDE", self._nside), fits.Card("FIRSTPIX", 0), fits.Card("LASTPIX", self._maxpix - 1), fits.Card("HPX_CONV", self._conv.convname)]
    if self._coordsys == "CEL":
        cards.append(fits.Card("EQUINOX", 2000.0, "Equinox of RA & DEC specifications"))
    # Indexing scheme: a region restriction gives PARTIAL, an explicit pixel
    # list gives EXPLICIT; otherwise sparse conventions get SPARSE and
    # everything else IMPLICIT.
    if self._region is not None:
        cards.append(fits.Card("HPX_REG", self._region))
        cards.append(fits.Card("INDXSCHM", "PARTIAL"))
    elif self._ipix is not None:
        cards.append(fits.Card("INDXSCHM", "EXPLICIT"))
    else:
        if self._conv.convname in ['FGST_SRCMAP_SPARSE']:
            cards.append(fits.Card("INDXSCHM", "SPARSE"))
        else:
            cards.append(fits.Card("INDXSCHM", "IMPLICIT"))
    header = fits.Header(cards)
    return header
|
def build_indexes(connection, verbose=False):
    """Using the how_to_index annotations in the table class definitions,
    construct a set of indexes for the database at the given
    connection.

    :param connection: open DB-API connection (sqlite3-style)
    :param verbose: when True, report each table being indexed on stderr
    """
    cursor = connection.cursor()
    for table_name in get_table_names(connection):
        # FIXME: figure out how to do this extensibly
        if table_name in TableByName:
            how_to_index = TableByName[table_name].how_to_index
        elif table_name in lsctables.TableByName:
            how_to_index = lsctables.TableByName[table_name].how_to_index
        else:
            continue
        if how_to_index is not None:
            if verbose:
                # sys.stderr.write / .items() instead of the Python-2-only
                # ``print >> sys.stderr`` statement and dict.iteritems(),
                # so this module can at least be imported under Python 3.
                sys.stderr.write("indexing %s table ...\n" % table_name)
            for index_name, cols in how_to_index.items():
                cursor.execute("CREATE INDEX IF NOT EXISTS %s ON %s (%s)" % (index_name, table_name, ",".join(cols)))
    connection.commit()
|
def decode_privkey_hex(privkey_hex):
    """Decode a private key for ecdsa signature.

    :param privkey_hex: hex-encoded private key; a "compressed" key longer
        than 64 hex chars must end in '01', which is stripped
    :return: the private key as an integer
    :raises ValueError: if the input is not a string, or a long key does
        not end in '01'
    """
    # ``unicode`` only exists on Python 2; the original referenced it
    # unconditionally and raised NameError on Python 3.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if not isinstance(privkey_hex, string_types):
        raise ValueError("private key is not a string")
    # force uncompressed: strip the trailing '01' compression marker
    priv = str(privkey_hex)
    if len(priv) > 64:
        if priv[-2:] != '01':
            raise ValueError("private key does not end in '01'")
        priv = priv[:64]
    pk_i = int(priv, 16)
    return pk_i
|
def secondary_structure_summary(dssp_df):
    """Summarize the secondary structure content of the DSSP dataframe for each chain.

    Args:
        dssp_df: Pandas DataFrame of parsed DSSP results (must provide
            ``chain`` and ``ss`` columns)

    Returns:
        dict: Chain to secondary structure summary dictionary; every DSSP
        class gets a ``percent_<code>-dssp`` entry (0.0 when absent)
    """
    # DSSP code -> output key. Replaces the original 8-branch if-chain and
    # the separate zero-filling pass with one table.
    ss_keys = {
        '-': 'percent_C-dssp',
        'H': 'percent_H-dssp',
        'B': 'percent_B-dssp',
        'E': 'percent_E-dssp',
        'G': 'percent_G-dssp',
        'I': 'percent_I-dssp',
        'T': 'percent_T-dssp',
        'S': 'percent_S-dssp',
    }
    infodict = {}
    for chain in dssp_df.chain.unique():
        chain_df = dssp_df[dssp_df.chain == chain]
        total = float(len(chain_df))
        # Start every class at 0.0 so absent codes are reported explicitly.
        expoinfo = dict.fromkeys(ss_keys.values(), 0.0)
        # ``.items()`` works on both py2 and py3 Series, unlike six's
        # iteritems(); codes outside the table are ignored, as before.
        for ss, count in chain_df.ss.value_counts().items():
            if ss in ss_keys:
                expoinfo[ss_keys[ss]] = count / total
        infodict[chain] = expoinfo
    return infodict
|
def make_line(self, response=None, importance=None, base=None, tag=None, features=None, namespaces=None, ):
    """Makes and returns an example string in VW syntax.

    If given, 'response', 'importance', 'base', and 'tag' are used
    to label the example. Features for the example come from
    any given features or namespaces, as well as any previously
    added namespaces (using them up in the process).
    """
    if namespaces is not None:
        self.add_namespaces(namespaces)
    if features is not None:
        # Bare features go into an anonymous namespace.
        namespace = Namespace(features=features)
        self.add_namespace(namespace)
    substrings = []
    tokens = []
    # VW label layout is positional: importance is only meaningful after a
    # response, and base only after an importance — hence the nesting.
    if response is not None:
        token = str(response)
        tokens.append(token)
        if importance is not None:  # Check only if response is given
            token = str(importance)
            tokens.append(token)
            if base is not None:  # Check only if importance is given
                token = str(base)
                tokens.append(token)
    if tag is not None:
        token = "'" + str(tag)
        # Tags are unambiguous if given a ' prefix
        tokens.append(token)
    else:
        token = ""
        # Spacing element to avoid ambiguity in parsing
        tokens.append(token)
    substring = ' '.join(tokens)
    substrings.append(substring)
    if self.namespaces:
        for namespace in self.namespaces:
            substring = namespace.to_string()
            substrings.append(substring)
    else:
        # For correct syntax: an example must end with a '|' separator
        # even when it has no namespaces.
        substrings.append('')
    line = '|'.join(substrings)
    self._line = line
    self.namespaces = []
    # Reset namespaces after their use
    return line
|
def _pad_bytes(request_bytes):
    """Pad *request_bytes* to a whole number of blocks (PKCS#7 style).

    :type request_bytes: bytes
    :rtype: bytes
    """
    # Always pad at least one byte; the pad byte's value equals the pad length.
    pad_len = _BLOCK_SIZE - (len(request_bytes) % _BLOCK_SIZE)
    pad_byte = bytes(bytearray([pad_len]))
    return request_bytes + pad_byte * pad_len
|
def raise_check_result(self):
    """Raise ACTIVE CHECK RESULT entry

    Example: "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."

    :return: None
    """
    if not self.__class__.log_active_checks:
        return
    # Map host state to the monitoring-log severity; anything else is info.
    level_by_state = {'DOWN': 'error', 'UNREACHABLE': 'warning'}
    log_level = level_by_state.get(self.state, 'info')
    message = 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state, self.attempt, self.output)
    self.broks.append(make_monitoring_log(log_level, message))
|
def derive_fields(self):
    """Derives our fields."""
    if self.fields is not None:
        fields = list(self.fields)
    else:
        # No explicit fields: take every field name from the form.
        fields = [form_field.name for form_field in self.form]
    # Readonly fields are added back in because they still need to be
    # displayed even though they are not editable.
    readonly = self.derive_readonly()
    if readonly:
        fields += readonly
    # Drop any excluded field (first occurrence only, matching list.remove).
    for excluded in self.derive_exclude():
        if excluded in fields:
            fields.remove(excluded)
    return fields
|
def text_search(self, query=None, language=lang.ENGLISH, lat_lng=None, radius=3200, type=None, types=[], location=None, pagetoken=None):
    """Perform a text search using the Google Places API.

    Only one of the query or pagetoken kwargs is required; the other
    keyword arguments are optional.

    keyword arguments:
    lat_lng   -- A dict containing the following keys: lat, lng (default None)
    location  -- A human readable location, e.g 'London, England' (default None)
    pagetoken -- Optional parameter to force the search result to return the
                 next 20 results from a previously run search. Setting this
                 parameter will execute a search with the same parameters used
                 previously. (default None)
    radius    -- The radius (in meters) around the location/lat_lng to
                 restrict the search to. The maximum is 50000 meters.
                 (default 3200)
    query     -- The text string on which to search, for example:
                 "Restaurant in New York".
    type      -- Optional type param used to indicate place category.
    types     -- An optional list of types, restricting the results to
                 Places (default []). If there is only one item the request
                 will be sent as the type param.
    """
    params = {'query': query}
    if lat_lng is not None or location is not None:
        params['location'] = self._generate_lat_lng_string(lat_lng, location)
        params['radius'] = radius
    # A single explicit `type` wins; otherwise collapse `types` to either
    # the singular param or a pipe-joined `types` param.
    if type:
        params['type'] = type
    elif len(types) == 1:
        params['type'] = types[0]
    elif len(types) > 1:
        params['types'] = '|'.join(types)
    if language is not None:
        params['language'] = language
    if pagetoken is not None:
        params['pagetoken'] = pagetoken
    self._request_params = params
    self._add_required_param_keys()
    url, places_response = _fetch_remote_json(GooglePlaces.TEXT_SEARCH_API_URL, self._request_params)
    _validate_response(url, places_response)
    return GooglePlacesSearchResult(self, places_response)
|
def get_dataset(self, key, info):
    """Read data from file and return the corresponding projectables."""
    if key.name not in ('longitude', 'latitude'):
        # Regular SDS variable: read it straight from the file.
        data = self.get_sds_variable(key.name)
        return Dataset(data, id=key, **info)
    logger.debug('Reading coordinate arrays.')
    # Lazily compute and cache the lon/lat arrays on first use.
    if self.lons is None or self.lats is None:
        self.lons, self.lats = self.get_lonlats()
    coords = self.lats if key.name == 'latitude' else self.lons
    return Dataset(coords, id=key, **info)
|
def func(self):
    """func : func name ( paramlist ) block"""
    # Grammar: FUNC VAR LPAREN param_list RPAREN block
    self.eat(TokenTypes.FUNC)
    func_name = Var(self.cur_token)
    self.eat(TokenTypes.VAR)
    self.eat(TokenTypes.LPAREN)
    signature = self.param_list()
    self.eat(TokenTypes.RPAREN)
    body = self.block()
    return FunctionDef(func_name, Function(signature, body))
|
def who_has(self, subid):
    """Return a list of names who own subid in their id range set."""
    owners = []
    for name in self.__map:
        # Keep first-seen order and avoid duplicates.
        if subid in self.__map[name] and name not in owners:
            owners.append(name)
    return owners
|
def current_version():
    """Get current version of directory-components.

    Reads directory_components/version.py, extracts the version with the
    Utils.get_version regex, prints it and checks for available updates.
    Prints an error message when no version string can be found.
    """
    filepath = os.path.abspath(project_root / "directory_components" / "version.py")
    version_py = get_file_string(filepath)
    regex = re.compile(Utils.get_version)
    # Search once and reuse the match object (the original searched twice).
    match = regex.search(version_py)
    if match is not None:
        current_version = match.group(0)
        print(color("Current directory-components version: {}".format(current_version), fg='blue', style='bold'))
        get_update_info()
    else:
        print(color('Error finding directory-components version.', fg='red', style='bold'))
|
def cmd(self, args=None, interact=True):
    """Process command-line arguments."""
    # With no explicit args, let the parser read sys.argv.
    parsed_args = arguments.parse_args() if args is None else arguments.parse_args(args)
    self.exit_code = 0
    with self.handling_exceptions():
        self.use_args(parsed_args, interact, original_args=args)
    self.exit_on_error()
|
def get_entity(self, etype, entity_id):
    """Return entity in this workspace.

    Args:
        etype (str): Entity type
        entity_id (str): Entity name/unique id
    """
    response = fapi.get_entity(self.namespace, self.name, etype, entity_id, self.api_url)
    fapi._check_response_code(response, 200)
    attributes = response.json()['attributes']
    return Entity(etype, entity_id, attributes)
|
def RegisterArtifact(self, artifact_rdfvalue, source="datastore", overwrite_if_exists=False, overwrite_system_artifacts=False):
    """Registers a new artifact.

    Raises ArtifactDefinitionError when an artifact with the same name is
    already registered and overwriting it is not permitted.
    """
    artifact_name = artifact_rdfvalue.name
    existing = self._artifacts.get(artifact_name)
    if existing is not None:
        if not overwrite_if_exists:
            details = "artifact already exists and `overwrite_if_exists` is unset"
            raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
        if not overwrite_system_artifacts and not existing.loaded_from.startswith("datastore:"):
            # This artifact was not uploaded to the datastore but came from a
            # file, refuse to overwrite.
            details = "system artifact cannot be overwritten"
            raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
    # Preserve where the artifact was loaded from to help debugging.
    artifact_rdfvalue.loaded_from = source
    # Clear any stale errors.
    artifact_rdfvalue.error_message = None
    self._artifacts[artifact_name] = artifact_rdfvalue
|
def has_name_version(self, name: str, version: str) -> bool:
    """Check if there exists a network with the name/version combination in the database."""
    # EXISTS subquery so the database never materialises matching rows.
    criterion = and_(Network.name == name, Network.version == version)
    return self.session.query(exists().where(criterion)).scalar()
|
def create_user_id(self, user_id, app_id, cidr_block=None, mount_point='app-id', **kwargs):
    """POST /auth/<mount point>/map/user-id/<user_id>

    :param user_id: user id to map
    :param app_id: app id (policy) or collection of app ids to associate
    :param cidr_block: optional CIDR restriction for the mapping
    :param mount_point: auth backend mount point (default 'app-id')
    :param kwargs: extra parameters forwarded verbatim to the API
    :return: the adapter's response
    """
    # A user-id can be associated with more than one app-id (aka policy).
    # Accept a collection for convenience and collapse it into the
    # comma-delimited string the API expects.
    if isinstance(app_id, (list, set, tuple)):
        app_id = ','.join(app_id)
    params = {'value': app_id}
    # cidr_block is a named param purely for caller convenience; only
    # send it when it actually has a value.
    if cidr_block:
        params['cidr_block'] = cidr_block
    params.update(kwargs)
    url = '/v1/auth/{}/map/user-id/{}'.format(mount_point, user_id)
    return self._adapter.post(url, json=params)
|
def put_file(self, key, file):
    """Store into key from file on disk

    Stores data from a source into key. *file* can either be a string,
    which will be interpreted as a filename, or an object with a *read()*
    method.

    If the passed object has a *fileno()* method, it may be used to speed
    up the operation.

    The file specified by *file*, if it is a filename, may be removed in
    the process, to avoid copying if possible. If you need to make a copy,
    pass the opened file instead.

    :param key: The key under which the data is to be stored
    :param file: A filename or an object with a read method. If a filename,
                 may be removed
    :returns: The key under which data was stored
    :raises exceptions.ValueError: If the key is not valid.
    :raises exceptions.IOError: If there was a problem moving the file in.
    """
    # FIXME: shouldn't we call self._check_valid_key here?
    if not isinstance(file, str):
        return self._put_file(key, file)
    return self._put_filename(key, file)
|
def to_array(self):
    """Serializes this AnimationMessage to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    :raises TypeError: if any field holds a value of an unsupported type.
    """
    array = super(AnimationMessage, self).to_array()

    if isinstance(self.animation, InputFile):
        array['animation'] = self.animation.to_array()  # type InputFile
    elif isinstance(self.animation, str):
        array['animation'] = u(self.animation)  # py2: type unicode, py3: type str
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if

    if self.receiver is not None:
        # NOTE(review): the original generated code tested
        # `isinstance(self.receiver, None)` and called `None(self.receiver)`,
        # which both raise TypeError at runtime (None is not a type and not
        # callable), and its str branch had been swallowed into a comment.
        # The dead None branch is dropped; str/int are restored as proper
        # elif clauses.
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of None, str, int.')
    # end if

    if self.reply_id is not None:
        # NOTE(review): assumes DEFAULT_MESSAGE_ID is a callable sentinel
        # type, as in the generated pytgbot code -- confirm against its
        # definition. The int branch had been fused into a comment in the
        # original, making the raise unconditional; restored here.
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
    # end if

    if self.duration is not None:
        array['duration'] = int(self.duration)  # type int
    if self.width is not None:
        array['width'] = int(self.width)  # type int
    if self.height is not None:
        array['height'] = int(self.height)  # type int

    if self.thumb is not None:
        if isinstance(self.thumb, InputFile):
            array['thumb'] = self.thumb.to_array()  # type InputFile
        elif isinstance(self.thumb, str):
            array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if

    if self.caption is not None:
        array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
    if self.parse_mode is not None:
        array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool

    if self.reply_markup is not None:
        if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
            array['reply_markup'] = self.reply_markup.to_array()
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
    # end if
    return array
|
def compress(self, image_path):
    '''compress will (properly) compress an image

    :param image_path: path of the image file to compress
    :return: path to the gzipped copy ("<image_path>.gz")

    Exits via bot.exit when the input file does not exist.
    '''
    import subprocess
    if not os.path.exists(image_path):
        bot.exit("Cannot find image %s" % image_path)
    compressed_image = "%s.gz" % image_path
    # Use subprocess with an argument list instead of os.system with an
    # interpolated shell string: avoids shell injection through
    # image_path and surfaces gzip failures as exceptions.
    with open(compressed_image, 'wb') as outfile:
        subprocess.check_call(['gzip', '-c', '-6', image_path], stdout=outfile)
    return compressed_image
|
def _populate_validated_data_with_sub_field_data(self, validated_data):
    """Move field data nested in `ModelSubSerializer` fields back into the
    overall validated data dict."""
    for name, field in self.get_fields().items():
        if not isinstance(field, ModelSubSerializer):
            continue
        # Lift the nested dict's entries up to the top level.
        sub_data = validated_data.pop(name, None)
        if sub_data:
            validated_data.update(sub_data)
|
def calc_finished(self):
    '''Check if the lockfile is in the calculation directory.
    It is removed by the script at the end regardless of the
    success of the calculation. This is totally tied to
    implementation and you need to implement your own scheme!

    :return: True when the calculation is no longer running (never
        started, finished, or crashed); False while the external
        job-checking script still reports it as running.
    '''
    # print_stack(limit=5)
    if not self.calc_running:
        # Not marked as running: nothing to check, treat as finished.
        # print('Calc running:', self.calc_running)
        return True
    else:
        # The calc is marked as running check if this is still true
        # We do it by external scripts. You need to write these
        # scripts for your own system.
        # See examples/scripts directory for examples.
        with work_dir(self.working_dir):
            o = check_output(['check-job'])
            # print('Status', o)
            # NOTE(review): o[0] is the first byte of the script's output;
            # membership in b'R' tests for the ASCII 'R' status flag --
            # presumably the scheduler's "running" marker; confirm against
            # your check-job implementation.
            if o[0] in b'R':
                # Still running - we do nothing to preserve the state
                return False
            else:
                # The job is not running maybe it finished maybe crashed
                # We hope for the best at this point and pass to the
                # standard update function
                return True
|
def load_yaml_file(yaml_file):
    """Read YAML file."""
    # Import lazily so the rest of the module works without PyYAML.
    try:
        import yaml
    except ImportError:
        sys.exit("Unable to import yaml module.")
    try:
        with io.open(yaml_file, "rt", encoding="utf-8") as handle:
            return yaml.safe_load(handle)
    except IOError:
        sys.exit("Unable to open YAML file: {0}".format(yaml_file))
|
def get_next_del_state(self, state, ret):
    """Return the next delete state from previous state."""
    # On failure (ret falsy) stay in the current state.
    if not ret:
        return state
    # On success, step back one state; INIT_STATE is the floor.
    return state if state == fw_const.INIT_STATE else state - 1
|
def _is_aborted(self, cycle, statustext, total_elements=None, freq=None):
    """Displays progress and returns True if abort

    Parameters
    ----------
    cycle: Integer
        The current operation cycle
    statustext: String
        Left text in statusbar to be displayed
    total_elements: Integer
        The number of elements that have to be processed
    freq: Integer, defaults to None
        No. operations between two abort possibilities, 1000 if None
    """
    # Choose the statusbar message template depending on whether the
    # total number of elements is known.
    if total_elements is None:
        statustext += _("{nele} elements processed. Press <Esc> to abort.")
    else:
        statustext += _("{nele} of {totalele} elements processed. "
                        "Press <Esc> to abort.")
    # An explicit freq also enables showing the progress message;
    # the default (None) only checks for abort every 1000 cycles.
    if freq is None:
        show_msg = False
        freq = 1000
    else:
        show_msg = True
    # Show progress in statusbar each freq (1000) cells
    if cycle % freq == 0:
        if show_msg:
            text = statustext.format(nele=cycle, totalele=total_elements)
            try:
                post_command_event(self.main_window, self.StatusBarMsg, text=text)
            except TypeError:
                # The main window does not exist any more
                pass
        # Now wait for the statusbar update to be written on screen
        if is_gtk():
            try:
                # Yield to the GUI event loop so the Esc key can be seen.
                wx.Yield()
            except:
                pass
    # Abort if we have to
    if self.need_abort:
        # We have to abort
        return True
    # Continue
    return False
|
def make_typecast(type_, node, lineno):
    """Wrapper: returns a Typecast node.

    :param type_: target type; must be a symbols.TYPE instance
    :param node: expression node to be cast
    :param lineno: source line number, kept for error reporting
    :return: the node produced by symbols.TYPECAST.make_node
    """
    assert isinstance(type_, symbols.TYPE)
    return symbols.TYPECAST.make_node(type_, node, lineno)
|
def load(self):
    """read dotfile and populate self

    opts will override the dotfile settings,
    make sure everything is synced in both
    opts and this object
    """
    if self.exists():
        with open(self.dot_file, 'r') as handle:
            self.update(json.load(handle))
    # Two-way sync: a command-line option wins when given; otherwise the
    # dotfile value is pushed back into the options dict.
    if self.options['context'] is not None:
        self['context'] = self.options['context']
    else:
        self.options['context'] = self['context']
    if self.options['defaults'] is not None:
        self['defaults'] = self.options['defaults']
    else:
        self.options['defaults'] = self['defaults']
    # 'output' only flows from options to self (no reverse sync).
    if self.options['output'] is not None:
        self['output'] = self.options['output']
    if self.options.get('inclusive', False):
        self['inclusive'] = True
    if self.options.get('exclude', []):
        self['exclude'].extend(self.options['exclude'])
    # Fall back to a directory under the CWD when no output was given.
    if self['output'] is None:
        self['output'] = os.path.join(os.getcwd(), 'dockerstache-output')
    # Resolve all paths to absolute form for later template processing.
    self['output_path'] = self.abs_output_dir()
    self['input_path'] = self.abs_input_dir()
    if self['context'] is not None:
        self['context_path'] = absolute_path(self['context'])
    if self['defaults'] is not None:
        self['defaults_path'] = absolute_path(self['defaults'])
|
def command(self, command=None, timestamp=None, element=None, host=None, service=None, user=None, parameters=None):  # pylint: disable=too-many-branches
    """Request to execute an external command

    Allowed parameters are:
    `command`: mandatory parameter containing the whole command line or only the command name
    `timestamp`: optional parameter containing the timestamp. If not present, the
    current timestamp is added in the command line
    `element`: the targeted element that will be appended after the command name (`command`).
    If element contains a '/' character it is split to make an host and service.
    `host`, `service` or `user`: the targeted host, service or user. Takes precedence over
    the `element` to target a specific element
    `parameters`: the parameter that will be appended after all the arguments

    When using this endpoint with the HTTP GET method, the semi colons that are commonly used
    to separate the parameters must be replace with %3B! This because the ; is an accepted
    URL query parameters separator...

    Indeed, the recommended way of using this endpoint is to use the HTTP POST method.

    In case of any error, this function returns an object containing some properties:
    '_status': 'ERR' because of the error
    `_message`: some more explanations about the error

    The `_status` field is 'OK' with an according `_message` to explain what the Arbiter
    will do depending upon the notification. The `command` property contains the formatted
    external command.

    :return: dict
    """
    if cherrypy.request.method in ["POST"]:
        if not cherrypy.request.json:
            return {'_status': u'ERR', '_message': u'You must POST parameters on this endpoint.'}
        # Only read the POSTed JSON body when the parameters were not
        # already supplied as keyword/query arguments.
        if command is None:
            try:
                command = cherrypy.request.json.get('command', None)
                timestamp = cherrypy.request.json.get('timestamp', None)
                element = cherrypy.request.json.get('element', None)
                host = cherrypy.request.json.get('host', None)
                service = cherrypy.request.json.get('service', None)
                user = cherrypy.request.json.get('user', None)
                parameters = cherrypy.request.json.get('parameters', None)
            except AttributeError:
                return {'_status': u'ERR', '_message': u'Missing command parameters'}
    if not command:
        return {'_status': u'ERR', '_message': u'Missing command parameter'}
    fields = split_semicolon(command)
    # Upper-case only the command name (the first semicolon-separated field).
    command_line = command.replace(fields[0], fields[0].upper())
    if timestamp:
        try:
            timestamp = int(timestamp)
        except ValueError:
            return {'_status': u'ERR', '_message': u'Timestamp must be an integer value'}
        command_line = '[%d] %s' % (timestamp, command_line)
    # Explicit host/service/user parameters take precedence over `element`.
    if host or service or user:
        if host:
            command_line = '%s;%s' % (command_line, host)
        if service:
            command_line = '%s;%s' % (command_line, service)
        if user:
            command_line = '%s;%s' % (command_line, user)
    elif element:
        if '/' in element:
            # Replace only the first / ("host/service" -> "host;service")
            element = element.replace('/', ';', 1)
        command_line = '%s;%s' % (command_line, element)
    if parameters:
        command_line = '%s;%s' % (command_line, parameters)
    # Add a command to get managed
    logger.warning("Got an external command: %s", command_line)
    self.app.add(ExternalCommand(command_line))
    return {'_status': u'OK', '_message': u"Got command: %s" % command_line, 'command': command_line}
|
def get_info(self):
    """Get current configuration info from 'v' command."""
    info_pattern = re.compile(r'\[.*\]')
    self._write_cmd('v')
    # Keep reading lines until one looks like the "[...]" info block.
    while True:
        raw = self._serial.readline()
        # Normalise to text: str lines go encode()->decode(), byte lines
        # lack .encode() and fall through to a plain decode.
        try:
            raw = raw.encode().decode('utf-8')
        except AttributeError:
            raw = raw.decode('utf-8')
        if info_pattern.match(raw):
            return self._parse_info(raw)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.