signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def save_arguments(module_name, elements):
    """Recursively record argument names and default values.

    Walks the ``elements`` mapping: a dict value is a submodule (recurse
    with the extended module path), anything else is a signature stub
    refined via live introspection of the imported module when possible.

    :param module_name: tuple of package-path components accumulated so far
    :param elements: mapping of name -> (dict | signature stub)
    """
    for elem, signature in elements.items():
        if isinstance(signature, dict):  # Submodule case
            save_arguments(module_name + (elem,), signature)
        else:  # use introspection to get the Python obj
            try:
                themodule = __import__(".".join(module_name))
                obj = getattr(themodule, elem)
                spec = getfullargspec(obj)
                if signature.args.args:
                    # logger.warn is a deprecated alias of warning
                    logger.warning(
                        "Overriding pythran description with argspec "
                        "information for: {}".format(
                            ".".join(module_name + (elem,))))
                args = [ast.Name(arg, ast.Param(), None) for arg in spec.args]
                defaults = list(spec.defaults or [])
                if sys.version_info.major == 3:
                    args += [ast.Name(arg, ast.Param(), None)
                             for arg in spec.kwonlyargs]
                    # NOTE(review): assumes every kwonly arg has a default;
                    # a missing key would raise KeyError (not caught below).
                    defaults += [spec.kwonlydefaults[kw]
                                 for kw in spec.kwonlyargs]
                # Avoid use of comprehension to fill "as much args/defaults"
                # as possible.
                # BUGFIX: when there are no defaults, args[:-0] is the EMPTY
                # list, which silently dropped every argument.  Guard the
                # negative slice instead.
                if defaults:
                    signature.args.args = args[:-len(defaults)]
                else:
                    signature.args.args = list(args)
                signature.args.defaults = []
                for arg, value in zip(args[len(args) - len(defaults):],
                                      defaults):
                    signature.args.defaults.append(to_ast(value))
                    signature.args.args.append(arg)
            except (AttributeError, ImportError, TypeError, ToNotEval):
                pass
|
def get_invoices_per_page(self, per_page=1000, page=1, params=None):
    """Fetch a single page of invoices.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    # Delegate to the generic paginated resource fetcher.
    return self._get_resource_per_page(
        resource=INVOICES,
        per_page=per_page,
        page=page,
        params=params,
    )
|
def close(self):
    """Close the underlying socket(s)."""
    conns = self.c
    if not isinstance(conns, dict):
        # Single connection: close its socket directly.
        conns.socket.close()
        return
    # Multiple clients keyed in a dict: close each client's socket.
    for client in conns.values():
        client.sock.close()
|
def write_title(self, title: str, title_color: str = YELLOW, hyphen_line_color: str = WHITE):
    """Print a title with an underline beneath it.

    NOTE(review): despite the parameter name, the underline is drawn with
    '=' characters, not hyphens -- kept as-is to preserve output.

    :param title: The title to print.
    :param title_color: The title text color (default is yellow).
    :param hyphen_line_color: The underline color (default is white).
    """
    underline = '=' * (len(title) + 3)
    self.write_line(title_color + title)
    self.write_line(hyphen_line_color + underline)
|
def get_sequence_rules_for_assessment(self, assessment_id):
    """Gets a ``SequenceRuleList`` for an entire assessment.

    arg:    assessment_id (osid.id.Id): an assessment ``Id``
    return: (osid.assessment.authoring.SequenceRuleList) - the
            returned ``SequenceRule`` list
    raise:  NullArgument - ``assessment_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # First, recursively get all the partIds for the assessment
    def get_all_children_part_ids(part):
        # Depth-first collection of every descendant assessment-part Id.
        child_ids = []
        if part.has_children():
            child_ids = list(part.get_child_assessment_part_ids())
            for child in part.get_child_assessment_parts():
                child_ids += get_all_children_part_ids(child)
        return child_ids
    all_assessment_part_ids = []
    # Look up the assessment itself through the assessment service.
    mgr = self._get_provider_manager('ASSESSMENT', local=True)
    lookup_session = mgr.get_assessment_lookup_session(proxy=self._proxy)
    lookup_session.use_federated_bank_view()
    assessment = lookup_session.get_assessment(assessment_id)
    if assessment.has_children():
        # Switch to the authoring manager to resolve assessment parts.
        mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True)
        lookup_session = mgr.get_assessment_part_lookup_session(proxy=self._proxy)
        lookup_session.use_federated_bank_view()
        # Direct children plus every nested descendant part.
        all_assessment_part_ids = list(assessment.get_child_ids())
        for child_part_id in assessment.get_child_ids():
            child_part = lookup_session.get_assessment_part(child_part_id)
            all_assessment_part_ids += get_all_children_part_ids(child_part)
    # Query sequence rules whose assessmentPartId matches any collected part.
    id_strs = [str(part_id) for part_id in all_assessment_part_ids]
    collection = JSONClientValidated('assessment_authoring', collection='SequenceRule', runtime=self._runtime)
    result = collection.find(dict({'assessmentPartId': {'$in': id_strs}}, **self._view_filter()))
    return objects.SequenceRuleList(result, runtime=self._runtime)
|
def sign(self, msg, key):
    """Create an RFC 7515 (JWS) signature over *msg* with an EC key.

    :param msg: The message to sign
    :param key: An ec.EllipticCurvePrivateKey instance
    :return: the raw ``r || s`` signature, each half zero-padded to the
        byte length of the curve
    :raises TypeError: if *key* is not an EllipticCurvePrivateKey
    """
    if not isinstance(key, ec.EllipticCurvePrivateKey):
        raise TypeError("The private key must be an instance of "
                        "ec.EllipticCurvePrivateKey")
    # Verify the key's curve matches this algorithm instance.
    self._cross_check(key.public_key())
    octets = (key.curve.key_size + 7) // 8
    der_sig = key.sign(msg, ec.ECDSA(self.hash_algorithm()))
    # Cryptography returns an ASN.1/DER-encoded signature; JWS requires
    # the raw fixed-width concatenation r || s, so decode and repack.
    (r, s) = decode_dss_signature(der_sig)
    return int_to_bytes(r, octets) + int_to_bytes(s, octets)
|
def _setup_authentication ( self , username , password ) :
'''Create the authentication object with the given credentials .'''
|
# # BUG WORKAROUND
if self . version < 1.1 : # Version 1.0 had a bug when using the key parameter .
# Later versions have the opposite bug ( a key in the username doesn ' t function )
if not username :
username = self . _key
self . _key = None
if not username :
return
if not password :
password = '12345'
# the same combination on my luggage ! ( required dummy value )
# realm = ' Redmine API ' - doesn ' t always work
# create a password manager
password_mgr = urllib2 . HTTPPasswordMgrWithDefaultRealm ( )
password_mgr . add_password ( None , self . _url , username , password )
handler = urllib2 . HTTPBasicAuthHandler ( password_mgr )
# create " opener " ( OpenerDirector instance )
self . _opener = urllib2 . build_opener ( handler )
# set the opener when we fetch the URL
self . _opener . open ( self . _url )
# Install the opener .
urllib2 . install_opener ( self . _opener )
|
def _compute_ranks ( X , winsorize = False , truncation = None , verbose = True ) :
"""Transform each column into ranked data . Tied ranks are averaged .
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara ' s scaled rank based Z - estimator .
Parameters
X : array - like , shape = ( n _ samples , n _ features )
The data matrix where each column is a feature .
Row observations for each column will be replaced
by correponding rank .
winsorize : bool
Choose whether ranks should be winsorized ( trimmed ) or not . If True ,
then ranks will be winsorized using the truncation parameter .
truncation : ( float )
The default value is given by 1 / ( 4 n ^ ( 1/4 ) * sqrt ( pi log n ) ) , where
n is the number of samples .
Returns
Xrank
References
Liu , Han , John Lafferty , and Larry Wasserman .
" The nonparanormal : Semiparametric estimation of high dimensional
undirected graphs . "
Journal of Machine Learning Research 10 . Oct ( 2009 ) : 2295-2328."""
|
n_samples , n_features = X . shape
Xrank = np . zeros ( shape = X . shape )
if winsorize :
if truncation is None :
truncation = 1 / ( 4 * np . power ( n_samples , 0.25 ) * np . sqrt ( np . pi * np . log ( n_samples ) ) )
elif truncation > 1 :
truncation = np . min ( 1.0 , truncation )
for col in np . arange ( n_features ) :
Xrank [ : , col ] = rankdata ( X [ : , col ] , method = "average" )
Xrank [ : , col ] /= n_samples
if winsorize :
if n_samples > 100 * n_features :
Xrank [ : , col ] = n_samples * Xrank [ : , col ] / ( n_samples + 1 )
else :
lower_truncate = Xrank [ : , col ] <= truncation
upper_truncate = Xrank [ : , col ] > 1 - truncation
Xrank [ lower_truncate , col ] = truncation
Xrank [ upper_truncate , col ] = 1 - truncation
return Xrank
|
def _apply_dvs_product_info ( product_info_spec , product_info_dict ) :
'''Applies the values of the product _ info _ dict dictionary to a product info
spec ( vim . DistributedVirtualSwitchProductSpec )'''
|
if product_info_dict . get ( 'name' ) :
product_info_spec . name = product_info_dict [ 'name' ]
if product_info_dict . get ( 'vendor' ) :
product_info_spec . vendor = product_info_dict [ 'vendor' ]
if product_info_dict . get ( 'version' ) :
product_info_spec . version = product_info_dict [ 'version' ]
|
def rsem(job, job_vars):
    """Runs RSEM (via Docker) to produce gene and isoform counts.

    job_vars: tuple -- (input_args, ids) dictionaries

    Renames RSEM's outputs to 'rsem_gene.tab' / 'rsem_isoform.tab', stores
    them in the Toil FileStore, and returns the promise of the child
    rsem_postprocess job.
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cpus = input_args['cpu_count']
    sudo = input_args['sudo']
    single_end_reads = input_args['single_end_reads']
    # I/O: fetch the filtered BAM and zipped RSEM reference into work_dir.
    filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip')
    subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir])
    output_prefix = 'rsem'
    # Make tool call to Docker
    parameters = ['--quiet', '--no-qualities', '-p', str(cpus),
                  '--forward-prob', '0.5', '--seed-length', '25',
                  '--fragment-length-mean', '-1.0', '--bam', docker_path(filtered_bam)]
    if not single_end_reads:
        # Paired-end data needs the explicit RSEM flag.
        parameters.extend(['--paired-end'])
    parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])
    docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
                tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
    # Rename RSEM outputs to the stable names used downstream.
    os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab'))
    os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab'))
    # Write to FileStore
    ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab'))
    ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab'))
    # Run child jobs
    return job.addChildJobFn(rsem_postprocess, job_vars).rv()
|
def parse(self, elt, ps):
    '''Parse an element according to processContents -- 'lax'|'skip'|'strict'.

    1) if 'skip': check namespaces, and return the DOM node.
    2) if 'lax': look for declaration, or definition.  If not found,
       return DOM node.
    3) if 'strict': get declaration, or raise.
    '''
    skip = self.processContents == 'skip'
    nspname, pname = _get_element_nsuri_name(elt)
    # Global element declaration lookup.
    what = GED(nspname, pname)
    if not skip and what is not None:
        pyobj = what.parse(elt, ps)
        try:
            pyobj.typecode = what
        except AttributeError, ex:
            # Assume this means builtin type.
            pyobj = WrapImmutable(pyobj, what)
        return pyobj
    # Allow use of "<any>" element declarations w/ local
    # element declarations
    prefix, typeName = SplitQName(_find_type(elt))
    if not skip and typeName:
        namespaceURI = _resolve_prefix(elt, prefix or 'xmlns')
        # First look thru user defined namespaces, if don't find
        # look for 'primitives'.
        pyclass = GTD(namespaceURI, typeName) or Any
        what = pyclass(pname=(nspname, pname))
        pyobj = what.parse(elt, ps)
        try:
            pyobj.typecode = what
        except AttributeError, ex:
            # Assume this means builtin type.
            pyobj = WrapImmutable(pyobj, what)
        what.typed = True
        return pyobj
    # No declaration/definition found: pick a fallback typecode.
    if skip:
        what = XML(pname=(nspname, pname), wrapped=False)
    elif self.processContents == 'lax':
        what = Any(pname=(nspname, pname), unique=True)
    else:
        # NOTE(review): 'strict' currently behaves exactly like 'lax' here
        # instead of raising as the docstring says -- confirm intent.
        what = Any(pname=(nspname, pname), unique=True)
    try:
        pyobj = what.parse(elt, ps)
    except EvaluateException, ex:
        self.logger.debug("error parsing: %s" % str(ex))
        if len(_children(elt)) != 0:
            # Complex content: fall back to a generic dict parse.
            self.logger.debug('parse <any>, return as dict')
            return Any(aslist=False).parse_into_dict_or_list(elt, ps)
        self.logger.debug("Give up, parse (%s,%s) as a String", what.nspname, what.pname)
        what = String(pname=(nspname, pname), typed=False)
        return WrapImmutable(what.parse(elt, ps), what)
    if pyobj is None:
        return
    # dict is elementName:value pairs
    if type(pyobj) is dict:
        return pyobj
    try:
        pyobj.typecode = what
    except AttributeError:
        pyobj = WrapImmutable(pyobj, what)
    return pyobj
|
def slice(self, tf_tensor, tensor_shape):
    """Slice out this processor's part of the tensor, selected by pnum.

    :param tf_tensor: the full tensor to slice
    :param tensor_shape: the (mesh) shape of the tensor
    :return: a LaidOutTensor wrapping the selected slice
    """
    tensor_layout = self.tensor_layout(tensor_shape)
    if tensor_layout.is_fully_replicated:
        # Every processor holds the whole tensor; no slicing needed.
        return self.LaidOutTensor([tf_tensor])
    else:
        slice_shape = self.slice_shape(tensor_shape)
        # Compute slice offsets for every processor, then gather ours
        # at graph-execution time via pnum_tensor.
        slice_begins = [self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)]
        slice_begins_tensor = tf.stack(slice_begins)
        # slice on source device
        selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
        return self.LaidOutTensor([tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
|
def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None):
    '''Ensure an ENI does not have secondary private ip addresses associated with it

    name
        (String) - State definition name

    network_interface_name
        (String) - Name of the EC2 network interface (exactly one of this
        or network_interface_id must be provided)

    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789

    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be absent on the ENI.

    region
        (string) - Region to connect to.

    key
        (string) - Secret key to be used.

    keyid
        (string) - Access key to be used.

    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a
        dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided")
    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI")
    # Normalize a single address into a one-element list.
    if not isinstance(private_ip_addresses, list):
        private_ip_addresses = [private_ip_addresses]
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []}}
    get_eni_args = {'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
    # Check if there are any old private ips to remove from the eni
    primary_private_ip = None
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])
            if eni_pip['primary']:
                primary_private_ip = eni_pip['private_ip_address']
    ips_to_remove = []
    for private_ip in private_ip_addresses:
        if private_ip in ret['changes']['old']:
            ips_to_remove.append(private_ip)
        if private_ip == primary_private_ip:
            # Refuse to unassign the primary address: hard failure.
            ret['result'] = False
            ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format(primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses)))
            ret['changes'] = {}
            return ret
    if ips_to_remove:
        if not __opts__['test']:
            # Unassign secondary private ips to ENI
            assign_ips_args = {'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
            __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args)
            # Verify secondary private ips were properly unassigned from ENI
            eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
            if eni and eni.get('result', {}).get('private_ip_addresses', None):
                for eni_pip in eni['result']['private_ip_addresses']:
                    ret['changes']['new'].append(eni_pip['private_ip_address'])
            ips_not_removed = []
            for private_ip in private_ip_addresses:
                if private_ip in ret['changes']['new']:
                    ips_not_removed.append(private_ip)
            if ips_not_removed:
                ret['result'] = False
                ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format('\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed)))
            else:
                ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove))
            # Verify there were changes
            if ret['changes']['old'] == ret['changes']['new']:
                ret['changes'] = {}
        else:
            # Testing mode, show that there were ips to remove
            ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format('\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove)))
            ret['changes'] = {}
            ret['result'] = None
    else:
        ret['comment'] = 'ips on network interface: {0}'.format('\n\t- ' + '\n\t- '.join(ret['changes']['old']))
        # there were no changes since we did not attempt to remove ips
        ret['changes'] = {}
    return ret
|
def get_last_traded_dt(self, asset, dt, data_frequency):
    """Return the last traded dt for *asset* as seen from *dt*.

    If there is a trade on *dt* itself, *dt* is returned.
    """
    reader = self._get_pricing_reader(data_frequency)
    return reader.get_last_traded_dt(asset, dt)
|
def solve(self, S, dimK=None):
    """Run one sparse-coding plus dictionary-update step on training
    data `S` and return the updated dictionary.
    """
    # Fall back to the dimK given at __init__ time.
    if dimK is None and self.dimK is not None:
        dimK = self.dimK
    # Start solve timers (total and without-evaluation).
    self.timer.start(['solve', 'solve_wo_eval'])
    # CSC (sparse coding) step on S, then the dictionary step.
    self.init_vars(S, dimK)
    self.xstep(S, self.lmbda, dimK)
    self.dstep()
    self.timer.stop('solve_wo_eval')
    # Record iteration statistics and advance the iteration counter.
    self.manage_itstat()
    self.j += 1
    self.timer.stop('solve')
    # Return current dictionary
    return self.getdict()
|
def local_self_attention_layer(hparams, prefix):
    """Create a local self-attention layer from hyperparameters.

    Head counts and shared_kv are read with the given *prefix*; the
    radius and key/value size come from fixed hparams fields.
    """
    config = dict(
        num_heads=hparams.get(prefix + "num_heads"),
        num_memory_heads=hparams.get(prefix + "num_memory_heads"),
        radius=hparams.local_attention_radius,
        key_value_size=hparams.d_kv,
        shared_kv=hparams.get(prefix + "shared_kv", False),
        attention_kwargs=attention_kwargs_from_hparams(hparams),
    )
    return transformer_layers.LocalSelfAttention(**config)
|
def singleton(cls, session, include=None):
    """Get the singleton API resource of this class.

    Some Helium API resources are singletons.  The authorized user and
    organization for a given API key are examples of this.

    .. code-block:: python

        authorized_user = User.singleton(session)

    will retrieve the authorized user for the given :class:`Session`.

    Keyword Args:
        include: Resource classes to include
    """
    query = build_request_include(include, None)
    target = session._build_url(cls._resource_path())
    # Callback that materializes the singleton from the JSON response.
    handler = cls._mk_one(session, singleton=True, include=include)
    return session.get(target, CB.json(200, handler), params=query)
|
def unnest(elem):
    """Flatten an arbitrarily nested iterable, yielding leaves lazily.

    Strings are treated as leaves when nested (not split into characters),
    matching the original six.string_types check.

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.abc.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    # ``str`` replaces the former six.string_types check: on Python 3 the
    # two are equivalent, and dropping six removes a third-party dependency.
    if isinstance(elem, Iterable) and not isinstance(elem, str):
        # tee so the caller's iterator is not consumed by our traversal.
        elem, target = tee(elem, 2)
    else:
        target = elem
    for el in target:
        if isinstance(el, Iterable) and not isinstance(el, str):
            el, el_copy = tee(el, 2)
            for sub in unnest(el_copy):
                yield sub
        else:
            yield el
|
def _compile_update_from ( self , query ) :
"""Compile the " from " clause for an update with a join .
: param query : A QueryBuilder instance
: type query : QueryBuilder
: return : The compiled sql
: rtype : str"""
|
if not query . joins :
return ""
froms = [ ]
for join in query . joins :
froms . append ( self . wrap_table ( join . table ) )
if len ( froms ) :
return " FROM %s" % ", " . join ( froms )
return ""
|
def rec_sqrt(x, context=None):
    """Return the reciprocal square root of x.

    Return +Inf if x is ±0, +0 if x is +Inf, and NaN if x is negative.
    """
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_rec_sqrt,
        (operand,),
        context,
    )
|
def parse(self, title, pageid=None):
    """Returns Mediawiki action=parse query string.

    :param title: page title (may be empty when *pageid* is given)
    :param pageid: numeric page id used in place of the title
    """
    # NOTE(review): when title is falsy, PAGE receives
    # ``safequote(title) or pageid`` -- confirm safequote tolerates None.
    qry = self.PARSE.substitute(WIKI=self.uri, ENDPOINT=self.endpoint, PAGE=safequote(title) or pageid)
    if pageid and not title:
        # Swap the page= parameter for pageid= and drop redirect handling.
        qry = qry.replace('&page=', '&pageid=').replace('&redirects', '')
    if self.variant:
        qry += '&variant=' + self.variant
    self.set_status('parse', pageid or title)
    return qry
|
def timeline(self):
    """Return the next timeline number derived from the cluster history.

    Returns 0 when the history is absent or unparseable, 1 for an empty
    ('[]') history value, otherwise the last recorded timeline + 1.
    """
    if not self.history:
        return 0
    if self.history.lines:
        try:
            return int(self.history.lines[-1][0]) + 1
        except Exception:
            logger.error('Failed to parse cluster history from DCS: %s', self.history.lines)
        return 0
    if self.history.value == '[]':
        return 1
    return 0
|
def create(self, attributes=None, **kwargs):
    """Create a webhook with the given attributes.

    Extra keyword arguments are accepted for interface compatibility but
    ignored; webhooks never carry a resource_id.
    """
    parent = super(WebhooksProxy, self)
    return parent.create(resource_id=None, attributes=attributes)
|
def bounded_from_config(cls, cp, section, variable_args, bounds_required=False, additional_opts=None):
    """Returns a bounded distribution based on a configuration file.

    The parameters for the distribution are retrieved from the section
    titled "[`section`-`variable_args`]" in the config file.

    Parameters
    ----------
    cls : pycbc.prior class
        The class to initialize with.
    cp : pycbc.workflow.WorkflowConfigParser
        A parsed configuration file that contains the distribution options.
    section : str
        Name of the section in the configuration file.
    variable_args : str
        The names of the parameters for this distribution, separated by
        `prior.VARARGS_DELIM`.  These must appear in the "tag" part of the
        section header.
    bounds_required : {False, bool}
        If True, raise a ValueError if a min and max are not provided for
        every parameter.  Otherwise, the prior will be initialized with
        the parameter set to None.  Even if bounds are not required, a
        ValueError will be raised if only one bound is provided; i.e.,
        either both bounds need to be provided or no bounds.
    additional_opts : {None, dict}
        Additional options to pass to the distribution class, as a
        dictionary of option -> value.  An option given here overrides the
        same option read from the config file.

    Returns
    -------
    cls
        An instance of the given class.
    """
    tag = variable_args
    variable_args = variable_args.split(VARARGS_DELIM)
    if additional_opts is None:
        additional_opts = {}
    # list of args that are used to construct distribution
    special_args = ["name"] + ['min-{}'.format(arg) for arg in variable_args] + ['max-{}'.format(arg) for arg in variable_args] + ['btype-min-{}'.format(arg) for arg in variable_args] + ['btype-max-{}'.format(arg) for arg in variable_args] + ['cyclic-{}'.format(arg) for arg in variable_args] + list(additional_opts.keys())
    # get a dict with bounds as value
    dist_args = {}
    for param in variable_args:
        bounds = get_param_bounds_from_config(cp, section, tag, param)
        if bounds_required and bounds is None:
            raise ValueError("min and/or max missing for parameter %s" % (param))
        dist_args[param] = bounds
    # add any additional options that user put in that section
    for key in cp.options("-".join([section, tag])):
        # ignore options that are already included
        if key in special_args:
            continue
        # check if option can be cast as a float
        val = cp.get_opt_tag(section, key, tag)
        try:
            val = float(val)
        except ValueError:
            pass
        # add option
        dist_args.update({key: val})
    # programmatic options take precedence over those read from the file
    dist_args.update(additional_opts)
    # construction distribution and add to list
    return cls(**dist_args)
|
def p_sens_egde_paren(self, p):
    # NOTE: the string below is the PLY grammar rule for this production.
    # It is functional (read via __doc__ by the parser generator), not
    # documentation -- do not edit it.
    'senslist : AT LPAREN edgesigs RPAREN'
    # Build the sensitivity-list node from the parenthesized edge signals
    # and propagate the '@' token's line number to node and result.
    p[0] = SensList(p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def _rsaes_oaep_decrypt(self, C, h=None, mgf=None, L=None):
    """Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
    7.1.2 of RFC 3447.  Not intended to be used directly.  Please, see
    encrypt() method for type "OAEP".

    Input:
       C  : ciphertext to be decrypted, an octet string of length k, where
            k = 2*hLen + 2 (k denotes the length in octets of the RSA
            modulus and hLen the length in octets of the hash function
            output)
       h  : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
            'sha256', 'sha384').  'sha1' is used if none is provided.
       mgf: the mask generation function f: seed, maskLen -> mask
       L  : optional label whose association with the message is to be
            verified; the default value for L, if not provided, is the
            empty string.

    Output:
       message, an octet string of length mLen, where mLen <= k - 2*hLen - 2
       On error, None is returned.
    """
    # The steps below are the one described in Sect. 7.1.2 of RFC 3447.
    # 1) Length Checking
    #    1.a) is not done
    if h is None:
        h = "sha1"
    if not h in _hashFuncParams:
        warning("Key._rsaes_oaep_decrypt(): unknown hash function %s.", h)
        return None
    hLen = _hashFuncParams[h][0]
    hFun = _hashFuncParams[h][1]
    # NOTE(review): '/' here is Python 2 integer division; under Python 3
    # it would yield a float -- use '//' when porting.
    k = self.modulusLen / 8
    cLen = len(C)
    if cLen != k:
        # 1.b)
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(cLen != k)")
        return None
    if k < 2 * hLen + 2:
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(k < 2*hLen + 2)")
        return None
    # 2) RSA decryption
    c = pkcs_os2ip(C)
    # 2.a)
    m = self._rsadp(c)
    # 2.b)
    EM = pkcs_i2osp(m, k)
    # 2.c)
    # 3) EME-OAEP decoding
    if L is None:
        # 3.a)
        L = ""
    lHash = hFun(L)
    Y = EM[:1]
    # 3.b)
    if Y != '\x00':
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(Y is not zero)")
        return None
    maskedSeed = EM[1:1 + hLen]
    maskedDB = EM[1 + hLen:]
    if mgf is None:
        # Default mask generation function is MGF1 over the same hash.
        mgf = lambda x, y: pkcs_mgf1(x, y, h)
    seedMask = mgf(maskedDB, hLen)
    # 3.c)
    seed = strxor(maskedSeed, seedMask)
    # 3.d)
    dbMask = mgf(seed, k - hLen - 1)
    # 3.e)
    DB = strxor(maskedDB, dbMask)
    # 3.f)
    # I am aware of the note at the end of 7.1.2 regarding error
    # conditions reporting but the one provided below are for _local_
    # debugging purposes. --arno
    lHashPrime = DB[:hLen]
    # 3.g)
    tmp = DB[hLen:].split('\x01', 1)
    if len(tmp) != 2:
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(0x01 separator not found)")
        return None
    PS, M = tmp
    if PS != '\x00' * len(PS):
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(invalid padding string)")
        return None
    if lHash != lHashPrime:
        warning("Key._rsaes_oaep_decrypt(): decryption error. " "(invalid hash)")
        return None
    return M
|
def _c0(self):
    "the logarithm of normalizing constant in pdf"
    half_df = self.df / 2
    dim, scale = self._p, self.S
    # (df/2) * (log|S| + p*log 2) plus the multivariate log-gamma term.
    return half_df * (logdet(scale) + dim * logtwo) + lpgamma(dim, half_df)
|
def getHelp(arg=None):
    """This function provides interactive manuals and tutorials.

    With no argument, prints the top-level menu; string arguments select
    a topic; module-level objects (ISO, ISO_ID, profiles, slit_functions)
    select dedicated help pages; anything else falls through to the
    builtin help().
    """
    if arg == None:
        # Top-level menu.
        print('--------------------------------------------------------------')
        print('Hello, this is an interactive help system of HITRANonline API.')
        print('--------------------------------------------------------------')
        print('Run getHelp(.) with one of the following arguments:')
        print('    tutorial -  interactive tutorials on HAPI')
        print('    units    -  units used in calculations')
        print('    index    -  index of available HAPI functions')
    elif arg == 'tutorial':
        print('-----------------------------------')
        print('This is a tutorial section of help.')
        print('-----------------------------------')
        print('Please choose the subject of tutorial:')
        print('    data     -  downloading the data and working with it')
        print('    spectra  -  calculating spectral functions')
        print('    plotting -  visualizing data with matplotlib')
        print('    python   -  Python quick start guide')
    elif arg == 'python':
        print_python_tutorial()
    elif arg == 'data':
        print_data_tutorial()
    elif arg == 'spectra':
        print_spectra_tutorial()
    elif arg == 'plotting':
        print_plotting_tutorial()
    elif arg == 'index':
        # Function index grouped by topic.
        print('------------------------------')
        print('FETCHING DATA:')
        print('------------------------------')
        print('  fetch')
        print('  fetch_by_ids')
        print('')
        print('------------------------------')
        print('WORKING WITH DATA:')
        print('------------------------------')
        print('  db_begin')
        print('  db_commit')
        print('  tableList')
        print('  describe')
        print('  select')
        print('  sort')
        print('  extractColumns')
        print('  getColumn')
        print('  getColumns')
        print('  dropTable')
        print('')
        print('------------------------------')
        print('CALCULATING SPECTRA:')
        print('------------------------------')
        print('  profiles')
        print('  partitionSum')
        print('  absorptionCoefficient_HT')
        print('  absorptionCoefficient_Voigt')
        print('  absorptionCoefficient_SDVoigt')
        print('  absorptionCoefficient_Lorentz')
        print('  absorptionCoefficient_Doppler')
        print('  transmittanceSpectrum')
        print('  absorptionSpectrum')
        print('  radianceSpectrum')
        print('')
        print('------------------------------')
        print('CONVOLVING SPECTRA:')
        print('------------------------------')
        print('  convolveSpectrum')
        print('  slit_functions')
        print('')
        print('------------------------------')
        print('INFO ON ISOTOPOLOGUES:')
        print('------------------------------')
        print('  ISO_ID')
        print('  abundance')
        print('  molecularMass')
        print('  moleculeName')
        print('  isotopologueName')
        print('')
        print('------------------------------')
        print('MISCELLANEOUS:')
        print('------------------------------')
        print('  getStickXY')
        print('  read_hotw')
    # The following compare against module-level objects, not strings.
    elif arg == ISO:
        print_iso()
    elif arg == ISO_ID:
        print_iso_id()
    elif arg == profiles:
        print_profiles()
    elif arg == slit_functions:
        print_slit_functions()
    else:
        # Fall back to Python's builtin help for anything else.
        help(arg)
|
def _getfield ( self , block , name ) :
"""Return the field with the given ` name ` from ` block ` .
If no field with ` name ` exists in any namespace , raises a KeyError .
: param block : xblock to retrieve the field from
: type block : : class : ` ~ xblock . core . XBlock `
: param name : name of the field to retrieve
: type name : str
: raises KeyError : when no field with ` name ` exists in any namespace"""
|
# First , get the field from the class , if defined
block_field = getattr ( block . __class__ , name , None )
if block_field is not None and isinstance ( block_field , Field ) :
return block_field
# Not in the class , so name
# really doesn ' t name a field
raise KeyError ( name )
|
def focus0(self):
    '''First focus of the ellipse, Point class.'''
    focus = Point(self.center)
    # The foci lie on the major axis, offset from the center by the
    # linear eccentricity.
    axis = 'x' if self.xAxisIsMajor else 'y'
    setattr(focus, axis, getattr(focus, axis) - self.linearEccentricity)
    return focus
|
def port_add(self, dpid, port, mac):
    """:returns: old port if learned. (this may be = port)
                 None otherwise
    """
    previous = self.mac_to_port[dpid].get(mac)
    # Learn (or re-learn) the mapping unconditionally.
    self.mac_to_port[dpid][mac] = port
    if previous not in (None, port):
        # The MAC moved to a different port; log the change.
        LOG.debug('port_add: 0x%016x 0x%04x %s', dpid, port, haddr_to_str(mac))
    return previous
|
def get(self, part):
    """Returns a section of the region as a new region

    Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc.
    Also accepts an int 200-999:

    * First digit:  Raster (*n* rows by *n* columns)
    * Second digit: Row index (if equal to raster, gets the whole row)
    * Third digit:  Column index (if equal to raster, gets the whole column)

    Region.get(522) will use a raster of 5 rows and 5 columns and return
    the cell in the middle.

    Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle.
    """
    if part == self.MID_VERTICAL:
        # Middle half of the width, full height.
        # (Bug fix: was a bare `y`, which raised NameError.)
        return Region(self.x + (self.w / 4), self.y, self.w / 2, self.h)
    elif part == self.MID_HORIZONTAL:
        # Middle half of the height, full width.
        return Region(self.x, self.y + (self.h / 4), self.w, self.h / 2)
    elif part == self.MID_BIG:
        # Center quarter of the region.
        return Region(self.x + (self.w / 4), self.y + (self.h / 4), self.w / 2, self.h / 2)
    elif isinstance(part, int) and 200 <= part <= 999:
        # Unpack the three digits as ints. (Bug fix: they were left as
        # single-character strings, which broke downstream arithmetic in
        # setRaster/getRow/getCol/getCell; the `==` comparisons below are
        # unaffected by the conversion.)
        raster, row, column = (int(digit) for digit in str(part))
        self.setRaster(raster, raster)
        if row == raster and column == raster:
            return self
        elif row == raster:
            return self.getCol(column)
        elif column == raster:
            return self.getRow(row)
        else:
            return self.getCell(row, column)
    else:
        # Unknown selector: return the region unchanged.
        return self
|
def update_handles(self, key, axis, element, ranges, style):
    """Update the elements of the plot."""
    # Drop stale artists before drawing fresh ones.
    self.teardown_handles()
    data, plot_kwargs, axis_kwargs = self.get_data(element, ranges, style)
    with abbreviated_exception():
        artists = self.init_artists(axis, data, plot_kwargs)
    self.handles.update(artists)
    return axis_kwargs
|
def pipe(self, texts, as_tuples=False, n_threads=-1, batch_size=1000, disable=[], cleanup=False, component_cfg=None):
    """Process texts as a stream, and yield `Doc` objects in order.

    texts (iterator): A sequence of texts to process.
    as_tuples (bool): If set to True, inputs should be a sequence of
        (text, context) tuples. Output will then be a sequence of
        (doc, context) tuples. Defaults to False.
    batch_size (int): The number of texts to buffer.
    disable (list): Names of the pipeline components to disable.
    cleanup (bool): If True, unneeded strings are freed to control memory
        use. Experimental.
    component_cfg (dict): An optional dictionary with extra keyword
        arguments for specific components.
    YIELDS (Doc): Documents in the order of the original text.

    DOCS: https://spacy.io/api/language#pipe
    """
    # NOTE(review): `disable=[]` is a mutable default argument; safe only
    # as long as nothing ever mutates it.
    if n_threads != -1:
        # n_threads is deprecated and has no effect.
        deprecation_warning(Warnings.W016)
    if as_tuples:
        # Split the (text, context) pairs into two parallel streams,
        # process the text stream, then zip contexts back onto the docs.
        text_context1, text_context2 = itertools.tee(texts)
        texts = (tc[0] for tc in text_context1)
        contexts = (tc[1] for tc in text_context2)
        docs = self.pipe(texts, batch_size=batch_size, disable=disable, component_cfg=component_cfg)
        for doc, context in izip(docs, contexts):
            yield (doc, context)
        return
    docs = (self.make_doc(text) for text in texts)
    if component_cfg is None:
        component_cfg = {}
    # Chain each enabled pipeline component onto the lazy doc stream.
    for name, proc in self.pipeline:
        if name in disable:
            continue
        kwargs = component_cfg.get(name, {})
        # Allow component_cfg to overwrite the top-level kwargs.
        kwargs.setdefault("batch_size", batch_size)
        if hasattr(proc, "pipe"):
            docs = proc.pipe(docs, **kwargs)
        else:
            # Apply the function, but yield the doc
            docs = _pipe(proc, docs, kwargs)
    # Track weakrefs of "recent" documents, so that we can see when they
    # expire from memory. When they do, we know we don't need old strings.
    # This way, we avoid maintaining an unbounded growth in string entries
    # in the string store.
    recent_refs = weakref.WeakSet()
    old_refs = weakref.WeakSet()
    # Keep track of the original string data, so that if we flush old strings,
    # we can recover the original ones. However, we only want to do this if we're
    # really adding strings, to save up-front costs.
    original_strings_data = None
    nr_seen = 0
    for doc in docs:
        yield doc
        if cleanup:
            recent_refs.add(doc)
            if nr_seen < 10000:
                old_refs.add(doc)
                nr_seen += 1
            elif len(old_refs) == 0:
                # All of the previous batch of docs has been garbage
                # collected, so it is now safe to flush strings that only
                # they referenced.
                old_refs, recent_refs = recent_refs, old_refs
                if original_strings_data is None:
                    original_strings_data = list(self.vocab.strings)
                else:
                    keys, strings = self.vocab.strings._cleanup_stale_strings(original_strings_data)
                    self.vocab._reset_cache(keys, strings)
                    self.tokenizer._reset_cache(keys)
                nr_seen = 0
|
def is_linguist_lookup(self, lookup):
    """Returns true if the given lookup is a valid linguist lookup."""
    field = utils.get_field_name_from_lookup(lookup)
    # Only translated (non-concrete) fields qualify; concrete fields keep
    # Django's default "FieldError: Cannot resolve keyword" behavior.
    return (field not in self.concrete_field_names
            and field in self.linguist_field_names)
|
def _eigh_legacy_kwargs(eigvals, turbo):
    """Map the legacy `eigvals`/`turbo` arguments onto whatever keywords
    the installed scipy.linalg.eigh accepts (both were deprecated in
    SciPy 1.5 and removed in SciPy 1.14)."""
    import inspect
    params = inspect.signature(linalg.eigh).parameters
    if 'eigvals' in params:
        # Old SciPy: pass straight through.
        return {'eigvals': eigvals, 'turbo': turbo}
    kwargs = {}
    if eigvals is not None:
        # Modern spelling of the (lo, hi) index subset.
        kwargs['subset_by_index'] = eigvals
    # `turbo` was only a LAPACK-driver hint; modern SciPy picks drivers
    # itself, so it is intentionally dropped here.
    return kwargs


def eigh_robust(a, b=None, eigvals=None, eigvals_only=False,
                overwrite_a=False, overwrite_b=False,
                turbo=True, check_finite=True):
    """Robustly solve the Hermitian generalized eigenvalue problem

    This function robustly solves the Hermitian generalized eigenvalue
    problem ``A v = lambda B v`` in the case that B is not strictly
    positive definite. When B is strictly positive-definite, the result is
    equivalent to scipy.linalg.eigh() within floating-point accuracy.

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric matrix.
        If omitted, identity matrix is assumed.
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None).
        Ignored on SciPy versions that no longer accept it.
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1 <= N <= M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) complex ndarray
        (if eigvals_only == False)
    """
    # Bug fix: `turbo`/`eigvals` were passed unconditionally to
    # scipy.linalg.eigh, which rejects them on SciPy >= 1.14; translate
    # them to whatever the installed SciPy understands.
    kwargs = dict(eigvals_only=eigvals_only, check_finite=check_finite,
                  overwrite_a=overwrite_a, overwrite_b=overwrite_b)
    kwargs.update(_eigh_legacy_kwargs(eigvals, turbo))
    # Check for easy case first: standard eigenvalue problem.
    if b is None:
        return linalg.eigh(a, **kwargs)
    # Compute eigendecomposition of b. Note that b plays the role of `a`
    # in this call, so `overwrite_b` maps onto `overwrite_a`.
    kwargs_b = dict(check_finite=check_finite, overwrite_a=overwrite_b)
    kwargs_b.update(_eigh_legacy_kwargs(None, turbo))
    S, U = linalg.eigh(b, **kwargs_b)
    # Combine a and b on the left hand side via the decomposition of b.
    # Non-positive eigenvalues of b map to +inf so their inverse square
    # roots become zero — this is the "robust" part.
    S[S <= 0] = np.inf
    Sinv = 1. / np.sqrt(S)
    W = Sinv[:, None] * np.dot(U.T, np.dot(a, U)) * Sinv
    output = linalg.eigh(W, **kwargs)
    if eigvals_only:
        return output
    evals, evecs = output
    # Transform eigenvectors back to the original (generalized) basis.
    return evals, np.dot(U, Sinv[:, None] * evecs)
|
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
    '''
    Append a single line to the /etc/hosts file.

    CLI Example:

    .. code-block:: bash

        salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
    '''
    host_list = entries.split(',')
    hosts = parse_hosts(hostsfile=hostsfile)
    if ip_addr in hosts:
        # Drop hosts already mapped to this IP. (Bug fix: the original
        # removed items from host_list while iterating it, which skips
        # elements and could leave duplicates in place.)
        host_list = [host for host in host_list if host not in hosts[ip_addr]]
        if not host_list:
            return 'No additional hosts were added to {0}'.format(hostsfile)
    append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
    with salt.utils.files.fopen(hostsfile, 'a') as fp_:
        fp_.write(salt.utils.stringutils.to_str(append_line))
    return 'The following line was added to {0}:{1}'.format(hostsfile, append_line)
|
def _add_child(self, message):
    """Return a new action with C{message} added as a child.

    Assumes C{message} is not an end message.

    @param message: Either a C{WrittenAction} or a C{WrittenMessage}.
    @raise WrongTask: If C{message} has a C{task_uuid} that differs from the
        action's C{task_uuid}.
    @raise WrongTaskLevel: If C{message} has a C{task_level} that means
        it's not a direct child.
    @return: A new C{WrittenAction}.
    """
    self._validate_message(message)
    # Store the child under its own task level within `_children`.
    return self.transform(('_children', message.task_level), message)
|
def remove_sshkey(host, known_hosts=None):
    '''
    Remove a host from the known_hosts file
    '''
    if known_hosts is None:
        # Prefer $HOME; fall back to the passwd database entry.
        if 'HOME' in os.environ:
            known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])
        else:
            try:
                # Bug fix: the passwd struct attribute is `pw_dir`, not
                # `pwd_dir`; the typo made this fallback always fail
                # silently.
                known_hosts = '{0}/.ssh/known_hosts'.format(
                    pwd.getpwuid(os.getuid()).pw_dir)
            except Exception:
                # Best effort only — we can still run ssh-keygen without it.
                pass
    if known_hosts is not None:
        log.debug('Removing ssh key for %s from known hosts file %s', host, known_hosts)
    else:
        log.debug('Removing ssh key for %s from known hosts file', host)
    # NOTE(review): `known_hosts` is only used for logging; ssh-keygen -R
    # operates on its default file. Confirm whether `-f known_hosts` was
    # intended here.
    # Security fix: pass an argv list instead of shell=True with string
    # interpolation, so a hostile `host` value cannot inject shell commands.
    subprocess.call(['ssh-keygen', '-R', host])
|
def build_payload_from_queued_messages(self, use_queue, shutdown_event):
    """build_payload_from_queued_messages

    Empty the queued messages by building a large ``self.log_payload``

    :param use_queue: queue holding the messages
    :param shutdown_event: shutdown event
    """
    # Returns True when the caller should continue normally, False when
    # the payload hit the size cap and should be flushed immediately.
    not_done = True
    while not_done:
        if self.is_shutting_down(shutdown_event=shutdown_event):
            self.debug_log('build_payload shutting down')
            return True
        self.debug_log('reading from queue={}'.format(str(use_queue)))
        try:
            # Block briefly so shutdown is still noticed between messages.
            msg = use_queue.get(block=True, timeout=self.sleep_interval)
            self.log_payload = self.log_payload + msg
            if self.debug:
                self.debug_log('got queued message={}'.format(msg))
            # Keep draining until the queue reports empty.
            not_done = not self.queue_empty(use_queue=use_queue)
        except Exception as e:
            # Typically queue.Empty from the timeout; distinguish an
            # orderly shutdown from an unexpected failure.
            if self.is_shutting_down(shutdown_event=shutdown_event):
                self.debug_log('helper was shut down '
                               'msgs in the queue may not all '
                               'have been sent')
            else:
                self.debug_log(('helper hit an ex={} shutting down '
                                'msgs in the queue may not all '
                                'have been sent').format(e))
                not_done = True
                self.shutdown_now = True
            return True
        # end of getting log msgs from the queue
        self.debug_log('done reading from queue')
        if self.is_shutting_down(shutdown_event=shutdown_event):
            self.debug_log('build_payload - already shutting down')
            return True
        # If the payload is getting very long,
        # stop reading and send immediately.
        # NOTE(review): an earlier comment here claimed a 50MB limit, but
        # the check below is 524288 bytes (512 KiB) — confirm intent.
        if self.is_shutting_down(shutdown_event=shutdown_event) or len(self.log_payload) >= 524288:
            self.debug_log('payload maximum size exceeded, sending immediately')
            return False
    self.debug_log('build_payload - done')
    return True
|
def delete_by_query(self, indices, doc_types, query, **query_params):
    """Delete documents from one or more indices and one or more types based on a query."""
    request_body = {"query": query.serialize()}
    target_path = self._make_path(indices, doc_types, '_query')
    return self._send_request('DELETE', target_path, request_body, query_params)
|
def value(self):
    """Get the value of the finished async request, if it is available.

    :raises CloudUnhandledError: When not checking value of `error` or `is_done` first
    :return: the payload value
    :rtype: str
    """
    status_code, error_msg, payload = self.check_error()
    # An error status without any payload means the caller skipped the
    # `error`/`is_done` checks.
    if not self._status_ok(status_code) and not payload:
        raise CloudUnhandledError(
            "Attempted to decode async request which returned an error.",
            reason=error_msg,
            status=status_code,
        )
    return self.db[self.async_id]["payload"]
|
def _build_request_url(self, secure, api_method, version):
    """Build a URL for a API method request"""
    # Pick the transport scheme, then fill in the API URL template.
    proto = ANDROID.PROTOCOL_SECURE if secure else ANDROID.PROTOCOL_INSECURE
    return ANDROID.API_URL.format(protocol=proto, api_method=api_method, version=version)
|
def filter_response_size(size):
    """Filter :class:`.Line` objects by the response size (in bytes).

    Specially useful when looking for big file downloads.

    :param size: Minimum amount of bytes a response body weighted.
    :type size: string
    :returns: a function that filters by the response size.
    :rtype: function
    """
    def _as_int(value):
        # A leading '+' only marks the value as a minimum; strip it.
        return int(value[1:]) if value.startswith('+') else int(value)

    threshold = _as_int(size)

    def filter_func(log_line):
        return _as_int(log_line.bytes_read) >= threshold

    return filter_func
|
def add_subsegment(self, subsegment):
    """Add input subsegment as a child subsegment and increment
    reference counter and total subsegments counter of the
    parent segment.
    """
    # Attach the child via the base class, then bump the enclosing
    # segment's counters so it accounts for the new subsegment.
    super(Subsegment, self).add_subsegment(subsegment)
    self.parent_segment.increment()
|
def _unfuse(self):
    """Unfuses the fused RNN in to a stack of rnn cells."""
    assert not self._projection_size, "_unfuse does not support projection layer yet!"
    assert not self._lstm_state_clip_min and not self._lstm_state_clip_max, "_unfuse does not support state clipping yet!"
    # Map the fused-layer mode string to a factory for the equivalent
    # single-layer cell type.
    get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size, activation='relu', **kwargs),
                'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size, activation='tanh', **kwargs),
                'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size, **kwargs),
                'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size, **kwargs)}[self._mode]
    stack = rnn_cell.HybridSequentialRNNCell(prefix=self.prefix, params=self.params)
    with stack.name_scope():
        ni = self._input_size
        for i in range(self._num_layers):
            kwargs = {'input_size': ni,
                      'i2h_weight_initializer': self._i2h_weight_initializer,
                      'h2h_weight_initializer': self._h2h_weight_initializer,
                      'i2h_bias_initializer': self._i2h_bias_initializer,
                      'h2h_bias_initializer': self._h2h_bias_initializer}
            if self._dir == 2:
                # Bidirectional: pair a forward ('l') and reverse ('r') cell.
                stack.add(rnn_cell.BidirectionalCell(get_cell(prefix='l%d_' % i, **kwargs),
                                                     get_cell(prefix='r%d_' % i, **kwargs)))
            else:
                stack.add(get_cell(prefix='l%d_' % i, **kwargs))
            # No dropout after the last layer.
            if self._dropout > 0 and i != self._num_layers - 1:
                stack.add(rnn_cell.DropoutCell(self._dropout))
            # Next layer's input size is this layer's output size
            # (doubled when bidirectional).
            ni = self._hidden_size * self._dir
    return stack
|
def setpreferredapi(api):
    """Set the preferred Qt API.

    Will raise a RuntimeError if a Qt API was already selected.

    Note that QT_API environment variable (if set) will take precedence.
    """
    global __PREFERRED_API
    # Once an API has been selected it cannot be changed.
    if __SELECTED_API is not None:
        raise RuntimeError("A Qt api {} was already selected".format(__SELECTED_API))
    normalized = api.lower()
    if normalized not in ("pyqt4", "pyqt5", "pyside", "pyside2"):
        raise ValueError(api)
    __PREFERRED_API = normalized
|
def validate(self, protocol_version, values=None):
    """Validate child value types and values against protocol_version."""
    # Default to this node's own values when none are supplied.
    target = self.values if values is None else values
    return self.get_schema(protocol_version)(target)
|
def _get_path_for_type(type_):
    """Similar to `_get_path_for` but for only type names."""
    lowered = type_.lower()
    # Core (built-in) types all live as anchors on the index page.
    if lowered in CORE_TYPES:
        return Path('index.html#%s' % lowered)
    # Namespaced types get their own subdirectory.
    if '.' in type_:
        namespace, name = type_.split('.')
        return Path('types', namespace, _get_file_name(name))
    return Path('types', _get_file_name(type_))
|
def plot_high_levels_data(self):
    """Complicated function that draws the high level mean plot on canvas4,
    draws all specimen, sample, or site interpretations according to the
    UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
    polarity of all interpretations displayed, draws sample orientation
    check if on, and if interpretation editor is open it calls the
    interpretation editor to have it draw the same things.
    """
    # self.toolbar4.home()
    # NOTE(review): `high_level` is assigned but never used below.
    high_level = self.level_box.GetValue()
    self.UPPER_LEVEL_NAME = self.level_names.GetValue()
    self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
    # Redraw the equal-area net before plotting anything on it.
    draw_net(self.high_level_eqarea)
    what_is_it = self.level_box.GetValue() + ": " + self.level_names.GetValue()
    # Label the plot with the current level and name.
    self.high_level_eqarea.text(-1.2, 1.15, what_is_it, {'family': self.font_type, 'fontsize': 10 * self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
    if self.ie_open:
        # Mirror the net and label in the interpretation editor window.
        self.ie.draw_net()
        self.ie.write(what_is_it)
    # plot elements directions
    self.plot_high_level_elements()
    # plot elements means
    self.plot_high_level_means()
    # update high level stats after plotting in case of change
    self.update_high_level_stats()
    # check sample orientation
    if self.check_orient_on:
        self.calc_and_plot_sample_orient_check()
    self.canvas4.draw()
    if self.ie_open:
        self.ie.draw()
|
def process_ogr2ogr(self, name, layer, input_path):
    """Process a layer using ogr2ogr."""
    output_path = os.path.join(TEMP_DIRECTORY, '%s.json' % name)
    # Start from a clean slate; ogr2ogr will not overwrite by default.
    if os.path.exists(output_path):
        os.remove(output_path)
    cmd = ['ogr2ogr', '-f', 'GeoJSON', '-clipsrc', self.config['bbox']]
    if 'where' in layer:
        cmd.extend(['-where', '"%s"' % layer['where']])
    cmd.extend([output_path, input_path])
    sys.stdout.write('* Running ogr2ogr\n')
    if self.args.verbose:
        sys.stdout.write(' %s\n' % ' '.join(cmd))
    result = envoy.run(' '.join(cmd))
    if result.status_code != 0:
        sys.stderr.write(result.std_err)
    return output_path
|
def start_index(self):
    """Return the 1-based index of the first item on this page."""
    paginator = self.paginator
    if paginator.count == 0:
        # Special case, return zero if no items.
        return 0
    if self.number == 1:
        return 1
    # Page 1 may hold a different number of items (first_page); every
    # later page holds per_page items.
    return (self.number - 2) * paginator.per_page + paginator.first_page + 1
|
def parse_yaml(self, y):
    '''Parse a YAML specification of a target execution context into this
    object.

    '''
    super(TargetExecutionContext, self).parse_yaml(y)
    # An absent ID maps to the empty string.
    self.id = y['id'] if 'id' in y else ''
    return self
|
def add_child(self, child):
    '''Add child node, and copy all options to it'''
    super(RootNode, self).add_child(child)
    # Propagate the attribute-wrapper option so the child renders
    # attributes the same way as the root.
    child.attr_wrapper = self.attr_wrapper
|
def joint_plot(x, y, marginalBins=50, gridsize=50, plotlimits=None,
               logscale_cmap=False, logscale_marginals=False,
               alpha_hexbin=0.75, alpha_marginals=0.75,
               cmap="inferno_r", marginalCol=None,
               figsize=(8, 8), fontsize=8, *args, **kwargs):
    """Plots some x and y data using hexbins along with a colorbar
    and marginal distributions (X and Y histograms).

    Parameters
    ----------
    x : ndarray
        The x data
    y : ndarray
        The y data
    marginalBins : int, optional
        The number of bins to use in calculating the marginal
        histograms of x and y
    gridsize : int, optional
        The grid size to be passed to matplotlib.pyplot.hexbins
        which sets the gridsize in calculating the hexbins
    plotlimits : float, optional
        The limit of the plot in x and y (it produces a square
        area centred on zero). Defaults to max range of data.
    logscale_cmap : bool, optional
        Sets whether to use a logscale for the colormap.
        Defaults to False.
    logscale_marginals : bool, optional
        Sets whether to use a logscale for the marginals.
        Defaults to False.
    alpha_hexbin : float
        Alpha value to use for hexbins and color map
    alpha_marginals : float
        Alpha value to use for marginal histograms
    cmap : string, optional
        Specifies the colormap to use, see
        https://matplotlib.org/users/colormaps.html
        for options. Defaults to 'inferno_r'
    marginalCol : string, optional
        Specifies color to use for marginals,
        defaults to middle color of colormap
        for a linear colormap and 70% for a
        logarithmic colormap.
    figsize : tuple of 2 values, optional
        Sets the figsize, defaults to (8, 8)
    fontsize : int, optional
        Sets the fontsize for all text and axis ticks.
        Defaults to 8.
    *args, **kwargs : optional
        args and kwargs passed to matplotlib.pyplot.hexbins

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created to house the joint_plot
    axHexBin : matplotlib.axes.Axes object
        The axis for the hexbin plot
    axHistx : matplotlib.axes.Axes object
        The axis for the x marginal plot
    axHisty : matplotlib.axes.Axes object
        The axis for the y marginal plot
    cbar : matplotlib.colorbar.Colorbar
        The color bar object
    """
    with _plt.rc_context({'font.size': fontsize, }):
        # definitions for the axes
        hexbin_marginal_seperation = 0.01
        left, width = 0.2, 0.65 - 0.1  # left = left side of hexbin and hist_x
        bottom, height = 0.1, 0.65 - 0.1  # bottom = bottom of hexbin and hist_y
        bottom_h = height + bottom + hexbin_marginal_seperation
        left_h = width + left + hexbin_marginal_seperation
        cbar_pos = [0.03, bottom, 0.05, 0.02 + width]
        rect_hexbin = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]
        # start with a rectangular Figure
        fig = _plt.figure(figsize=figsize)
        axHexBin = _plt.axes(rect_hexbin)
        axHistx = _plt.axes(rect_histx)
        axHisty = _plt.axes(rect_histy)
        axHisty.set_xticklabels(axHisty.xaxis.get_ticklabels(), y=0, rotation=-90)
        # scale-specific settings (idiom fix: boolean flags compared by
        # truth rather than with `== True`)
        hexbinscale = 'log' if logscale_cmap else None
        scale = 'log' if logscale_marginals else 'linear'
        # set up colors
        # NOTE(review): set_under mutates the colormap object returned by
        # the matplotlib registry — confirm this global side effect is OK.
        cmapOb = _mpl.cm.get_cmap(cmap)
        cmapOb.set_under(color='white')
        # Bug fix: cbarlabel was only assigned inside the
        # `marginalCol is None` branch, so passing an explicit marginalCol
        # raised UnboundLocalError at cbar.set_label below.
        cbarlabel = 'log10(N)' if logscale_cmap else 'N'
        if marginalCol is None:
            marginalCol = cmapOb(0.7) if logscale_cmap else cmapOb(0.5)
        # set up limits (removed unused xmin/ymin locals)
        if plotlimits is None:
            xmax = x.max()
            ymax = y.max()
            plotlimits = (xmax if xmax > ymax else ymax) * 1.1
        # the hexbin plot:
        hb = axHexBin.hexbin(x, y, gridsize=gridsize, bins=hexbinscale,
                             cmap=cmap, alpha=alpha_hexbin,
                             extent=(-plotlimits, plotlimits, -plotlimits, plotlimits),
                             *args, **kwargs)
        axHexBin.axis([-plotlimits, plotlimits, -plotlimits, plotlimits])
        cbaraxes = fig.add_axes(cbar_pos)  # This is the position for the colorbar
        cbar = fig.colorbar(hb, cax=cbaraxes, drawedges=False)
        cbar.solids.set_edgecolor("face")
        cbar.solids.set_rasterized(True)
        cbar.solids.set_alpha(alpha_hexbin)
        cbar.ax.set_yticklabels(cbar.ax.yaxis.get_ticklabels(), y=0, rotation=45)
        cbar.set_label(cbarlabel, labelpad=-25, y=1.05, rotation=0)
        axHexBin.set_xlim((-plotlimits, plotlimits))
        axHexBin.set_ylim((-plotlimits, plotlimits))
        # now determine bin size for the marginal histograms
        # (removed unused `xymax` computation)
        binwidth = (2 * plotlimits) / marginalBins
        lim = plotlimits
        bins = _np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins, color=marginalCol, alpha=alpha_marginals, linewidth=0)
        axHistx.set_yscale(value=scale)
        axHisty.hist(y, bins=bins, orientation='horizontal', color=marginalCol,
                     alpha=alpha_marginals, linewidth=0)
        axHisty.set_xscale(value=scale)
        # hide tick labels on the shared edges while keeping gridlines
        _plt.setp(axHistx.get_xticklabels(), visible=False)
        _plt.setp(axHisty.get_yticklabels(), visible=False)
        axHistx.set_xlim(axHexBin.get_xlim())
        axHisty.set_ylim(axHexBin.get_ylim())
        return fig, axHexBin, axHistx, axHisty, cbar
|
def crypto_bloom_filter(record,      # type: Sequence[Text]
                        tokenizers,  # type: List[Callable[[Text, Optional[Text]], Iterable[Text]]]
                        schema,      # type: Schema
                        keys         # type: Sequence[Sequence[bytes]]
                        ):
    # type: (...) -> Tuple[bitarray, Text, int]
    """Computes the composite Bloom filter encoding of a record.

    Using the method from
    http://www.record-linkage.de/-download=wp-grlc-2011-02.pdf

    :param record: plaintext record tuple. E.g. (index, name, dob, gender)
    :param tokenizers: A list of tokenizers. A tokenizer is a function that
        returns tokens from a string.
    :param schema: Schema
    :param keys: Keys for the hash functions as a tuple of lists of bytes.

    :return: 3-tuple:
        - bloom filter for record as a bitarray
        - first element of record (usually an index)
        - number of bits set in the bloomfilter
    """
    # The filter is built at the pre-folding length, then XOR-folded down
    # to schema.l bits.
    hash_l = schema.l * 2 ** schema.xor_folds
    bloomfilter = bitarray(hash_l)
    bloomfilter.setall(False)
    for (entry, tokenize, field, key) in zip(record, tokenizers, schema.fields, keys):
        fhp = field.hashing_properties
        if fhp:
            # Fields without hashing properties (e.g. the index) are
            # not encoded into the filter.
            ngrams = list(tokenize(field.format_value(entry)))
            hash_function = hashing_function_from_properties(fhp)
            bloomfilter |= hash_function(ngrams, key, fhp.ks(len(ngrams)), hash_l, fhp.encoding)
    bloomfilter = fold_xor(bloomfilter, schema.xor_folds)
    # (Cleanup: removed the unused pre-/post-fold popcount locals c1/c2.)
    return bloomfilter, record[0], bloomfilter.count()
|
def options(self, *args, **kwargs):
    '''Return CORS headers for preflight requests'''
    # Allow X-Auth-Token in requests: echo back whichever headers the
    # client asked permission for. Filter `allowed_headers` here if needed.
    request_headers = self.request.headers.get('Access-Control-Request-Headers')
    allowed_headers = request_headers.split(',')
    self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers))
    # Allow X-Auth-Token in responses
    self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token')
    # Allow all methods
    self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')
    # Preflight responses have no body.
    self.set_status(204)
    self.finish()
|
def _css_helper(self):
    """Add CSS links for the current page and for the plugins"""
    # Stylesheets contributed by plugins (skip hooks that returned None).
    entries = [entry for entry in self._plugin_manager.call_hook("css") if entry is not None]
    # Load CSS for the current page (an earlier comment said "javascript",
    # but this helper only emits stylesheet links).
    entries += self._get_ctx()["css"]
    entries = ["<link href='" + entry + "' rel='stylesheet'>" for entry in entries]
    return "\n".join(entries)
|
def _handle_eio_disconnect(self, sid):
    """Handle Engine.IO disconnect event."""
    self._handle_disconnect(sid, '/')
    # Drop the stored environ for this connection, if any.
    self.environ.pop(sid, None)
|
def nocomment(astr, com='!'):
    """just like the comment in python.
    removes any text after the phrase 'com'
    """
    # For each line keep only the text before the first marker;
    # split(com, 1)[0] leaves lines without a marker untouched.
    stripped = [line.split(com, 1)[0] for line in astr.splitlines()]
    return '\n'.join(stripped)
|
def push(self, bot, channel_type, ar, user_id):
    """Use this method to push message to user of bot.

    The message should be packed into ActionResponse object.
    This allows to push text messages, buttons, images.
    This also allows to force current state of user.

    :param bot: bot that will push user
    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :param ar: message packed in response object
    :type ar: ActionResponse
    :param user_id: user id in used channel
    :type user_id: str
    """
    # Resolve the per-bot endpoint dynamically, then POST the payload.
    endpoint = self.client.push.__getattr__(bot.name)
    endpoint(_method="POST",
             _params=dict(id=user_id, channel=channel_type),
             _json=ar.to_json())
|
def body(self):
    """Yields the body of the buffered file."""
    for stream, owns_handle in self.files:
        try:
            label = os.path.basename(stream.name)
        except AttributeError:
            # File-like objects without a `name` get an empty label.
            label = ''
        # Envelope header, file contents, envelope footer — in order.
        for piece in self.gen_chunks(self.envelope.file_open(label)):
            yield piece
        for piece in self.file_chunks(stream):
            yield piece
        for piece in self.gen_chunks(self.envelope.file_close()):
            yield piece
        if owns_handle:
            stream.close()
    for piece in self.close():
        yield piece
|
def stop(opts, bot, event):
    """Usage: stop [--name=<name>] [--notify=<slack_username>]

    Stop a timer.

    _name_ works the same as for `start`.

    If given _slack_username_, reply with an at-mention to the given user.
    """
    timer_name = opts['--name']
    notify_user = opts['--notify']
    # Popping the timer both stops it and yields its start time.
    elapsed = datetime.datetime.now() - bot.timers.pop(timer_name)
    response = bot.stop_fmt.format(elapsed)
    if notify_user:
        # The slack api (provided by https://github.com/os/slacker) is available on all bots.
        mention = ''
        for member in bot.slack.users.list().body['members']:
            if member['name'] == notify_user:
                mention = "<@%s>" % member['id']
                break
        response = "%s: %s" % (mention, response)
    return response
|
def precesion_nutation(date):
    """Precession/nutation joint rotation matrix for the IAU2010 model"""
    # NOTE(review): "precesion" is a typo for "precession"; the name is
    # kept for backward compatibility with existing callers.
    # X, Y, s come from _xys(date) — presumably the CIP coordinates and
    # the CIO locator of the IAU 2010 model; confirm against _xys.
    X, Y, s = _xys(date)
    d = np.arctan(np.sqrt((X ** 2 + Y ** 2) / (1 - X ** 2 - Y ** 2)))
    a = 1 / (1 + np.cos(d))
    # Rotation built from X/Y, composed with a rotation by s about axis 3.
    return np.array([[1 - a * X ** 2, -a * X * Y, X],
                     [-a * X * Y, 1 - a * Y ** 2, Y],
                     [-X, -Y, 1 - a * (X ** 2 + Y ** 2)]]) @ rot3(s)
|
def _is_blacklisted_filename(filepath):
    """Checks if the filename matches filename_blacklist

    blacklist is a list of filenames (str) and/or file patterns (dict)

    string, specifying an exact filename to ignore
        [".DS_Store", "Thumbs.db"]

    mapping (dict), where each dict contains:
        'match' - (if the filename matches the pattern, the filename
            is blacklisted)
        'is_regex' - if True, the pattern is treated as a
            regex. If False, simple substring check is used (if
            'match' in filename). Default is False
        'full_path' - if True, full path is checked. If False, only
            filename is checked. Default is False.
        'exclude_extension' - if True, the extension is removed
            from the file before checking. Default is False.

    :param str filepath: an absolute path and filename to check against
        the blacklist
    :returns: flag indicating if the file was matched in the blacklist
    :rtype: bool
    """
    blacklist = cfg.CONF.filename_blacklist
    if not blacklist:
        return False
    filename = os.path.basename(filepath)
    stem = os.path.splitext(filename)[0]
    for rule in blacklist:
        if isinstance(rule, dict):
            # Choose which form of the name this rule applies to;
            # full_path takes precedence over exclude_extension.
            candidate = stem if rule.get('exclude_extension', False) else filename
            if rule.get('full_path', False):
                candidate = filepath
            if rule.get('is_regex', False):
                matched = re.match(rule['match'], candidate) is not None
            else:
                matched = rule['match'] in candidate
        else:
            # Plain strings are exact filename matches.
            matched = (filename == rule)
        if matched:
            return True
    return False
|
def app_add_developers(app_name_or_id, alias=None, input_params=None, always_retry=True, **kwargs):
    """Invokes the /app-xxxx/addDevelopers API method.

    :param app_name_or_id: App name or ID, optionally qualified by *alias*.
    :param alias: Optional version/tag appended as ``/alias`` to the route.
    :param input_params: JSON-serializable request body (defaults to ``{}``).
    :param always_retry: Passed through to :func:`DXHTTPRequest`.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method:-/app-xxxx%5B/yyyy%5D/addDevelopers
    """
    # BUG FIX: the default was a shared mutable dict ({}); use the
    # None-sentinel idiom instead.
    if input_params is None:
        input_params = {}
    fully_qualified_version = app_name_or_id + (('/' + alias) if alias else '')
    return DXHTTPRequest('/%s/addDevelopers' % fully_qualified_version, input_params,
                         always_retry=always_retry, **kwargs)
|
def hierarchy_pos(links, root, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5, pos=None, parent=None, min_dx=0.03):
    '''Compute positions for a tree layout rooted at ``root``.

    If there is a cycle that is reachable from root, then this will see
    infinite recursion.

    links: the graph edges
    root: the root node of current branch
    width: horizontal space allocated for this branch - avoids overlap with other branches
    vert_gap: gap between levels of hierarchy
    vert_loc: vertical location of root
    xcenter: horizontal location of root
    pos: a dict saying where all nodes go if they have been assigned
    parent: parent of this branch.
    '''
    if pos is None:
        pos = {}
    pos[root] = (xcenter, vert_loc)
    children = get_sandbox_neighbours(links, root)
    if children:
        # Spread children evenly, never tighter than min_dx.
        dx = max(width / len(children), min_dx)
        child_x = pos[root][0] - (len(children) - 1) * dx / 2 - dx
        for child in children:
            child_x += dx
            pos = hierarchy_pos(links, child, width=dx, vert_gap=vert_gap,
                                vert_loc=vert_loc - vert_gap, xcenter=child_x,
                                pos=pos, parent=root)
    return pos
|
def _initialize_parameters ( state_machine , n_features ) :
"""Helper to create initial parameter vector with the correct shape ."""
|
return np . zeros ( ( state_machine . n_states + state_machine . n_transitions , n_features ) )
|
def get_app_model_voice(self, app_model_item):
    """App Model voice.

    Returns the js menu compatible voice dict if the user can see it,
    None otherwise.
    """
    # Both keys are mandatory for a model menu voice.
    if app_model_item.get('name') is None:
        raise ImproperlyConfigured('Model menu voices must have a name key')
    if app_model_item.get('app') is None:
        raise ImproperlyConfigured('Model menu voices must have an app key')
    return self.get_model_voice(app_model_item.get('app'), app_model_item)
|
def simulate(self, time, feedforwardInputI, feedforwardInputE, v, recurrent=True, dt=None, envelope=False, inputNoise=None, sampleFreq=1, startFrom=0, save=True):
    """Run the network forward for ``time`` units.

    :param time: Amount of time to simulate. Divided into chunks of len dt.
    :param feedforwardInputI: feedforward input to inhibitory cells. Must have
        shape (numInhibitory,). Should be total input over period time.
    :param feedforwardInputE: feedforward input to excitatory cells. Must have
        shape (numExcitatory,). Applied equally to ER and EL cells.
        Should be total input over period time.
    :param v: Velocity. Should be a scalar.
    :param recurrent: whether or not recurrent connections should be used.
        Set to False during training to follow the methods of the original
        model.
    :param dt: Optional timestep override; ``self.dt`` is restored afterwards.
    :param inputNoise: If given, inputs are scaled each step by uniform noise
        in [0, inputNoise).
    :param sampleFreq: Record only every ``sampleFreq``-th step.
    :param startFrom: Time before which no samples are recorded.
    :param save: Whether to record and return inhibitory activations.
    :return: Sampled inhibitory activations if ``save``, else None.
    """
    # Set up plotting
    if self.plotting:
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(311)
        self.ax2 = self.fig.add_subplot(312)
        self.ax3 = self.fig.add_subplot(313)
        plt.ion()
        self.ax1.set_xlabel("Excitatory population activity")
        self.ax2.set_xlabel("Inhibitory population activity")
        plt.tight_layout()
        self.fig.show()
        self.fig.canvas.draw()
    # Randomize starting activations and clear all history traces.
    self.activationsI = np.random.random_sample(self.activationsI.shape)
    self.activationsEL = np.random.random_sample(self.activationsEL.shape)
    self.activationsER = np.random.random_sample(self.activationsER.shape)
    self.activationsP.fill(0)
    self.activationHistoryI.fill(0)
    self.activationHistoryEL.fill(0)
    self.activationHistoryER.fill(0)
    # Temporarily override the timestep, restoring it at the end.
    oldDt = self.dt
    if dt is not None:
        self.dt = dt
    times = np.arange(0, time, self.dt)
    samples = np.arange(startFrom, time, self.dt)
    # BUG FIX: this used float division (len(samples) / sampleFreq), which
    # makes np.zeros raise a TypeError under Python 3. Use ceiling integer
    # division so every sampled step has a row.
    numSamples = -(-len(samples) // sampleFreq)
    results = np.zeros((numSamples, len(self.activationsI)))
    s = 0
    for i, t in enumerate(times):
        if inputNoise is not None:
            noisesI = np.random.random_sample(feedforwardInputI.shape) * inputNoise
            noisesE = np.random.random_sample(feedforwardInputE.shape) * inputNoise
        else:
            noisesE = 1.
            noisesI = 1.
        self.activationsP = np.zeros(self.activationsP.shape)
        self.update(feedforwardInputI * noisesI, feedforwardInputE * noisesE, v,
                    recurrent, envelope=envelope)
        if i % sampleFreq == 0 and t >= startFrom and save:
            results[s] = self.activationsI
            print("At {}".format(t))
            s += 1
        if self.plotting:
            plotTime = np.abs(np.mod(t, PLOT_INTERVAL))
            if plotTime <= 0.00001 or np.abs(plotTime - PLOT_INTERVAL) <= 0.00001:
                self.plotActivation(time=t, velocity=v)
    self.dt = oldDt
    if save:
        return results
|
def getEncodableAttributes(self, obj, codec=None):
    """Must return a C{dict} of attributes to be encoded, even if its empty.

    @param obj: The object whose attributes are being collected.
    @param codec: An optional argument that will contain the encoder
        instance calling this function.
    @since: 0.5
    """
    # Lazily build the cached attribute metadata on first use.
    if not self._compiled:
        self.compile()
    # dict-like classes encode as a plain copy of their items.
    if self.is_dict:
        return dict(obj)
    # Fast path: fully dynamic classes just dump the instance __dict__.
    if self.shortcut_encode and self.dynamic:
        return obj.__dict__.copy()
    attrs = {}
    # Statically declared attributes are always present in the output,
    # encoded as pyamf.Undefined when missing on the instance.
    if self.static_attrs:
        for attr in self.static_attrs:
            attrs[attr] = getattr(obj, attr, pyamf.Undefined)
    if not self.dynamic:
        # Non-dynamic classes may still expose extra encodable properties.
        if self.non_static_encodable_properties:
            for attr in self.non_static_encodable_properties:
                attrs[attr] = getattr(obj, attr)
        return attrs
    dynamic_props = util.get_properties(obj)
    if not self.shortcut_encode:
        dynamic_props = set(dynamic_props)
        if self.encodable_properties:
            dynamic_props.update(self.encodable_properties)
        # Static attrs were already handled above; excluded attrs are
        # never encoded.
        if self.static_attrs:
            dynamic_props.difference_update(self.static_attrs)
        if self.exclude_attrs:
            dynamic_props.difference_update(self.exclude_attrs)
    for attr in dynamic_props:
        attrs[attr] = getattr(obj, attr)
    # Replace proxied attributes with their proxy objects when encoding
    # through a codec that tracks a context.
    # NOTE(review): iteritems() is Python 2 only -- this module appears to
    # target Python 2 (pyamf); confirm before running under Python 3.
    if self.proxy_attrs is not None and attrs and codec:
        context = codec.context
        for k, v in attrs.copy().iteritems():
            if k in self.proxy_attrs:
                attrs[k] = context.getProxyForObject(v)
    # Rename attributes according to the synonym mapping; a sentinel object
    # distinguishes "absent" from a legitimate None value.
    if self.synonym_attrs:
        missing = object()
        for k, v in self.synonym_attrs.iteritems():
            value = attrs.pop(k, missing)
            if value is missing:
                continue
            attrs[v] = value
    return attrs
|
def get_pipeline_steps(pipeline, steps_group):
    """Get the steps attribute of module pipeline.

    If there is no steps sequence on the pipeline, return None. Guess you
    could theoretically want to run a pipeline with nothing in it.

    :param pipeline: Mapping representing the pipeline definition.
    :param steps_group: Key of the steps sequence to retrieve.
    :return: The steps sequence, or None if absent or empty.
    """
    logger.debug("starting")
    assert pipeline
    assert steps_group
    logger.debug(f"retrieving {steps_group} steps from pipeline")
    if steps_group in pipeline:
        steps = pipeline[steps_group]
        if steps is None:
            # FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning(
                f"{steps_group}: sequence has no elements. So it won't do "
                "anything.")
            logger.debug("done")
            return None
        steps_count = len(steps)
        logger.debug(f"{steps_count} steps found under {steps_group} in "
                     "pipeline definition.")
        logger.debug("done")
        return steps
    logger.debug(
        f"pipeline doesn't have a {steps_group} collection. Add a "
        f"{steps_group}: sequence to the yaml if you want {steps_group} "
        "actually to do something.")
    logger.debug("done")
    return None
|
def get_family_notes(family, data_dir=None):
    '''Return a string representing the notes about a basis set family.

    If the notes are not found, an empty string is returned.
    '''
    notes_file = _family_notes_path(family, data_dir)
    raw_notes = fileio.read_notes_file(notes_file)
    if raw_notes is None:
        raw_notes = ""
    # Notes may contain reference citations that need expanding.
    return notes.process_notes(raw_notes, get_reference_data(data_dir))
|
def _create_model(model, ident, **params):
    """Create a model by cloning and then setting params.

    :param model: Estimator to clone.
    :param ident: Identifier recorded as ``model_id`` in the meta dict.
    :param params: Parameters applied to the clone via ``set_params``.
    :return: Tuple of (cloned model, metadata dict).
    """
    # FIX: removed debugging leftover pdb=True, which dropped into an
    # interactive debugger on error and hangs non-interactive runs.
    with log_errors():
        model = clone(model).set_params(**params)
        return model, {"model_id": ident, "params": params, "partial_fit_calls": 0}
|
def upload_report(server, payload, timeout=HQ_DEFAULT_TIMEOUT):
    """Upload a report to the server.

    :param payload: Dictionary (JSON serializable) of crash data.
    :return: server response, or False if the upload failed
    """
    try:
        response = requests.post(
            server + '/reports/upload',
            data=json.dumps(payload),
            timeout=timeout,
        )
    except Exception as exc:
        # Best-effort upload: log and signal failure rather than raising.
        logging.error(exc)
        return False
    return response
|
def apply_on_csv_string(rules_str, func):
    """Split a given string by comma, trim whitespace on the resulting
    strings and apply a given ``func`` to each item.

    :param rules_str: Comma-separated string.
    :param func: Callable invoked once per trimmed item.
    """
    # FIX: the loop variable previously shadowed the builtin ``str``.
    for item in rules_str.split(","):
        func(item.strip())
|
def getoptS(X, Y, M_E, E):
    '''Find Sopt given X, Y.

    Solves the least-squares system for the core matrix S in the
    low-rank factorization X S Y^T restricted to the observed mask E.
    '''
    n, r = X.shape
    # Right-hand side: vec(X^T M_E Y), row-major.
    C = np.dot(np.dot(X.T, M_E), Y).flatten()
    A = np.zeros((r * r, r * r))
    for i in range(r):
        for j in range(r):
            col = j * r + i
            # Response of the objective to a unit entry S[i, j].
            outer = np.dot(X[:, i, None], Y[:, j, None].T) * E
            A[:, col] = np.dot(np.dot(X.T, outer), Y).flatten()
    S = np.linalg.solve(A, C)
    return np.reshape(S, (r, r)).T
|
def recursive_dict_to_dict(rdict):
    """Convert a recursive dict to a plain ol' dict."""
    # Only defaultdict values are recursed; plain values pass through.
    return {
        key: recursive_dict_to_dict(value) if isinstance(value, defaultdict) else value
        for key, value in rdict.items()
    }
|
def build_standard_field(self, field_name, model_field):
    """Creates a default instance of a basic non-relational field.

    Maps a Django model field to a (serializer field class, kwargs) pair,
    deriving required/allow_null/allow_blank/read_only/default/label/help
    kwargs from the model field's options.
    """
    kwargs = {}
    # Optional model fields become non-required serializer fields.
    if model_field.null or model_field.blank:
        kwargs['required'] = False
    if model_field.null:
        kwargs['allow_null'] = True
    # Only text-like fields support allow_blank.
    if model_field.blank and (issubclass(model_field.__class__, models.CharField) or (issubclass(model_field.__class__, models.TextField))):
        kwargs['allow_blank'] = True
    # Auto PKs and non-editable fields are exposed read-only.
    if isinstance(model_field, models.AutoField) or not model_field.editable:
        kwargs['read_only'] = True
    if model_field.has_default():
        kwargs['default'] = model_field.get_default()
    if issubclass(model_field.__class__, models.TextField):
        kwargs['style'] = {'base_template': 'textarea.html'}
    if model_field.verbose_name is not None:
        kwargs['label'] = model_field.verbose_name
    if model_field.help_text is not None:
        kwargs['help_text'] = model_field.help_text
    # TODO: TypedChoiceField?
    if model_field.flatchoices:  # This ModelField contains choices
        kwargs['choices'] = model_field.flatchoices
        if model_field.null:
            kwargs['empty'] = None
        # Choice fields short-circuit all the type-specific mapping below.
        return (ChoiceField, kwargs)
    # put this below the ChoiceField because min_value isn't a valid initializer
    if issubclass(model_field.__class__, models.PositiveIntegerField) or issubclass(model_field.__class__, models.PositiveSmallIntegerField):
        kwargs['min_value'] = 0
    # Per-field-type extra kwargs copied straight off the model field.
    attribute_dict = {
        models.CharField: ['max_length'],
        models.CommaSeparatedIntegerField: ['max_length'],
        models.DecimalField: ['max_digits', 'decimal_places'],
        models.EmailField: ['max_length'],
        models.FileField: ['max_length'],
        models.ImageField: ['max_length'],
        models.SlugField: ['max_length'],
        models.URLField: ['max_length'],
    }
    # === django-rest-framework-hstore specific ====
    # if available, use __basefield__ attribute instead of __class__
    # this will cause DRF to pick the correct DRF-field
    key = getattr(model_field, '__basefield__', model_field.__class__)
    if key in attribute_dict:
        attributes = attribute_dict[key]
        for attribute in attributes:
            kwargs.update({attribute: getattr(model_field, attribute)})
    # Schema-enabled hstore DictionaryFields are flagged for the serializer.
    if model_field.__class__ == DictionaryField and model_field.schema:
        kwargs['schema'] = True
    # Try the mapping by class first, then by class name, finally falling
    # back to the default DRF behaviour.
    try:
        return (self.serializer_field_mapping[key], kwargs)
    except KeyError:
        pass
    try:
        return (self.serializer_field_mapping[model_field.__class__.__name__], kwargs)
    except KeyError:
        # return ModelField(model_field=model_field, **kwargs)
        return super(HStoreSerializer, self).build_standard_field(field_name, model_field)
|
def has_overflow(self, params):
    """detect inf and nan"""
    bad_count = 0
    for param in params:
        # Parameters without gradients cannot overflow.
        if param.grad_req == 'null':
            continue
        grad = param.list_grad()[0]
        bad_count += mx.nd.contrib.isnan(grad).sum()
        bad_count += mx.nd.contrib.isinf(grad).sum()
    # NDArray is implicitly converted to bool
    if bad_count == 0:
        return False
    return True
|
def neighbors(self, node_id):
    """Find all the nodes where there is an edge from the specified node
    to that node.

    Returns a list of node ids.
    """
    edge_ids = self.get_node(node_id)['edges']
    # Each edge's second vertex is the destination node.
    return [self.get_edge(eid)['vertices'][1] for eid in edge_ids]
|
def collate(self, merge_type=None, drop=None, drop_constant=False):
    """Collate allows reordering nested containers.

    Collation allows collapsing nested mapping types by merging
    their dimensions. In simple terms it merges nested containers
    into a single merged type.

    In the simple case a HoloMap containing other HoloMaps can
    easily be joined in this way. However collation is
    particularly useful when the objects being joined are deeply
    nested, e.g. you want to join multiple Layouts recorded at
    different times, collation will return one Layout containing
    HoloMaps indexed by Time. Changing the merge_type will allow
    merging the outer Dimension into any other UniformNdMapping
    type.

    Args:
        merge_type: Type of the object to merge with
        drop: List of dimensions to drop
        drop_constant: Drop constant dimensions automatically

    Returns:
        Collated Layout or HoloMap
    """
    from .element import Collator
    # FIX: avoid a shared mutable default for ``drop``.
    if drop is None:
        drop = []
    merge_type = merge_type if merge_type else self.__class__
    return Collator(self, merge_type=merge_type, drop=drop,
                    drop_constant=drop_constant)()
|
def bm3_g(p, v0, g0, g0p, k0, k0p):
    """Calculate shear modulus at given pressure.

    Not fully tested with mdaap.

    :param p: pressure
    :param v0: volume at reference condition
    :param g0: shear modulus at reference condition
    :param g0p: pressure derivative of shear modulus at reference condition
    :param k0: bulk modulus at reference condition
    :param k0p: pressure derivative of bulk modulus at reference condition
    :return: shear modulus at high pressure
    """
    shear_params = [g0, g0p]
    bulk_params = [v0, k0, k0p]
    return cal_g_bm3(p, shear_params, bulk_params)
|
def _update_failure_type(self):
    """Updates the failure type of this Note's Job.

    Set the linked Job's failure type to that of the most recent JobNote or
    set to Not Classified if there are no JobNotes.

    This is called when JobNotes are created (via .save()) and deleted (via
    .delete()) and is used to resolve the FailureClassification which has
    been denormalised onto Job.
    """
    # update the job classification
    latest_note = (JobNote.objects.filter(job=self.job)
                   .order_by('-created').first())
    if latest_note:
        self.job.failure_classification_id = latest_note.failure_classification.id
    else:
        self.job.failure_classification_id = FailureClassification.objects.get(
            name='not classified').id
    self.job.save()
|
def update_stock_codes():
    """Fetch all stock IDs and store them under the all_stock_code path.

    Downloads the exchange-listed code list and writes it as JSON
    ({"stock": [...]}) to ``stock_code_path()``.
    """
    all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
    grep_stock_codes = re.compile(r"~(\d+)`")
    response = requests.get(all_stock_codes_url)
    # FIX: fail loudly on HTTP errors instead of parsing an error page.
    response.raise_for_status()
    all_stock_codes = grep_stock_codes.findall(response.text)
    with open(stock_code_path(), "w") as f:
        json.dump(dict(stock=all_stock_codes), f)
|
def add(self, original_index, operation):
    """Add an operation to this Run instance.

    :Parameters:
      - `original_index`: The original index of this operation
        within a larger bulk operation.
      - `operation`: The operation document.
    """
    # Record the caller's position so results can be mapped back to the
    # original bulk-request order later.
    self.index_map.append(original_index)
    self.ops.append(operation)
|
def spanning_tree_count(graph: nx.Graph) -> int:
    """Return the number of unique spanning trees of a graph, using
    Kirchhoff's matrix tree theorem."""
    laplacian = nx.laplacian_matrix(graph).toarray()
    # Any first cofactor of the Laplacian counts the spanning trees; the
    # determinant is rounded since it is computed in floating point.
    minor = laplacian[:-1, :-1]
    return int(round(np.linalg.det(minor)))
|
def tuple_pairs_to_dict(input_tuple):
    """Build a dictionary from successive pairs of elements in a tuple.

    Each pair of elements becomes a key-value pair in the resulting
    dictionary. If there is an odd number of elements in the tuple, the
    last element is ignored.

    :param input_tuple: A tuple of elements

    Example:
    >>> tuple_pairs_to_dict((1, 5, 7, 10, 13, 5))
    {1: 5, 7: 10, 13: 5}
    >>> tuple_pairs_to_dict((1, 2, 3, 4, 5, 6))
    {1: 2, 3: 4, 5: 6}

    :return: A dictionary where each key-value pair is derived from two
        successive elements in the input tuple.
    """
    # zip truncates at the shorter slice, silently dropping a trailing
    # unpaired element.
    keys = input_tuple[0::2]
    values = input_tuple[1::2]
    return dict(zip(keys, values))
|
def pick_event(self):
    """Extracts an event (if available) from the Events queue."""
    logger.debug("checking event queue")
    event = snap7.snap7types.SrvEvent()
    ready = ctypes.c_int32()
    result = self.library.Srv_PickEvent(self.pointer, ctypes.byref(event),
                                        ctypes.byref(ready))
    check_error(result)
    # ``ready`` is nonzero when an event was dequeued.
    if not ready:
        logger.debug("no events ready")
        return None
    logger.debug("one event ready: %s" % event)
    return event
|
def _host_libc(self):
    """Use the --libc-dir option if provided, otherwise invoke a host
    compiler to find libc dev."""
    libc_dir = self.get_options().libc_dir
    if not libc_dir:
        return self._get_host_libc_from_host_compiler()
    crti_path = os.path.join(libc_dir, self._LIBC_INIT_OBJECT_FILE)
    if os.path.isfile(crti_path):
        return HostLibcDev(crti_object=crti_path,
                           fingerprint=hash_file(crti_path))
    raise self.HostLibcDevResolutionError(
        "Could not locate {} in directory {} provided by the --libc-dir option."
        .format(self._LIBC_INIT_OBJECT_FILE, libc_dir))
|
def send_email(self, body=None, subject=None, to=None, cc=None, bcc=None, send_as=None, attachments=None):
    """Sends an email in one method, a shortcut for creating an instance of
    :class:`Message <pyOutlook.core.message.Message>`.

    Args:
        body (str): The body of the email
        subject (str): The subject of the email
        to (list): A list of :class:`Contacts <pyOutlook.core.contact.Contact>`
        cc (list): A list of :class:`Contacts <pyOutlook.core.contact.Contact>` which will be added to the
            'Carbon Copy' line
        bcc (list): A list of :class:`Contacts <pyOutlook.core.contact.Contact>` which will be blindly added
            to the email
        send_as (Contact): A :class:`Contact <pyOutlook.core.contact.Contact>` whose email the OutlookAccount
            has access to
        attachments (list): A list of dictionaries with two parts
            [1] 'name' - a string which will become the file's name
            [2] 'bytes' - the bytes of the file.
    """
    # FIX: the default for ``to`` was the builtin ``list`` type itself
    # (``to=list``), which passed the class object to Message when the
    # argument was omitted; default to None instead.
    email = Message(self.access_token, body, subject, to, cc=cc, bcc=bcc, sender=send_as)
    if attachments is not None:
        for attachment in attachments:
            email.attach(attachment.get('bytes'), attachment.get('name'))
    email.send()
|
def _parse_ISBN_EAN(details):
    '''Parse ISBN and EAN.

    Args:
        details (obj): HTMLElement containing slice of the page with details.

    Returns:
        (ISBN, EAN): Tuple with two strings or two None.
    '''
    raw = _get_td_or_none(details, "ctl00_ContentPlaceHolder1_tblRowIsbnEan")
    if not raw:
        return None, None
    ean = None
    if "/" in raw:
        # ISBN and EAN are stored in the same string.
        isbn, ean = raw.split("/")
        isbn = isbn.strip()
        ean = ean.strip()
    else:
        isbn = raw.strip()
    # Normalize an empty ISBN to None.
    if not isbn:
        isbn = None
    return isbn, ean
|
def parse_simulation(self, node):
    """Parses <Simulation>

    @param node: Node containing the <Simulation> element
    @type node: xml.etree.Element
    """
    # Expose the active simulation to nested-tag handlers, then clear it.
    sim = self.current_component_type.simulation
    self.current_simulation = sim
    self.process_nested_tags(node)
    self.current_simulation = None
|
def get_vnetwork_hosts_input_name(self, **kwargs):
    """Auto Generated Code"""
    # Build <get_vnetwork_hosts><input><name>...</name></input></...>
    root = ET.Element("get_vnetwork_hosts")
    input_el = ET.SubElement(root, "input")
    name_el = ET.SubElement(input_el, "name")
    name_el.text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
|
def write_relationships(self, file_name, flat=True):
    """This module will output the eDNA tags which are used inside each
    calculation.

    If flat=True, data will be written flat, like:
        ADE1CA01, ADE1PI01, ADE1PI02
    If flat=False, data will be written in the non-flat way, like:
        ADE1CA01, ADE1PI01
        ADE1CA01, ADE1PI02

    :param file_name: the output filename to write the relationships,
        which should include the '.csv' extension
    :param flat: True or False
    """
    write_fn = (self._write_relationships_flat if flat
                else self._write_relationships_non_flat)
    with open(file_name, 'w') as writer:
        write_fn(writer)
|
def load(image):
    r"""Loads the ``image`` and returns a ndarray with the image's pixel
    content as well as a header object.

    The header can, with restrictions, be used to extract additional
    meta-information about the image (e.g. using the methods in
    `~medpy.io.Header`). Additionally it serves as meta-data container that
    can be passed to `~medpy.io.save.save` when the altered image is saved
    to the hard drive again. Note that the transfer of meta-data is only
    possible, and even then not guaranteed, when the source and target
    image formats are the same.

    MedPy relies on SimpleITK, which enables the power of ITK for image
    loading and saving. The supported image file formats should include at
    least the following.

    Medical formats:
      - ITK MetaImage (.mha/.raw, .mhd)
      - NIfTI (.nia, .nii, .nii.gz, .hdr, .img, .img.gz)
      - Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
      - DICOM (.dcm, .dicom) and DICOM series (<directory>/)
      - Nrrd (.nrrd, .nhdr)
      - MINC (.mnc, .MNC)
      - GIPL (.gipl, .gipl.gz)

    Microscopy formats:
      - MRC (.mrc, .rec)
      - Bio-Rad (.pic, .PIC)
      - LSM (Zeiss) (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM)
      - SDT (.sdt)

    Visualization formats:
      - VTK images (.vtk)

    Other formats:
      - PNG, JPEG, TIFF, BMP, HDF5 (.h5, .hdf5, .he5), MSX-DOS Screen-x
        (.ge4, .ge5)

    For information about which image formats, dimensionalities and pixel
    data types your current configuration supports, run
    `python3 tests/support.py > myformats.log`.

    Further information see https://simpleitk.readthedocs.io.

    Parameters
    ----------
    image : string
        Path to the image to load.

    Returns
    -------
    image_data : ndarray
        The image data as numpy array with order `x,y,z,c`.
    image_header : Header
        The image metadata as :mod:`medpy.io.Header`.

    Raises
    ------
    ImageLoadingError
        If the image could not be loaded due to some reason.
    """
    logger = Logger.getInstance()
    logger.info('Loading image {}...'.format(image))
    if not os.path.exists(image):
        raise ImageLoadingError('The supplied image {} does not exist.'.format(image))
    if os.path.isdir(image):
        # !TODO: this does not load the meta-data, find a way to load it from a series, too
        # A directory is interpreted as a DICOM series.
        logger.info('Loading image as DICOM series. If more than one found in folder {} defaulting to first.'.format(image))
        sitkimage = sitk.ReadImage(sitk.ImageSeriesReader_GetGDCMSeriesFileNames(image))
    else:
        sitkimage = sitk.ReadImage(image)
    # Make image array data and header
    header = Header(sitkimage=sitkimage)
    image = sitk.GetArrayFromImage(sitkimage)
    # Roll axes from z,y,x,c to x,y,z,c: move the channel axis to the
    # front first (4D only), then transpose reverses the remaining axes.
    if image.ndim == 4:
        image = np.moveaxis(image, -1, 0)
    image = image.T
    return image, header
|
def encode_array(input_array, codec, param):
    """Encode the array using the method and then add the header to this
    array.

    :param input_array: the array to be encoded
    :param codec: the integer index of the codec to use
    :param param: the integer parameter to use in the function
    :return: an array with the header added to the front
    """
    encoded = codec_dict[codec].encode(input_array, param)
    return add_header(encoded, codec, len(input_array), param)
|
def get_syslog_config(host, username, password, protocol=None, port=None, esxi_hosts=None, credstore=None):
    '''Retrieve the syslog configuration.

    host
        The location of the host.
    username
        The username used to login to the host, such as ``root``.
    password
        The password used to login to the host.
    protocol
        Optionally set to alternate protocol if the host is not using the
        default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the host is not using the
        default port. Default port is ``443``.
    esxi_hosts
        If ``host`` is a vCenter host, then use esxi_hosts to execute this
        function on a list of one or more ESXi machines.
    credstore
        Optionally set to path to the credential store file.

    :return: Dictionary with keys and values corresponding to the
        syslog configuration, per host.

    CLI Example:

    .. code-block:: bash

        # Used for ESXi host connection information
        salt '*' vsphere.get_syslog_config my.esxi.host root bad-password

        # Used for connecting to a vCenter Server
        salt '*' vsphere.get_syslog_config my.vcenter.location root bad-password esxi_hosts='[esxi-1.host.com, esxi-2.host.com]'
    '''
    cmd = 'system syslog config get'
    ret = {}
    if not esxi_hosts:
        # Handles a single host or a vCenter connection when no esxi_hosts
        # are provided.
        response = salt.utils.vmware.esxcli(host, username, password, cmd,
                                            protocol=protocol, port=port,
                                            credstore=credstore)
        # format the response stdout into something useful
        ret[host] = _format_syslog_config(response)
        return ret
    if not isinstance(esxi_hosts, list):
        raise CommandExecutionError('\'esxi_hosts\' must be a list.')
    for esxi_host in esxi_hosts:
        response = salt.utils.vmware.esxcli(host, username, password, cmd,
                                            protocol=protocol, port=port,
                                            esxi_host=esxi_host,
                                            credstore=credstore)
        # format the response stdout into something useful
        ret[esxi_host] = _format_syslog_config(response)
    return ret
|
def set_html5_doctype(self):
    """Method used to transform a doctype in to a properly html5 doctype"""
    html5_doctype = b'<!DOCTYPE html>\n'
    # Replace only the first doctype occurrence.
    new_content, _ = doctype_re.subn(html5_doctype, self.response.content, 1)
    self.response.content = new_content
|
def CreateServiceProto(job):
    """Create the Service protobuf.

    Args:
      job: Launchd job dict from servicemanagement framework.

    Returns:
      sysinfo_pb2.OSXServiceInformation proto
    """
    # NOTE(review): job.get() here takes a ``stringify`` kwarg, so ``job``
    # is a plist/CF wrapper object rather than a plain dict -- confirm
    # against the caller.
    service = rdf_client.OSXServiceInformation(
        label=job.get("Label"),
        program=job.get("Program"),
        sessiontype=job.get("LimitLoadToSessionType"),
        lastexitstatus=int(job["LastExitStatus"]),
        timeout=int(job["TimeOut"]),
        ondemand=bool(job["OnDemand"]))
    # Returns CFArray of CFStrings
    for arg in job.get("ProgramArguments", "", stringify=False):
        service.args.Append(str(arg))
    # Flatten mach service mappings into "key:value" strings.
    mach_dict = job.get("MachServices", {}, stringify=False)
    for key, value in iteritems(mach_dict):
        service.machservice.Append("%s:%s" % (key, value))
    job_mach_dict = job.get("PerJobMachServices", {}, stringify=False)
    for key, value in iteritems(job_mach_dict):
        service.perjobmachservice.Append("%s:%s" % (key, value))
    if "PID" in job:
        # PID is a CFNumber; .value unwraps the native integer.
        service.pid = job["PID"].value
    return service
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.