signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_version(self):
    """Get the version object for the related object.

    Looks up the ``Version`` row matching this instance's content type,
    object id and stored ``publish_version`` number.

    :returns: the matching ``Version`` instance.
    :raises Version.DoesNotExist: if no matching version exists.
    """
    # NOTE(review): assumes (content_type, object_id, version_number) is
    # unique -- Version.objects.get() raises MultipleObjectsReturned otherwise.
    return Version.objects.get(
        content_type=self.content_type,
        object_id=self.object_id,
        version_number=self.publish_version,
    )
def serialize_packet_id(packet: dict) -> str:
    """Serialize packet identifiers into one reversible string.

    The protocol name is first mapped through ``protocol_translations`` so
    that names which cannot be reversed cleanly still produce a stable
    identifier; protocols the table marks as ``UNKNOWN`` fall back to the
    generic ``'rflink'`` prefix.  For example a packet with protocol
    ``'newkaku'``, id ``'000001'`` and switch ``'01'`` serializes to
    ``'newkaku_000001_01'``.

    :param packet: decoded packet dict with at least a ``'protocol'`` key
        and optional ``'id'`` / ``'switch'`` keys.
    :returns: underscore-joined identifier string.
    """
    # Translate the protocol into something reversible.
    proto = protocol_translations[packet['protocol']]
    if proto == UNKNOWN:
        proto = 'rflink'
    components = (proto, packet.get('id'), packet.get('switch'))
    # Drop missing/empty components before joining.
    return '_'.join(component for component in components if component)
def _compute_std(self, C, stddevs, idx):
    """Compute total standard deviation, see tables 3 and 4, pages 227 and
    228.

    Adds the coefficient table's total sigma ``C['sigma']`` in place at
    position ``idx`` of every array in ``stddevs``.
    """
    sigma = C['sigma']
    for stddev_arr in stddevs:
        stddev_arr[idx] += sigma
def create_driver_script(driver, script_create=None):  # noqa: E501
    """Create a new script.

    Create a new script # noqa: E501

    :param driver: The driver to use for the request. ie. github
    :type driver: str
    :param script_create: The data needed to create this script
    :type script_create: dict | bytes
    :rtype: Response
    """
    # Deserialize the JSON request body into the model, when present.
    if connexion.request.is_json:
        script_create = ScriptCreate.from_dict(connexion.request.get_json())  # noqa: E501
    # Reject callers lacking the 'developer' role before touching drivers.
    auth_failure = errorIfUnauthorized(role='developer')
    if auth_failure:
        return auth_failure
    result = ApitaxResponse()
    loaded_driver: Driver = LoadedDrivers.getDriver(driver)
    loaded_driver.saveDriverScript(script_create.script.name,
                                   script_create.script.content)
    return Response(status=200, body=result.getResponseBody())
def db004(self, value=None):
    """Corresponds to IDD Field `db004`.

    Dry-bulb temperature corresponding to 0.4% annual cumulative
    frequency of occurrence (warm conditions).

    Args:
        value (float): value for IDD Field `db004`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: stored unchecked per the IDD convention.
        self._db004 = value
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `db004`'.format(value))
    self._db004 = value
def process_bucket_set(account_info, buckets):
    """Process a collection of buckets.

    For each bucket fetch location, versioning and size and
    then kickoff processing strategy based on size.

    :param account_info: dict describing the account (credentials lookup,
        optional ``'regions'`` allow-list).
    :param buckets: iterable of bucket names to process.
    """
    # Cache of per-region boto clients so each region's s3/cloudwatch
    # clients are created only once.
    region_clients = {}
    log = logging.getLogger('salactus.bucket-set')
    log.info("processing account %s", account_info)
    session = get_session(account_info)
    client = session.client('s3', config=s3config)
    for b in buckets:
        bid = bucket_id(account_info, b)
        with bucket_ops(bid):
            info = {'name': b}
            error = None
            try:
                location = client.get_bucket_location(
                    Bucket=b).get('LocationConstraint')
            except Exception as e:
                # Remember the failure but keep going long enough to
                # resolve region info; re-raised further below.
                error = e
                location = None
            # S3 legacy values: None means us-east-1, 'EU' means eu-west-1.
            if location is None:
                region = "us-east-1"
            elif location == 'EU':
                region = "eu-west-1"
            else:
                region = location
            # Skip buckets outside the configured region allow-list.
            if (account_info.get('regions', ()) and
                    region not in account_info.get('regions', ())):
                continue
            info['region'] = region
            if region not in region_clients:
                region_clients.setdefault(region, {})
                region_clients[region]['s3'] = s3 = session.client(
                    's3', region_name=region, config=s3config)
                region_clients[region]['cloudwatch'] = cw = session.client(
                    'cloudwatch', region_name=region, config=s3config)
            else:
                s3 = region_clients[region]['s3']
                cw = region_clients[region]['cloudwatch']
            try:
                info['keycount'] = bucket_key_count(cw, info)
            except Exception:
                raise
            else:
                # Record bucket size (key count) for strategy selection.
                connection.hset('bucket-sizes', bid, info['keycount'])
            # Surface any earlier get_bucket_location failure now.
            if error:
                raise error
            connection.hset('bucket-regions', bid, region)
            versioning = s3.get_bucket_versioning(Bucket=b)
            info['versioned'] = (
                versioning and versioning.get('Status', '')
                in ('Enabled', 'Suspended') or False)
            connection.hset('bucket-versions', bid, int(info['versioned']))
            log.info("processing bucket %s", info)
            connection.hset('bucket-starts', bid, time.time())
            # Hand off to the size-based processing strategy.
            dispatch_object_source(s3, account_info, bid, info)
def get_future(self):
    """Get current and future forecasts covering the next 96 hours.

    Builds the API request from this instance's key, spot, fields and
    unit, bounded by [now, now + 96h] as POSIX timestamps.
    """
    start = dt.now()
    end = start + timedelta(hours=96)
    url = build_url(self.api_key, self.spot_id, self.fields, self.unit,
                    start.timestamp(), end.timestamp())
    return get_msw(url)
def delete(self):
    '''Deletes this model from the database, calling delete in each field
    to properly delete special cases.

    Removes every field's own keys, the object hash itself and its
    membership in the class member set; optionally publishes a delete
    event.

    :returns: self, so the call can be chained.
    '''
    redis = type(self).get_redis()
    # Give each field a chance to clean up its own keys (lists, sets, ...).
    for fieldname, field in self.proxy:
        field.delete(redis)
    redis.delete(self.key())
    # Drop this id from the set of all members of this model class.
    redis.srem(type(self).members_key(), self.id)
    if isinstance(self, PermissionHolder):
        redis.delete(self.allow_key())
    if self.notify:
        # Publish the delete event on both the class channel and the
        # object's own channel.
        data = json.dumps({'event': 'delete', 'data': self.to_json(), })
        redis.publish(type(self).cls_key(), data)
        redis.publish(self.key(), data)
    return self
def suggest_localhost(func):
    '''Decorator: prompt user for value of env.host_string with default to
    'localhost' when env.host_string is empty.

    Modification of decorator function fabric.network.needs_host.
    '''
    from fabric.network import handle_prompt_abort, to_dict

    @wraps(func)
    def host_prompting_wrapper(*args, **kwargs):
        # Keep prompting until a host string is available.
        while not env.get('host_string', False):
            # Aborts instead of prompting when fabric is non-interactive.
            handle_prompt_abort("the target host connection string")
            # NOTE(review): raw_input is Python 2 only -- this module
            # presumably targets Python 2 / legacy Fabric; confirm.
            host_string = raw_input("No hosts found. Please specify "
                                    "host string for connection [localhost]: ")
            if host_string == '':
                host_string = 'localhost'
            env.update(to_dict(host_string))
        return func(*args, **kwargs)
    # Expose the undecorated function, mirroring fabric's convention.
    host_prompting_wrapper.undecorated = func
    return host_prompting_wrapper
def __dispatch_request(self, handler_input):
    # type: (Input) -> Union[Output, None]
    """Process the request and return handler output.

    Using the registered list of :py:class:`RequestMapper`, a handler
    chain that can handle the request is located and its handler is run
    through the first supporting :py:class:`HandlerAdapter`.  Request
    interceptors registered on the chain run before the handler;
    response interceptors run after it.

    :param handler_input: generic input to the dispatcher containing
        incoming request and other context.
    :type handler_input: Input
    :return: Output from the 'handle' method execution of the
        supporting handler.
    :rtype: Union[None, Output]
    :raises DispatchException: if there is no supporting handler chain
        or adapter.
    """
    # First mapper that yields a chain wins.
    chain = next(
        (candidate
         for candidate in (mapper.get_request_handler_chain(handler_input)
                           for mapper in self.request_mappers)
         if candidate is not None),
        None)
    if chain is None:
        raise DispatchException("Unable to find a suitable request handler")

    handler = chain.request_handler
    # First adapter claiming support for the handler wins.
    adapter = next(
        (a for a in self.handler_adapters if a.supports(handler)), None)
    if adapter is None:
        raise DispatchException("Unable to find a suitable request adapter")

    for interceptor in chain.request_interceptors:
        interceptor.process(handler_input=handler_input)

    output = adapter.execute(
        handler_input=handler_input, handler=handler)  # type: Union[Output, None]

    for interceptor in chain.response_interceptors:
        interceptor.process(handler_input=handler_input, dispatch_output=output)
    return output
def trace(self, data, callback=None):
    """Trace data asynchronously.

    If no one is listening for traced data, it will be dropped,
    otherwise it will be queued for sending.

    Args:
        data (bytearray, string): Unstructured data to trace to any
            connected client.
        callback (callable): Optional callback to get notified when
            this data is actually sent.
    """
    channel = self._push_channel
    if channel is None:
        # Nobody is listening; silently drop the data.
        return
    channel.trace(data, callback=callback)
def paths_to_top(self, term):
    """Returns all possible paths to the root node.

    Each path includes the term given.  The order of each path is
    top -> bottom, i.e. it starts with the root and ends with the given
    term (inclusively).

    Parameters:
    - term: the id of the GO term where the paths begin (i.e. the
      accession 'GO:0003682')

    Returns:
    - a list of lists of GO Terms, or None when the term is unknown.
    """
    if term not in self:
        # Error handling kept consistent with the original authors.
        print("Term %s not found!" % term, file=sys.stderr)
        return

    def _collect(rec):
        # Base case: a root term yields the single path [root].
        if rec.level == 0:
            return [[rec]]
        result = []
        for parent in rec.parents:
            for path in _collect(parent):
                path.append(rec)
                result.append(path)
        return result

    return _collect(self[term])
def check_files(self, to_path=None):
    """Check that all of the files contained in the archive are within the
    target directory.

    :param to_path: directory the archive would be extracted into;
        defaults to the current working directory.
    :raises UnsafeArchive: if any member would be written outside the
        target directory (e.g. via ``..`` components or absolute paths).
    """
    if to_path:
        target_path = os.path.normpath(os.path.realpath(to_path))
    else:
        target_path = os.getcwd()
    for filename in self.filenames():
        extract_path = os.path.join(target_path, filename)
        extract_path = os.path.normpath(os.path.realpath(extract_path))
        # A plain startswith() check wrongly accepts sibling paths that
        # merely share a string prefix (e.g. '/target-evil' vs '/target');
        # require the target itself or a path strictly under it.
        if extract_path != target_path and not extract_path.startswith(
                target_path + os.sep):
            raise UnsafeArchive(
                "Archive member destination is outside the target"
                " directory. member: %s" % filename)
def logsumexp(a, axis=None, b=None, use_numexpr=True):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int, optional, default=None
        Axis or axes over which the sum is taken.  By default `axis` is
        None and all elements are summed.
    b : array-like, optional
        Scaling factor for exp(`a`); must be of the same shape as `a` or
        broadcastable to `a`.
    use_numexpr : bool, optional, default=True
        If True, use the numexpr library to speed up the calculation,
        which can give a 2-4X speedup when working with large arrays.

    Returns
    -------
    res : ndarray
        ``log(sum(exp(a)))`` calculated in a numerically more stable
        way.  If `b` is given then ``log(sum(b * exp(a)))`` is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2, scipy.special.logsumexp

    Notes
    -----
    This is based on scipy.misc.logsumexp but with optional numexpr
    support for improved performance.
    """
    a = np.asarray(a)
    # Shift by the per-axis maximum so exp() never overflows.
    a_max = np.amax(a, axis=axis, keepdims=True)
    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0
    if b is None:
        if use_numexpr and HAVE_NUMEXPR:
            out = np.log(numexpr.evaluate("exp(a - a_max)").sum(axis))
        else:
            out = np.log(np.sum(np.exp(a - a_max), axis=axis))
    else:
        b = np.asarray(b)
        if use_numexpr and HAVE_NUMEXPR:
            out = np.log(numexpr.evaluate("b * exp(a - a_max)").sum(axis))
        else:
            out = np.log(np.sum(b * np.exp(a - a_max), axis=axis))
    # Undo the shift applied above.
    out += np.squeeze(a_max, axis=axis)
    return out
def get_connection(connection_id, deep=False):
    """Get Heroku Connect connection information.

    For more details check:
    https://devcenter.heroku.com/articles/heroku-connect-api#step-8-monitor-the-connection-and-mapping-status

    The API returns a JSON document with fields such as ``id``, ``name``,
    ``resource_name``, ``schema_name``, ``db_key``, ``state`` and (when
    ``deep`` is set) a ``mappings`` list with per-object ``id``,
    ``object_name`` and ``state`` entries.

    Args:
        connection_id (str): ID for Heroku Connect's connection.
        deep (bool): Return information about the connection's mappings,
            in addition to the connection itself. Defaults to ``False``.

    Returns:
        dict: Heroku Connect connection information.

    Raises:
        requests.HTTPError: If an error occurred when accessing the
            connection detail API.
        ValueError: If response is not a valid JSON.
    """
    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
                       'connections', connection_id)
    response = requests.get(
        url,
        params={'deep': deep},
        headers=_get_authorization_headers(),
    )
    # Surface 4xx/5xx responses as requests.HTTPError.
    response.raise_for_status()
    return response.json()
def set_property(self, name, value):
    """Helper to set a property value by name, translating to correct
    dbus type.

    See also :py:meth:`get_property`

    :param str name: The property name in the object's dictionary
        whose value shall be set.
    :param value: Properties new value to be assigned.
    :return:
    :raises KeyError: if the property key is not found in the
        object's dictionary
    :raises dbus.Exception: org.bluez.Error.DoesNotExist
    :raises dbus.Exception: org.bluez.Error.InvalidArguments
    """
    # Use the current value's Python type so the new value is marshalled
    # to the same dbus type (get_property raises KeyError if missing).
    typeof = type(self.get_property(name))
    self._interface.SetProperty(name, translate_to_dbus_type(typeof, value))
def polyFitIgnoringOutliers(x, y, deg=2, niter=3, nstd=2,
                            return_outliers=False):
    """Iteratively fit a polynomial, excluding outliers each round.

    Returns:
        (np.poly1d): callable function of polynomial fit excluding all
            outliers

    Args:
        deg (int): degree of polynomial fit
        niter (int): do linear regression n times, successively removing
            outliers
        nstd (float): exclude outliers if their deviation
            is > [nstd] * standard deviation
        return_outliers (bool): also return outlier positions
            (bool mask over the ORIGINAL input) as 2nd arg
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if return_outliers:
        all_outliers = np.zeros_like(y, dtype=bool)
        # Indices into the ORIGINAL arrays for points still considered.
        # The original code did ``a = a[inliers]``, but boolean indexing
        # copies, so outlier marks after the first iteration were written
        # to a throwaway array and silently lost.
        remaining = np.arange(len(y))
    for i in range(niter):
        p = np.poly1d(np.polyfit(x, y, deg))
        if i == niter - 1:
            # Last round: keep the final fit, no further filtering.
            break
        dy = y - p(x)
        std = (dy ** 2).mean() ** 0.5
        inliers = abs(dy) < nstd * std
        if return_outliers:
            # Map local outlier positions back to original indices.
            all_outliers[remaining[~inliers]] = True
        if inliers.sum() > deg + 1:
            x = x[inliers]
            y = y[inliers]
            if return_outliers:
                remaining = remaining[inliers]
        else:
            # Too few inliers left for a meaningful refit.
            break
    if return_outliers:
        return p, all_outliers
    return p
def get_form():
    """Return the form to use for commenting.

    Resolves the configured comment form class once and caches it in the
    module-level ``form_class`` variable.
    """
    global form_class
    from fluent_comments import appsettings
    # Lazily resolve the form class on first use only.
    if form_class is None:
        if appsettings.FLUENT_COMMENTS_FORM_CLASS:
            # A custom form class was configured; import it by dotted path.
            from django.utils.module_loading import import_string
            form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)
        else:
            from fluent_comments.forms import FluentCommentForm
            form_class = FluentCommentForm
    return form_class
def voice(self, consonant):
    '''Voices a consonant, by searching the sound inventory for a consonant
    having the same features as the argument, but +voice.

    :param consonant: feature mapping of the consonant to voice.
    :returns: the matching voiced sound found by ``_find_sound``.
    '''
    # Deep-copy so the caller's consonant is not mutated.
    voiced_consonant = deepcopy(consonant)
    voiced_consonant[Voiced] = Voiced.pos
    return self._find_sound(voiced_consonant)
def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False):
    """trust a new PGP key related to a apt-repository

    :param keyid: id of the PGP key to fetch and trust.
    :param keyserver: keyserver to fetch the key from.
    :param log: when True, log the action before running it.
    :returns: True on completion.
    """
    if log:
        log_green('trusting keyid %s from %s' % (keyid, keyserver))
    # Suppress fabric's normal command output while running apt-key.
    with settings(hide('warnings', 'running', 'stdout')):
        sudo('apt-key adv --keyserver %s --recv %s' % (keyserver, keyid))
    return True
def update(self, **kwargs):
    """Change the configuration of the resource on the device.

    This method uses HTTP PUT to alter the service state on the device.
    The attributes of the instance will be packaged as a dictionary. That
    dictionary will be updated with kwargs. It is then submitted as JSON
    to the device. Various edge cases are handled:

    * read-only attributes that are unchangeable are removed
    * ``tmCommand`` attribute removed prior to PUT
    * ``agent`` attribute removed prior to PUT
    * ``post`` attribute removed prior to PUT

    :param kwargs: keys and associated values to alter on the device
    """
    # Strip read-only/unsupported attributes before submitting.
    # NOTE(review): the docstring says ``post`` is removed but the code
    # pops ``method`` -- confirm which attribute is actually intended.
    self.__dict__.pop('tmCommand', '')
    self.__dict__.pop('agent', '')
    self.__dict__.pop('method', '')
    super(Real_Server, self).update(**kwargs)
def sent2features(sentence, template):
    """Extract features for every token position in a sentence.

    :type sentence: list of token, each token is a list of tag
    :param template: feature template passed through to ``word2features``.
    :returns: list with one ``word2features`` result per token position.
    """
    return [word2features(sentence, position, template)
            for position, _ in enumerate(sentence)]
def BatchNorm(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,
              center=True, scale=True,
              beta_initializer=tf.zeros_initializer(),
              gamma_initializer=tf.ones_initializer(),
              virtual_batch_size=None, data_format='channels_last',
              internal_update=False, sync_statistics=None):
    """Almost equivalent to `tf.layers.batch_normalization`, but different
    (and more powerful) in the following:

    1. Accepts an alternative `data_format` option when `axis` is None.
       For 2D input, this argument will be ignored.
    2. Default value for `momentum` and `epsilon` is different.
    3. Default value for `training` is automatically obtained from
       tensorpack's `TowerContext`, but can be overwritten.
    4. Support the `internal_update` option, which covers more use cases
       than the standard collection-based update.
    5. Support the `sync_statistics` option, which is very useful in
       small-batch models.

    Args:
        internal_update (bool): if False, add EMA update ops to
            `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer
            by control dependencies. They are very similar in speed, but
            `internal_update=True` is recommended and can be helpful when:

            1. BatchNorm is used inside dynamic control flow.
               The collection-based update does not support dynamic control
               flows.
            2. BatchNorm layer is sometimes unused (e.g., when you have two
               networks to train alternatively). Putting all update ops into
               a single collection will waste a lot of compute.

            Corresponding TF issue:
            https://github.com/tensorflow/tensorflow/issues/14699
        sync_statistics (str or None): one of None, "nccl", or "horovod".

            By default (None), it uses statistics of the input tensor to
            normalize during training. This is the standard way BatchNorm
            was implemented in most frameworks.

            When set to "nccl", this layer must be used under tensorpack's
            multi-GPU trainers. It uses the aggregated statistics of the
            whole batch (across all GPUs) to normalize.

            When set to "horovod", this layer must be used under
            tensorpack's :class:`HorovodTrainer`. It uses the aggregated
            statistics of the whole batch (across all MPI ranks) to
            normalize. Note that on single machine this is significantly
            slower than the "nccl" implementation.

            When enabled, per-GPU E[x] and E[x^2] among all GPUs are
            averaged to compute global mean & variance. Therefore each GPU
            needs to have the same batch size. The synchronization is based
            on the current variable scope + the name of the layer
            (`BatchNorm('name', input)`). Therefore, you need to make sure
            that:

            1. The BatchNorm layer on different GPUs needs to have the same
               name, so that statistics can be synchronized. If names do
               not match, this layer will hang.
            2. Different BatchNorm layers in one tower cannot share the
               same name.
            3. A BatchNorm layer needs to be executed for the same number
               of times by all GPUs. If different GPUs execute one
               BatchNorm layer for different number of times (e.g., if some
               GPUs do not execute it), this layer may hang.

            This option only has effect when
            `training == get_current_tower_context().training == True`.

            This option is also known as "Cross-GPU BatchNorm" as mentioned
            in `MegDet: A Large Mini-Batch Object Detector
            <https://arxiv.org/abs/1711.07240>`_. Corresponding TF issue:
            https://github.com/tensorflow/tensorflow/issues/18222.

            When `sync_statistics` is enabled, `internal_update` will be
            set to True automatically. This is to avoid running
            `UPDATE_OPS`, which requires synchronization.

    Variable Names:

    * ``beta``: the bias term. Will be zero-inited by default.
    * ``gamma``: the scale term. Will be one-inited by default.
    * ``mean/EMA``: the moving average of mean.
    * ``variance/EMA``: the moving average of variance.

    Note:
        Combinations of ``training`` and ``ctx.is_training``:

        * ``training == ctx.is_training``: standard BN, EMA are maintained
          during training and used during inference. This is the default.
        * ``training and not ctx.is_training``: still use batch statistics
          in inference.
        * ``not training and ctx.is_training``: use EMA to normalize in
          training. This is useful when you load a pre-trained BN and
          don't want to fine tune the EMA. EMA will not be updated in
          this case.
    """
    # parse shapes
    data_format = get_data_format(data_format, keras_mode=False)
    shape = inputs.get_shape().as_list()
    ndims = len(shape)
    assert ndims in [2, 4], ndims
    if sync_statistics is not None:
        sync_statistics = sync_statistics.lower()
    assert sync_statistics in [None, 'nccl', 'horovod'], sync_statistics
    if axis is None:
        if ndims == 2:
            axis = 1
        else:
            axis = 1 if data_format == 'NCHW' else 3
    assert axis in [1, 3], axis
    num_chan = shape[axis]

    # parse training/ctx
    ctx = get_current_tower_context()
    if training is None:
        training = ctx.is_training
    training = bool(training)
    TF_version = get_tf_version_tuple()
    # "Frozen" BN: normalize with EMA while the graph is in training mode.
    freeze_bn_backward = not training and ctx.is_training
    if freeze_bn_backward:
        assert TF_version >= (1, 4), \
            "Fine tuning a BatchNorm model with fixed statistics needs TF>=1.4!"
        if ctx.is_main_training_tower:  # only warn in first tower
            logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.")
        # Using moving_mean/moving_variance in training, which means we
        # loaded a pre-trained BN and only fine-tuning the affine part.

    if sync_statistics is None or not (training and ctx.is_training):
        # Standard path: delegate to tf.layers.BatchNormalization.
        coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
        with rename_get_variable({'moving_mean': 'mean/EMA',
                                  'moving_variance': 'variance/EMA'}):
            tf_args = dict(
                axis=axis, momentum=momentum, epsilon=epsilon,
                center=center, scale=scale,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
                # https://github.com/tensorflow/tensorflow/issues/10857#issuecomment-410185429
                fused=(ndims == 4 and axis in [1, 3] and not freeze_bn_backward),
                _reuse=tf.get_variable_scope().reuse)
            if TF_version >= (1, 5):
                tf_args['virtual_batch_size'] = virtual_batch_size
            else:
                assert virtual_batch_size is None, "Feature not supported in this version of TF!"
            use_fp16 = inputs.dtype == tf.float16
            if use_fp16:
                # non-fused does not support fp16; fused does not support all layouts.
                # we made our best guess here
                tf_args['fused'] = True
            layer = tf.layers.BatchNormalization(**tf_args)
            xn = layer.apply(inputs, training=training,
                             scope=tf.get_variable_scope())

        # maintain EMA only on one GPU is OK, even in replicated mode.
        # because during training, EMA isn't used
        if ctx.is_main_training_tower:
            for v in layer.non_trainable_variables:
                if isinstance(v, tf.Variable):
                    tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
        if not ctx.is_main_training_tower or internal_update:
            # Drop the UPDATE_OPS added by the layer; either they belong to
            # another tower or we run them via control dependencies below.
            restore_collection(coll_bk)

        if training and internal_update:
            assert layer.updates
            with tf.control_dependencies(layer.updates):
                ret = tf.identity(xn, name='output')
        else:
            ret = tf.identity(xn, name='output')

        vh = ret.variables = VariableHolder(
            moving_mean=layer.moving_mean,
            mean=layer.moving_mean,  # for backward-compatibility
            moving_variance=layer.moving_variance,
            variance=layer.moving_variance)  # for backward-compatibility
        if scale:
            vh.gamma = layer.gamma
        if center:
            vh.beta = layer.beta
    else:
        # Sync-statistics path: compute batch mean/var manually so they can
        # be all-reduced across GPUs / MPI ranks before normalizing.
        red_axis = [0] if ndims == 2 else ([0, 2, 3] if axis == 1 else [0, 1, 2])

        new_shape = None  # don't need to reshape unless ...
        if ndims == 4 and axis == 1:
            new_shape = [1, num_chan, 1, 1]

        batch_mean = tf.reduce_mean(inputs, axis=red_axis)
        batch_mean_square = tf.reduce_mean(tf.square(inputs), axis=red_axis)

        if sync_statistics == 'nccl':
            num_dev = ctx.total
            if num_dev == 1:
                logger.warn("BatchNorm(sync_statistics='nccl') is used with only one tower!")
            else:
                assert six.PY2 or TF_version >= (1, 10), \
                    "Cross-GPU BatchNorm is only supported in TF>=1.10 ." \
                    "Upgrade TF or apply this patch manually: https://github.com/tensorflow/tensorflow/pull/20360"

                if TF_version <= (1, 12):
                    try:
                        from tensorflow.contrib.nccl.python.ops.nccl_ops import _validate_and_load_nccl_so
                    except Exception:
                        pass
                    else:
                        _validate_and_load_nccl_so()
                    from tensorflow.contrib.nccl.ops import gen_nccl_ops
                else:
                    from tensorflow.python.ops import gen_nccl_ops
                # Strip the tower prefix so all towers share one reduce op.
                shared_name = re.sub('tower[0-9]+/', '', tf.get_variable_scope().name)
                batch_mean = gen_nccl_ops.nccl_all_reduce(
                    input=batch_mean,
                    reduction='sum',
                    num_devices=num_dev,
                    shared_name=shared_name + '_NCCL_mean') * (1.0 / num_dev)
                batch_mean_square = gen_nccl_ops.nccl_all_reduce(
                    input=batch_mean_square,
                    reduction='sum',
                    num_devices=num_dev,
                    shared_name=shared_name + '_NCCL_mean_square') * (1.0 / num_dev)
        elif sync_statistics == 'horovod':
            # Require https://github.com/uber/horovod/pull/331
            import horovod.tensorflow as hvd
            if hvd.size() == 1:
                logger.warn("BatchNorm(sync_statistics='horovod') is used with only one process!")
            else:
                import horovod
                hvd_version = tuple(map(int, horovod.__version__.split('.')))
                assert hvd_version >= (0, 13, 6), "sync_statistics=horovod needs horovod>=0.13.6 !"

                batch_mean = hvd.allreduce(batch_mean, average=True)
                batch_mean_square = hvd.allreduce(batch_mean_square, average=True)
        # Var[x] = E[x^2] - E[x]^2, computed from the (aggregated) moments.
        batch_var = batch_mean_square - tf.square(batch_mean)
        batch_mean_vec = batch_mean
        batch_var_vec = batch_var

        beta, gamma, moving_mean, moving_var = get_bn_variables(
            num_chan, scale, center, beta_initializer, gamma_initializer)
        if new_shape is not None:
            batch_mean = tf.reshape(batch_mean, new_shape)
            batch_var = tf.reshape(batch_var, new_shape)
            # Using fused_batch_norm(is_training=False) is actually slightly faster,
            # but hopefully this call will be JITed in the future.
            xn = tf.nn.batch_normalization(
                inputs, batch_mean, batch_var,
                tf.reshape(beta, new_shape),
                tf.reshape(gamma, new_shape), epsilon)
        else:
            xn = tf.nn.batch_normalization(
                inputs, batch_mean, batch_var, beta, gamma, epsilon)

        if ctx.is_main_training_tower:
            ret = update_bn_ema(xn, batch_mean_vec, batch_var_vec,
                                moving_mean, moving_var, momentum)
        else:
            ret = tf.identity(xn, name='output')

        vh = ret.variables = VariableHolder(
            moving_mean=moving_mean,
            mean=moving_mean,  # for backward-compatibility
            moving_variance=moving_var,
            variance=moving_var)  # for backward-compatibility
        if scale:
            vh.gamma = gamma
        if center:
            vh.beta = beta
    return ret
def is_production(flag_name: str = 'PRODUCTION', strict: bool = False):
    """Reads env ``PRODUCTION`` variable as a boolean.

    :param flag_name: environment variable name
    :param strict: raise a ``ValueError`` if variable does not look like
        a normal boolean
    :return: ``True`` if has truthy ``PRODUCTION`` env, ``False`` otherwise
    """
    # Thin wrapper around the generic boolean-env-flag reader.
    return env_bool_flag(flag_name, strict=strict)
def stop(self):
    """Request thread to stop.

    Does not wait for actual termination (use join() method).
    """
    if not self.is_alive():
        return
    self._can_run = False
    self._stop_event.set()
    # Fold the elapsed run time into the profiler before clearing the
    # start timestamp.
    self._profiler.total_time += time() - self._start_time
    self._start_time = None
def summary(self):
    """Summary of model definition for labeling. Intended to be somewhat
    readable but unique to a given model definition.

    :returns: tuple of (estimator repr, feature count, feature hash,
        target).
    """
    # Treat a missing feature list as empty: the original code set
    # feature_count to 0 for None but then computed hash(tuple(None)),
    # which raises TypeError.
    features = self.features if self.features is not None else ()
    feature_count = len(features)
    feature_hash = 'feathash:' + str(hash(tuple(features)))
    return (str(self.estimator), feature_count, feature_hash, self.target)
def p_config(self, p):
    """config : config contents
    | contents"""
    # NOTE: the docstring above is a PLY grammar rule -- it drives the
    # parser generator and must not be reworded.
    symbols = len(p)
    if symbols == 3:
        # config : config contents -> append the new contents item.
        p[0] = p[1] + [p[2]]
    elif symbols == 2:
        # config : contents -> start a fresh config node.
        p[0] = ['config', p[1]]
def describe_alarm_history(self, alarm_name=None, start_date=None,
                           end_date=None, max_records=None,
                           history_item_type=None, next_token=None):
    """Retrieves history for the specified alarm. Filter alarms by date
    range or item type. If an alarm name is not specified, Amazon
    CloudWatch returns histories for all of the owner's alarms.

    Amazon CloudWatch retains the history of deleted alarms for a period
    of six weeks. If an alarm has been deleted, its history can still be
    queried.

    :type alarm_name: string
    :param alarm_name: The name of the alarm.
    :type start_date: datetime
    :param start_date: The starting date to retrieve alarm history.
    :type end_date: datetime
    :param end_date: The starting date to retrieve alarm history.
    :type history_item_type: string
    :param history_item_type: The type of alarm histories to retrieve
        (ConfigurationUpdate | StateUpdate | Action)
    :type max_records: int
    :param max_records: The maximum number of alarm descriptions
        to retrieve.
    :type next_token: string
    :param next_token: The token returned by a previous call to indicate
        that there is more data.
    :rtype: list
    """
    params = {}
    # Datetimes are serialized to ISO-8601; other values pass through.
    for key, raw in (('AlarmName', alarm_name),
                     ('StartDate', start_date),
                     ('EndDate', end_date),
                     ('HistoryItemType', history_item_type),
                     ('MaxRecords', max_records),
                     ('NextToken', next_token)):
        if not raw:
            continue
        if key in ('StartDate', 'EndDate'):
            params[key] = raw.isoformat()
        else:
            params[key] = raw
    return self.get_list('DescribeAlarmHistory', params,
                         [('member', AlarmHistoryItem)])
def tryImportModule(self, name):
    """Imports the module and sets version information.

    If the module cannot be imported, the version and package path are
    set to empty values.

    :param name: dotted module name to import.
    """
    import importlib

    self._name = name
    try:
        self._module = importlib.import_module(name)
    except ImportError:
        # Import failed: record empty metadata and stop.
        self._module = None
        self._version = ''
        self._packagePath = ''
        return
    # Read version/path metadata from the configured attribute names,
    # falling back to '???' when the module lacks them.
    if self._versionAttribute:
        self._version = getattr(self._module, self._versionAttribute, '???')
    if self._pathAttribute:
        self._packagePath = getattr(self._module, self._pathAttribute, '???')
def backend_status(self, backend='ibmqx4', access_token=None, user_id=None):
    """Get the status of a chip.

    Returns a dict that may contain 'available', 'busy' and
    'pending_jobs', plus the resolved 'backend' name.
    """
    if access_token:
        self.req.credential.set_token(access_token)
    if user_id:
        self.req.credential.set_user_id(user_id)
    resolved = self._check_backend(backend, 'status')
    if not resolved:
        raise BadBackendError(backend)
    raw = self.req.get('/Backends/' + resolved + '/queue/status',
                       with_token=False)
    result = {}
    if 'state' in raw:
        result['available'] = bool(raw['state'])
    if 'busy' in raw:
        result['busy'] = bool(raw['busy'])
    if 'lengthQueue' in raw:
        result['pending_jobs'] = raw['lengthQueue']
    result['backend'] = resolved
    return result
def infer_type(data):
    """Infer the type of objects returned by indicators.

    Returns:
        - 'scalar' for a number or None,
        - 'summarystats' for a SummaryStats object,
        - 'distribution_scalar' for a list of scalars,
        - 'distribution_summarystats' for a list of SummaryStats objects.
    """
    if isinstance(data, (type(None), numbers.Number)):
        return 'scalar'
    if isinstance(data, SummaryStats):
        return 'summarystats'
    if not hasattr(data, "__len__"):  # not a list/numpy array
        raise TypeError(
            "{} is not a valid input. It should be a number, a SummaryStats "
            "object, or a list".format(data))
    # Drop None entries before inspecting the first element.
    cleaned = [x for x in data if x is not None]
    if not cleaned or isinstance(cleaned[0], numbers.Number):
        return 'distribution_scalar'
    if isinstance(cleaned[0], SummaryStats):
        return 'distribution_summarystats'
    raise TypeError(
        "{} is not a valid input. It should be a number, a SummaryStats "
        "object, or None".format(cleaned[0]))
def lock(self, page):
    """Lock *page*; raise DokuWikiError when the lock could not be taken."""
    response = self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[])
    if response['lockfail']:
        raise DokuWikiError('unable to lock page')
def start(self):
    """Create a process in which the isolated code will be run."""
    assert self._client is None
    logger.debug('IsolationContext[%d] starting', id(self))
    # Queues carrying request/response traffic to/from the server process.
    requests = multiprocessing.Queue()
    responses = multiprocessing.Queue()
    # Launch the server process. Deliberately avoid keeping a reference to
    # the Server object itself.
    worker = multiprocessing.Process(target=Server(requests, responses).loop)
    worker.start()
    # Client used to talk to the server process.
    self._client = Client(worker, requests, responses)
def strip_brackets(text, brackets=None):
    """Strip brackets and what is inside brackets from *text*.

    .. note::
        If the text contains only one opening bracket, the rest of the
        text will be ignored. This is a feature, not a bug, as we want
        to avoid that this function raises errors too easily.
    """
    kept = [chunk for chunk, kind in _tokens(text, brackets=brackets)
            if kind == TextType.text]
    return ''.join(kept).strip()
def check_folder_exists(project, path, folder_name):
    """Return whether a folder named *folder_name* exists at *path*.

    :param project: project id
    :param path: path in which to look for the folder in question
    :param folder_name: name of the folder in question
    :returns: True or False, whether the folder exists at the path
    :raises ResolutionError: if dxpy.api.container_list_folder reports
        the path as missing

    Note: this function will NOT work on the root folder case, i.e. '/'.
    """
    if folder_name is None or path is None:
        return False
    try:
        listing = dxpy.api.container_list_folder(
            project, {"folder": path, "only": "folders"})
    except dxpy.exceptions.DXAPIError as e:
        if e.name == 'ResourceNotFound':
            raise ResolutionError(str(e.msg))
        raise
    # Sanitize the candidate path before comparing against the listing.
    target_folder, _skip = clean_folder_path(path + '/' + folder_name, 'folder')
    return target_folder in listing['folders']
def _cho_solve_AATI(A, rho, b, c, lwr, check_finite=True):
    """Patched version of :func:`sporco.linalg.cho_solve_AATI`."""
    rows, cols = A.shape
    if rows >= cols:
        solved = _cho_solve((c, lwr), b.dot(A).T, check_finite=check_finite)
        return (b - solved.T.dot(A.T)) / rho
    return _cho_solve((c, lwr), b.T, check_finite=check_finite).T
def set_field(self, field, rate, approach='linear', mode='persistent',
              wait_for_stability=True, delay=1):
    """Set the magnetic field.

    :param field: Target field in Oersted (1 Oe = 0.1 mT).
    :param rate: Field rate in Oersted per minute.
    :param approach: 'linear', 'no overshoot' or 'oscillate'.
    :param mode: State of the magnet at the end of the charging process,
        either 'persistent' or 'driven'.
    :param wait_for_stability: If True, block until the target field is
        reached and stable.
    :param delay: Poll interval in seconds while waiting (no effect when
        *wait_for_stability* is False).
    """
    self.target_field = field, rate, approach, mode
    if not wait_for_stability:
        return
    if self.system_status['magnet'].startswith('persist'):
        # Wait until the persistent switch heats up.
        time.sleep(self.magnet_config[5])
    while True:
        if self.system_status['magnet'] in ('persistent, stable', 'driven, stable'):
            return
        time.sleep(delay)
def _getresult(self, captcha_id, timeout=None):
    """Poll until captcha *captcha_id* has been solved, or the poll times out.

    The timeout defaults to ``self.waittime`` unless overridden by
    *timeout* (seconds). Polling is done every 8 seconds.
    """
    effective_timeout = timeout or self.waittime
    poll_interval = 8
    started = time.time()
    polls = int(effective_timeout / poll_interval) + 1
    for attempt in range(polls):
        self.log.info(
            "Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).",
            poll_interval, attempt, polls,
            (time.time() - started), effective_timeout,
        )
        time.sleep(poll_interval)
        try:
            result = self.doGet('result', {
                'action': 'get',
                'key': self.api_key,
                'json': True,
                'id': captcha_id,
            })
        except exc.CaptchaNotReady:
            self.log.info("Captcha not ready. Waiting longer.")
            continue
        self.log.info("Call returned success!")
        return result
    raise exc.CaptchaSolverFailure(
        "Solving captcha timed out after %s seconds!" % (time.time() - started,))
def pdbechem_parse(download=False, filename='/kortemmelab/shared/mirror/PDBeChem/chem_comp.xml'):
    """Parse the PDBeChem chem_comp.xml file into a dict keyed by component id.

    This is slower than using SAX but much easier to write/read. If you
    need this to perform well, rewrite with SAX.

    :param download: When True, fetch the XML over FTP instead of reading
        *filename*.
    :param filename: Local path to chem_comp.xml (used when *download*
        is False).
    :returns: dict mapping component id to a dict of its parsed properties.
    """
    xml_data = None
    if download:
        # NOTE: this URL will go out of date.
        try:
            resource = ['ftp.ebi.ac.uk', '/pub/databases/msd/pdbechem/chem_comp.xml']
            xml_data = get_insecure_resource(resource[0], resource[1])
        # Fix: the original used Python 2 `except X, e` syntax, which is a
        # SyntaxError on Python 3.
        except FTPException550:
            colortext.error("This resource ftp://%s appears to be missing. The link may need to be updated in the script." % "".join(resource))
            raise
        except Exception as e:
            colortext.error("An error occurred downloading ftp://%s:\n%s" % ("".join(resource), str(e)))
            raise
    else:
        xml_data = read_file(filename)
    _dom = parseString(xml_data)
    main_tag = _dom.getElementsByTagName("chemCompList")
    assert (len(main_tag) == 1)
    main_tag = main_tag[0]
    entries = main_tag.getElementsByTagName("chemComp")
    parsed_dict = {}
    properties = ["id", "name", "formula", "systematicName", "stereoSmiles", "nonStereoSmiles", "InChi"]
    for e in entries:
        d = {}
        for p in properties:
            t = e.getElementsByTagName(p)
            assert (len(t) <= 1)
            if p == "id":
                # Every entry must carry a non-empty id tag.
                assert (len(t) == 1 and t[0].childNodes)
            if len(t) and t[0].childNodes:
                d[p] = t[0].firstChild.nodeValue
            else:
                d[p] = None
        parsed_dict[d['id']] = d
    return parsed_dict
def _pretend_to_run(self, migration, method):
    """Pretend to run the migration, printing the SQL it would execute.

    :param migration: The migration
    :type migration: orator.migrations.migration.Migration
    :param method: The method to execute
    :type method: str
    """
    self._note("")
    seen = []
    for query in self._get_queries(migration, method):
        name = migration.__class__.__name__
        bindings = None
        if isinstance(query, tuple):
            query, bindings = query
        query = highlight(query, SqlLexer(), CommandFormatter()).strip()
        if bindings:
            query = (query, bindings)
        # Print the migration name once, before its first query.
        if name not in seen:
            self._note("[<info>{}</info>]".format(name))
            seen.append(name)
        self._note(query)
def parse_from_template(template_name):
    """Return an element loaded from the XML in the template file
    identified by *template_name* (``templates/<template_name>.xml``)."""
    here = os.path.split(__file__)[0]
    template_path = os.path.join(here, '..', 'templates', '%s.xml' % template_name)
    with open(template_path, 'rb') as stream:
        return parse_xml(stream.read())
def save(self, designName=""):
    # type: (ASaveDesign) -> None
    """Save the current design to file.

    Delegates to ``try_stateful_function``, which runs ``do_save`` while
    moving the state machine from SAVING back to READY.
    """
    self.try_stateful_function(ss.SAVING, ss.READY, self.do_save, designName)
def get_action(self, create=False):
    """Get the shared widget action for this widget.

    This API is used to support widgets in tool bars and menus.

    Parameters
    ----------
    create : bool, optional
        Whether to create the action if it doesn't already exist.
        The default is False.

    Returns
    -------
    result : QWidgetAction or None
        The cached widget action or None, depending on arguments.
    """
    if self._widget_action is None and create:
        self._widget_action = QWidgetAction(None)
        self._widget_action.setDefaultWidget(self.widget)
    return self._widget_action
def to_dict(self, model_run):
    """Create a JSON-like dictionary for a model run object.

    Extends the basic object with run state, arguments, and optional
    prediction results or error descriptions.

    Parameters
    ----------
    model_run : PredictionHandle

    Returns
    -------
    dict
        JSON-like object, i.e., dictionary.
    """
    # Start from the basic serialization provided by the superclass.
    obj = super(DefaultModelRunManager, self).to_dict(model_run)
    obj['state'] = ModelRunState.to_dict(model_run.state)
    # Run scheduling timestamps.
    obj['schedule'] = model_run.schedule
    obj['experiment'] = model_run.experiment_id
    obj['model'] = model_run.model_id
    # Arguments are serialized as a list of key-value pairs.
    obj['arguments'] = attribute.attributes_to_dict(model_run.arguments)
    obj['attachments'] = [a.to_dict() for a in model_run.attachments.values()]
    return obj
def _cell_to_python(cell):
    """Convert a PyOpenXL ``Cell`` object to the corresponding Python object."""
    data_type, value = cell.data_type, cell.value
    if type(cell) is EmptyCell:
        return None
    # Formula cells encoding booleans.
    if data_type == "f" and value == "=TRUE()":
        return True
    if data_type == "f" and value == "=FALSE()":
        return False
    fmt = cell.number_format
    if fmt.lower() == "yyyy-mm-dd":
        # Date cells: drop the midnight time component.
        return str(value).split(" 00:00:00")[0]
    if fmt.lower() == "yyyy-mm-dd hh:mm:ss":
        # Datetime cells: drop fractional seconds.
        return str(value).split(".")[0]
    if fmt.endswith("%") and isinstance(value, Number):
        # Percentage cells are rendered as strings.
        return "{:%}".format(Decimal(str(value)))
    if value is None:
        return ""
    return value
def populateImagesFromSurveys(self, surveys=dss2 + twomass):
    """Load images from archives."""
    # Format the coordinate center as "ra dec" in degrees.
    target = '{0.ra.deg} {0.dec.deg}'.format(self.center)
    # Query SkyView for one image per requested survey.
    paths = astroquery.skyview.SkyView.get_images(
        position=target, radius=self.radius, survey=surveys)
    # Populate the images for each of these.
    self.images = [Image(p[0], s) for p, s in zip(paths, surveys)]
def intersperse(lis, value):
    """Put *value* between each pair of adjacent items in *lis*.

    Parameters
    ----------
    lis : list
        List to intersperse.
    value : object
        Value to insert.

    Returns
    -------
    list
        The interspersed list.
    """
    out = []
    for index, item in enumerate(lis):
        if index:
            out.append(value)
        out.append(item)
    return out
def refine_time_offset(image_list, frame_timestamps, rotation_sequence, rotation_timestamps, camera_matrix, readout_time):
    """Refine a time offset between camera and IMU using rolling shutter aware optimization.

    To refine the time offset using this function, you must meet the following constraints:

    1) The data must already be roughly aligned. Only a few image frames of
       error is allowed.
    2) The images *must* have been captured by a *rolling shutter* camera.

    This function finds a refined offset using optimization.
    Points are first tracked from the start to the end of the provided images.
    Then an optimization function looks at the reprojection error of the
    tracked points given the IMU-data and the refined offset.

    The found offset *d* is such that you want to perform the following time
    update::

        new_frame_timestamps = frame_timestamps + d

    Parameters
    ----------
    image_list : list of ndarray
        A list of images to perform tracking on. High quality tracks are
        required, so make sure the sequence you choose is easy to track in.
    frame_timestamps : ndarray
        Timestamps of image_list.
    rotation_sequence : (4, N) ndarray
        Absolute rotations as a sequence of unit quaternions (first element
        is scalar).
    rotation_timestamps : ndarray
        Timestamps of rotation_sequence.
    camera_matrix : (3,3) ndarray
        The internal camera calibration matrix of the camera.
    readout_time : float
        The readout time of the camera.

    Returns
    -------
    offset : float
        A refined offset that aligns the image data with the rotation data.
    """
    # 1) Track points across the image sequence.
    max_corners = 200
    quality_level = 0.07
    min_distance = 5
    max_tracks = 20
    initial_points = cv2.goodFeaturesToTrack(image_list[0], max_corners, quality_level, min_distance)
    (points, status) = tracking.track_retrack(image_list, initial_points)
    # Prune to at most max_tracks number of tracks, chosen randomly.
    track_id_list = np.random.permutation(points.shape[0])[:max_tracks]
    rows, cols = image_list[0].shape[:2]
    # Time for the rolling shutter to advance one image row.
    row_delta_time = readout_time / rows
    num_tracks, num_frames, _ = points.shape
    K = np.matrix(camera_matrix)

    def func_to_optimize(td, *args):
        # Mean squared reprojection error over all tracked point pairs for
        # the candidate time offset td.
        res = 0.0
        N = 0
        for frame_idx in range(num_frames - 1):
            for track_id in track_id_list:
                p1 = points[track_id, frame_idx, :].reshape((-1, 1))
                p2 = points[track_id, frame_idx + 1, :].reshape((-1, 1))
                # Rolling shutter: each image row has its own capture time,
                # offset from the frame timestamp by its row index.
                t1 = frame_timestamps[frame_idx] + (p1[1] - 1) * row_delta_time + td
                t2 = frame_timestamps[frame_idx + 1] + (p2[1] - 1) * row_delta_time + td
                t1 = float(t1)
                t2 = float(t2)
                q1 = IMU.rotation_at_time(t1, rotation_timestamps, rotation_sequence)
                q2 = IMU.rotation_at_time(t2, rotation_timestamps, rotation_sequence)
                R1 = rotations.quat_to_rotation_matrix(q1)
                R2 = rotations.quat_to_rotation_matrix(q2)
                # Reproject p2 into the first frame via the relative rotation.
                p1_rec = K.dot(R1.T).dot(R2).dot(K.I).dot(np.vstack((p2, 1)))
                if p1_rec[2] == 0:
                    # Degenerate projection; skip this pair.
                    continue
                else:
                    p1_rec /= p1_rec[2]
                res += np.sum((p1 - np.array(p1_rec[0:2])) ** 2)
                N += 1
        return res / N

    # Bounded Brent optimizer over the offset search interval.
    t0 = time.time()
    tolerance = 1e-4  # one tenth of a millisecond
    (refined_offset, fval, ierr, numfunc) = scipy.optimize.fminbound(func_to_optimize, -0.12, 0.12, xtol=tolerance, full_output=True)
    t1 = time.time()
    if ierr == 0:
        logger.info("Time offset found by brent optimizer: %.4f. Elapsed: %.2f seconds (%d function calls)", refined_offset, t1 - t0, numfunc)
    else:
        logger.error("Brent optimizer did not converge. Aborting!")
        raise Exception("Brent optimizer did not converge, when trying to refine offset.")
    return refined_offset
def pmt_with_id(self, pmt_id):
    """Get the PMT for a global *pmt_id*; raise KeyError when unknown."""
    try:
        index = self._pmt_index_by_pmt_id[pmt_id]
    except KeyError:
        raise KeyError("No PMT found for ID: {0}".format(pmt_id))
    return self.pmts[index]
def lines_once(cls, code, **kwargs):
    """One-off code generation using :meth:`lines`.

    If keyword args are provided, the generator is initialized using
    :meth:`with_id_processor` and seeded with *kwargs* as context.
    """
    if kwargs:
        generator = cls.with_id_processor()
        generator._append_context(kwargs)
    else:
        generator = cls()
    generator.lines(code)
    return generator.code
def throw(self, command):
    """Post an exception's stacktrace string as returned by
    ``traceback.format_exc()`` in an ``except`` block.

    :param command: The command object that holds all the necessary
        information from the remote process.
    """
    title = ' - {}'.format(command.process_title) if command.process_title else ''
    exception_message = '[Process {}{}]:\n{}'.format(command.pid, title, command.stacktrace)
    line = self.get_format().replace('{L}', 'EXCEPTION')
    self.append_exception('{}\t{}\n'.format(line, exception_message))
    # Mark dirty and redraw so the exception shows up immediately.
    self.changes_made = True
    self.redraw()
    self.log.critical('\t{}'.format(exception_message))
def split_feature(f, n):
    """Split an interval into `n` roughly equal portions.

    :param f: Feature-like object with integer ``start`` and ``stop``
        attributes; each yielded piece is a copy of *f* with adjusted bounds.
    :param n: Number of portions; must be an int.
    :raises ValueError: If *n* is not an integer, or if the interval is
        shorter than *n* (a zero-length step).
    :yields: Copies of *f* covering consecutive sub-intervals.
    """
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    orig_feature = copy(f)
    # Fix: the original used true division, which yields a float on
    # Python 3 and makes range() raise TypeError. Floor division keeps
    # the historical (Python 2) integer behavior.
    step = (f.stop - f.start) // n
    if step == 0:
        # Surface a clear error instead of range()'s obscure
        # "arg 3 must not be zero".
        raise ValueError('interval of length %d cannot be split into %d portions'
                         % (f.stop - f.start, n))
    for i in range(f.start, f.stop, step):
        piece = copy(orig_feature)
        piece.start = i
        piece.stop = min(i + step, orig_feature.stop)
        yield piece
        if piece.stop == orig_feature.stop:
            break
def _update(self):
    r"""Update the current reconstruction.

    Notes
    -----
    Implements algorithm 3 from [K2018]_. The step numbers in the
    comments below refer to that algorithm.
    """
    # Step 4 from alg. 3: gradient step from the previous iterate.
    self._grad.get_grad(self._x_old)
    self._u_new = self._x_old - self._beta * self._grad.grad
    # Step 5 from alg. 3: update the inertial parameter t.
    self._t_new = 0.5 * (1 + np.sqrt(1 + 4 * self._t_old ** 2))
    # Step 6 from alg. 3: build the extrapolated point z in place.
    t_shifted_ratio = (self._t_old - 1) / self._t_new
    sigma_t_ratio = self._sigma * self._t_old / self._t_new
    beta_xi_t_shifted_ratio = t_shifted_ratio * self._beta / self._xi
    self._z = -beta_xi_t_shifted_ratio * (self._x_old - self._z)
    self._z += self._u_new
    self._z += t_shifted_ratio * (self._u_new - self._u_old)
    self._z += sigma_t_ratio * (self._u_new - self._x_old)
    # Step 7 from alg. 3: update the proximal step size xi.
    self._xi = self._beta * (1 + t_shifted_ratio + sigma_t_ratio)
    # Step 8 from alg. 3: apply the proximal operator.
    self._x_new = self._prox.op(self._z, extra_factor=self._xi)
    # Restarting and gamma-decreasing:
    # Step 9 from alg. 3: composite gradient mapping.
    self._g_new = self._grad.grad - (self._x_new - self._z) / self._xi
    # Step 10 from alg. 3: candidate point used by the restart test.
    self._y_new = self._x_old - self._beta * self._g_new
    # Step 11 from alg. 3: restart criterion.
    restart_crit = np.vdot(-self._g_new, self._y_new - self._y_old) < 0
    if restart_crit:
        self._t_new = 1
        self._sigma = 1
    # Step 13 from alg. 3: shrink sigma when successive gradients disagree.
    elif np.vdot(self._g_new, self._g_old) < 0:
        self._sigma *= self._sigma_bar
    # Shift state for the next iteration (in-place copies keep buffers).
    self._t_old = self._t_new
    np.copyto(self._u_old, self._u_new)
    np.copyto(self._x_old, self._x_new)
    np.copyto(self._g_old, self._g_new)
    np.copyto(self._y_old, self._y_new)
    # Test cost function for convergence.
    if self._cost_func:
        self.converge = self.any_convergence_flag() or self._cost_func.get_cost(self._x_new)
def list_sinks(self, page_size=None, page_token=None):
    """List sinks for the project associated with this client.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list

    :type page_size: int
    :param page_size:
        Optional. The maximum number of sinks in each page of results
        from this request. Non-positive values are ignored. Defaults to
        a sensible value set by the API.

    :type page_token: str
    :param page_token:
        Optional. If present, return the next batch of sinks, using the
        value, which must correspond to the ``nextPageToken`` value
        returned in the previous response. Deprecated: use the ``pages``
        property of the returned iterator instead of manually passing
        the token.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.logging.sink.Sink`
        accessible to the current client.
    """
    return self.sinks_api.list_sinks(self.project, page_size, page_token)
def _registered(self):
    """An optional boolean property indicating whether this job store is registered.

    The registry is the authority on deciding if a job store exists or
    not. If True, this job store exists; if None the job store is
    transitioning from True to False or vice versa; if False the job
    store doesn't exist.

    :type: bool|None
    """
    # The weird mapping of the SDB item attribute value to the property value is due to
    # backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at
    # the end of job store creation. Absence of either the registry, the item or the
    # attribute becomes False, representing a truly absent, non-existing job store. An
    # attribute value of 'False', which is what Toil < 3.3.0 writes at the *beginning* of job
    # store destruction, indicates a job store in transition, reflecting the fact that 3.3.0
    # may leak buckets or domains even though the registry reports 'False' for them. We
    # can't handle job stores that were partially created by 3.3.0, though.
    registry_domain = self._bindDomain(domain_name='toil-registry', create=False, block=False)
    if registry_domain is None:
        # No registry at all: the job store truly does not exist.
        return False
    else:
        for attempt in retry_sdb():
            with attempt:
                attributes = registry_domain.get_attributes(item_name=self.namePrefix, attribute_name='exists', consistent_read=True)
                try:
                    exists = attributes['exists']
                except KeyError:
                    # Item or attribute missing: job store does not exist.
                    return False
                else:
                    if exists == 'True':
                        return True
                    elif exists == 'False':
                        # In transition (see note above).
                        return None
                    else:
                        # Any other stored value is a programming error.
                        assert False
def run_instances(self, image_id, min_count=1, max_count=1, key_name=None, security_groups=None, user_data=None, addressing_type=None, instance_type='m1.small', placement=None, kernel_id=None, ramdisk_id=None, monitoring_enabled=False, subnet_id=None, block_device_map=None, disable_api_termination=False, instance_initiated_shutdown_behavior=None, private_ip_address=None, placement_group=None, client_token=None, security_group_ids=None):
    """Runs an image on EC2.

    :type image_id: string
    :param image_id: The ID of the image to run.
    :type min_count: int
    :param min_count: The minimum number of instances to launch.
    :type max_count: int
    :param max_count: The maximum number of instances to launch.
    :type key_name: string
    :param key_name: The name of the key pair with which to launch
        instances.
    :type security_groups: list of strings
    :param security_groups: The names of the security groups with which
        to associate instances.
    :type user_data: string
    :param user_data: The user data passed to the launched instances.
    :type instance_type: string
    :param instance_type: The type of instance to run (e.g. 'm1.small',
        'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge',
        'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', 't1.micro').
    :type placement: string
    :param placement: The availability zone in which to launch the
        instances.
    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the
        instances.
    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
        instances.
    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the
        instance.
    :type subnet_id: string
    :param subnet_id: The subnet ID within which to launch the instances
        for VPC.
    :type private_ip_address: string
    :param private_ip_address: If you're using VPC, you can optionally
        use this parameter to assign the instance a specific available
        IP address from the subnet (e.g., 10.0.0.25).
    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure
        describing the EBS volumes associated with the Image.
    :type disable_api_termination: bool
    :param disable_api_termination: If True, the instances will be
        locked and will not be able to be terminated via the API.
    :type instance_initiated_shutdown_behavior: string
    :param instance_initiated_shutdown_behavior: Whether the instance
        stops or terminates on instance-initiated shutdown. Valid
        values: 'stop', 'terminate'.
    :type placement_group: string
    :param placement_group: If specified, the name of the placement
        group in which the instance(s) will be launched.
    :type client_token: string
    :param client_token: Unique, case-sensitive identifier you provide
        to ensure idempotency of the request. Maximum 64 ASCII
        characters.
    :type security_group_ids: list of strings
    :param security_group_ids: The ID of the VPC security groups with
        which to associate instances.
    :rtype: Reservation
    :return: The :class:`boto.ec2.instance.Reservation` associated with
        the request for machines.
    """
    # Required parameters are always sent; everything else is optional.
    params = {'ImageId': image_id, 'MinCount': min_count, 'MaxCount': max_count}
    if key_name:
        params['KeyName'] = key_name
    if security_group_ids:
        # Accept both SecurityGroup objects and plain group IDs.
        l = []
        for group in security_group_ids:
            if isinstance(group, SecurityGroup):
                l.append(group.id)
            else:
                l.append(group)
        self.build_list_params(params, l, 'SecurityGroupId')
    if security_groups:
        # Accept both SecurityGroup objects and plain group names.
        l = []
        for group in security_groups:
            if isinstance(group, SecurityGroup):
                l.append(group.name)
            else:
                l.append(group)
        self.build_list_params(params, l, 'SecurityGroup')
    if user_data:
        # The EC2 API requires user data to be base64-encoded.
        params['UserData'] = base64.b64encode(user_data)
    if addressing_type:
        params['AddressingType'] = addressing_type
    if instance_type:
        params['InstanceType'] = instance_type
    if placement:
        params['Placement.AvailabilityZone'] = placement
    if placement_group:
        params['Placement.GroupName'] = placement_group
    if kernel_id:
        params['KernelId'] = kernel_id
    if ramdisk_id:
        params['RamdiskId'] = ramdisk_id
    if monitoring_enabled:
        params['Monitoring.Enabled'] = 'true'
    if subnet_id:
        params['SubnetId'] = subnet_id
    if private_ip_address:
        params['PrivateIpAddress'] = private_ip_address
    if block_device_map:
        block_device_map.build_list_params(params)
    if disable_api_termination:
        params['DisableApiTermination'] = 'true'
    if instance_initiated_shutdown_behavior:
        val = instance_initiated_shutdown_behavior
        params['InstanceInitiatedShutdownBehavior'] = val
    if client_token:
        params['ClientToken'] = client_token
    return self.get_object('RunInstances', params, Reservation, verb='POST')
def validate_config_json(pjson):
    """Check parsed configuration JSON (output of ``json.load``) for
    common errors."""
    # The root must be a JSON object/dictionary.
    if type(pjson) is not dict:
        raise ParseError("Configuration file should contain a single JSON object/dictionary. Instead got a %s." % type(pjson))
    # If 'configuration' is present it should be a dict; the
    # ArgumentParser does the rest of the validation.
    configuration = pjson.get('configuration', {})
    if type(configuration) is not dict:
        raise ParseError('''"configuration" object should be a dict, got %s instead.''' % type(configuration))
    # The 'tests' key is required.
    if "tests" not in pjson:
        raise KeyError("Configuration file requires 'tests' attribute.")
    # 'options', when present, must be a dict.
    options = pjson.get('options', {})
    if type(options) is not dict:
        raise ParseError('"options" attribute must be a dictionary"')
    # Validation differs depending on whether --user-defined-threads is used.
    if '-u' in options.keys() or '--user-defined-threads' in options.keys():
        ConfigurationValidator.validate_user_threaded_json(pjson)
    else:
        ConfigurationValidator.validate_auto_threaded_json(pjson)
def setup(self, app):
    """Initialize the plugin, filling its options from the application."""
    self.app = app
    # Verify that all required plugins are installed on the application.
    for dep_name, dep_type in self.dependencies.items():
        if dep_name not in app.ps or not isinstance(app.ps[dep_name], dep_type):
            raise PluginException('Plugin `%s` requires for plugin `%s` to be installed to the application.' % (self.name, dep_type))
    # Merge defaults with application-level overrides (PLUGINNAME_OPTION).
    for option, default in self.defaults.items():
        app_key = ('%s_%s' % (self.name, option)).upper()
        self.cfg.setdefault(option, app.cfg.get(app_key, default))
        app.cfg.setdefault(app_key, self.cfg[option])
def get_shelvesets(self, request_data=None, top=None, skip=None):
    """GetShelvesets.

    Return a collection of shallow shelveset references.

    :param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: name, owner, and maxCommentLength
    :param int top: Max number of shelvesets to return
    :param int skip: Number of shelvesets to skip
    :rtype: [TfvcShelvesetRef]
    """
    query_parameters = {}
    if request_data is not None:
        # Map each populated request_data attribute to its query parameter.
        field_map = [
            ('name', 'requestData.name'),
            ('owner', 'requestData.owner'),
            ('max_comment_length', 'requestData.maxCommentLength'),
            ('max_change_count', 'requestData.maxChangeCount'),
            ('include_details', 'requestData.includeDetails'),
            ('include_work_items', 'requestData.includeWorkItems'),
            ('include_links', 'requestData.includeLinks'),
        ]
        for attr, key in field_map:
            value = getattr(request_data, attr)
            if value is not None:
                query_parameters[key] = value
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if skip is not None:
        query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
    response = self._send(http_method='GET', location_id='e36d44fb-e907-4b0a-b194-f83f1ed32ad3', version='5.0', query_parameters=query_parameters)
    return self._deserialize('[TfvcShelvesetRef]', self._unwrap_collection(response))
def restore_type(self, dtype, sample=None):
    """Restore a schema type from a Pandas dtype (and an optional sample value)."""
    # Pandas dtypes take precedence.
    if pdc.is_bool_dtype(dtype):
        return 'boolean'
    if pdc.is_datetime64_any_dtype(dtype):
        return 'datetime'
    if pdc.is_integer_dtype(dtype):
        return 'integer'
    if pdc.is_numeric_dtype(dtype):
        return 'number'
    # Fall back to the Python type of the sample value.
    # NOTE: the order matters (e.g. date is checked before dict/string).
    if sample is not None:
        python_types = [
            ((list, tuple), 'array'),
            (datetime.date, 'date'),
            (isodate.Duration, 'duration'),
            (dict, 'object'),
            (six.string_types, 'string'),
            (datetime.time, 'time'),
        ]
        for cls, type_name in python_types:
            if isinstance(sample, cls):
                return type_name
    return 'string'
def download_sdk(self, rest_api_id, output_dir,
                 api_gateway_stage=DEFAULT_STAGE_NAME,
                 sdk_type='javascript'):
    # type: (str, str, str, str) -> None
    """Download an SDK to a directory.

    This will generate an SDK and download it to the provided
    ``output_dir``.  If you're using ``get_sdk_download_stream()``,
    you have to handle downloading the stream and unzipping the
    contents yourself.  This method handles that for you.

    :param rest_api_id: The id of the REST API to generate an SDK for.
    :param output_dir: Directory into which the renamed SDK directory
        (``chalice-<sdk_type>-sdk``) is moved.
    :param api_gateway_stage: The API gateway stage to use.
    :param sdk_type: The type of SDK to generate (e.g. ``javascript``).
    :raises RuntimeError: If the downloaded zip does not contain exactly
        one top-level directory.
    """
    zip_stream = self.get_sdk_download_stream(
        rest_api_id, api_gateway_stage=api_gateway_stage, sdk_type=sdk_type)
    tmpdir = tempfile.mkdtemp()
    try:
        zip_path = os.path.join(tmpdir, 'sdk.zip')
        with open(zip_path, 'wb') as f:
            f.write(zip_stream.read())
        tmp_extract = os.path.join(tmpdir, 'extracted')
        with zipfile.ZipFile(zip_path) as z:
            z.extractall(tmp_extract)
        # The extracted zip dir is expected to contain a single directory,
        # e.g. ['apiGateway-js-sdk'].
        dirnames = os.listdir(tmp_extract)
        if len(dirnames) == 1:
            full_dirname = os.path.join(tmp_extract, dirnames[0])
            if os.path.isdir(full_dirname):
                final_dirname = 'chalice-%s-sdk' % sdk_type
                full_renamed_name = os.path.join(tmp_extract, final_dirname)
                os.rename(full_dirname, full_renamed_name)
                shutil.move(full_renamed_name, output_dir)
                return
        raise RuntimeError(
            "The downloaded SDK had an unexpected directory structure: %s" %
            (', '.join(dirnames)))
    finally:
        # BUGFIX: the temporary directory was previously never removed,
        # leaking a tmpdir (and the sdk.zip) on every call.
        shutil.rmtree(tmpdir, ignore_errors=True)
def set_slave_bus_bypass(self, enable):
    """Enable or disable aux i2c bus bypass on the MPU-6050.

    When enabled, the auxiliary i2c bus is connected directly to the main
    i2c bus.  Don't forget to use wakeup() or else the slave bus is
    unavailable.
    :param enable: truthy to enable bypass mode, falsy to disable it
    :return:
    """
    # Bypass-enable is bit 1 of register 0x37; read-modify-write so the
    # other configuration bits are preserved.
    bypass_bit = 0b00000010
    register_value = self.i2c_read_register(0x37, 1)[0]
    if enable:
        register_value |= bypass_bit
    else:
        register_value &= 0b11111101
    self.i2c_write_register(0x37, register_value)
def chmod_and_retry(func, path, exc_info):
    """Error handler to pass to shutil.rmtree.

    On Windows, when a file is marked read-only as git likes to do, rmtree
    will fail.  This handler makes the offending path writable and retries
    the failed operation once.  Retrying os.listdir is known to be useless,
    so its errors (and any error on non-Windows platforms) are re-raised
    unchanged.
    """
    retryable = os.name == 'nt' and func is not os.listdir
    if not retryable:
        raise
    os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
    # If the operation still fails, this raises again.
    func(path)
def detach(self, filt, view=None):
    """Detach a filter.

    Parameters
    ----------
    filt : object
        The filter to detach.
    view : instance of VisualView | None
        The view to use.  When None, the filter is removed from the shared
        filter list and detached from every known view.
    """
    if view is not None:
        view._filters.remove(filt)
        filt._detach(view)
        return
    self._vshare.filters.remove(filt)
    for shared_view in self._vshare.views.keys():
        filt._detach(shared_view)
def getRemoteFile(urlOrPath, destPath):
    '''Fetch a URL to a local path, or just return the absolute path for local input.

    :param urlOrPath: resource locator, generally URL or path
    :param destPath: path to store the resource, usually a path on file system
    :return: tuple having (path, 'local'/'remote')
    '''
    parsed = urlparse(urlOrPath)
    if parsed.scheme not in ('http', 'https'):
        # No scheme at all means a plain filesystem path; any other scheme
        # (e.g. file://, ftp://) is passed through untouched.
        if parsed.scheme == '':
            return (os.path.abspath(urlOrPath), 'local')
        return (urlOrPath, 'local')
    destPath = destPath + '/' + toFilename(urlOrPath)
    log.info('Retrieving %s to %s.' % (urlOrPath, destPath))
    try:
        urlretrieve(urlOrPath, destPath)
    except IOError:
        # monkey patch fix for SSL/Windows per Tika-Python #54
        # https://github.com/chrismattmann/tika-python/issues/54
        import ssl
        if hasattr(ssl, '_create_unverified_context'):
            ssl._create_default_https_context = ssl._create_unverified_context
        # delete whatever partial download may be there, then retry once
        if os.path.exists(destPath) and os.path.isfile(destPath):
            os.remove(destPath)
        urlretrieve(urlOrPath, destPath)
    return (destPath, 'remote')
def explain_permutation_importance(estimator, vec=None, top=_TOP,
                                   target_names=None,  # ignored
                                   targets=None,  # ignored
                                   feature_names=None, feature_re=None,
                                   feature_filter=None,
                                   ):
    """Return an explanation of PermutationImportance.

    See :func:`eli5.explain_weights` for description of
    ``top``, ``feature_names``, ``feature_re`` and ``feature_filter``
    parameters.

    ``target_names`` and ``targets`` parameters are ignored.

    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the estimator (e.g. a fitted
    CountVectorizer instance); you can pass it instead of ``feature_names``.
    """
    mean_importances = estimator.feature_importances_
    importance_stds = estimator.feature_importances_std_
    # Regression vs. classification changes how the explanation is rendered.
    is_regression = isinstance(estimator.wrapped_estimator_, RegressorMixin)
    description = DESCRIPTION_SCORE_DECREASE + estimator.caveats_
    return get_feature_importance_explanation(
        estimator, vec, mean_importances,
        coef_std=importance_stds,
        feature_names=feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
        top=top,
        description=description,
        is_regression=is_regression,
    )
def get_dc_keywords(index_page):
    """Return list of `keywords` parsed from Dublin core.

    Args:
        index_page (str): Content of the page as UTF-8 string

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    # Each "dc.keywords" meta entry may hold several whitespace-separated
    # keywords; flatten them all, preserving order.
    return [
        SourceString(keyword, source="DC")
        for keyword_entry in parse_meta(index_page, "dc.keywords", "DC")
        for keyword in keyword_entry.split()
    ]
def spliced_offset(self, position):
    """Convert an absolute chromosomal position to the offset into this
    transcript's spliced mRNA.

    Position must be inside some exon (otherwise raise exception).
    """
    # This code is performance sensitive, so a plain assertion is used
    # instead of typechecks.require_integer.
    assert type(position) == int, \
        "Position argument must be an integer, got %s : %s" % (
            position, type(position))
    if position < self.start or position > self.end:
        raise ValueError(
            "Invalid position: %d (must be between %d and %d)" % (
                position, self.start, self.end))
    # Offset from the beginning of the unspliced transcript (introns included).
    unspliced_offset = self.offset(position)
    # Traverse exons in order of their appearance on the strand.  Since
    # absolute positions may decrease on the negative strand, unspliced
    # offsets are used instead so indices always increase.
    #
    #   Exon Name:       exon 1     exon 2
    #   Spliced Offset:  123456     789...
    #   Intron vs. Exon: ...iiiiieeeeeiiiiieeeeeiiiii...
    spliced_length_so_far = 0
    for exon in self.exons:
        exon_start_offset, exon_end_offset = self.offset_range(
            exon.start, exon.end)
        if exon_start_offset <= unspliced_offset <= exon_end_offset:
            # All offsets are base 0 and can be used as indices into the
            # sequence string: position's offset into this exon, plus the
            # total exonic length before it.
            return spliced_length_so_far + (unspliced_offset - exon_start_offset)
        # Not in this exon: accumulate its full length and keep looking.
        spliced_length_so_far += len(exon)
    raise ValueError(
        "Couldn't find position %d on any exon of %s" % (position, self.id))
def configure(self, *, handlers=None, levels=None, extra=None, activation=None):
    """Configure the core logger.

    ``extra`` values set using this function are available across all
    modules, so this is the best way to set overall default values.

    Parameters
    ----------
    handlers : list of dict, optional
        One dict of keyword arguments per handler, passed to ``add``.  If
        not ``None``, all previously added handlers are first removed.
    levels : list of dict, optional
        One dict of keyword arguments per level, passed to ``level``.  This
        never removes previously created levels.
    extra : dict, optional
        Parameters bound to the core logger, useful to share common
        properties between modules without calling ``bind``.  If not
        ``None``, the previously configured extra dict is replaced.
    activation : list of tuple, optional
        ``(name, state)`` pairs denoting loggers to enable (``True``) or
        disable (``False``), applied in list order.  Previously activated
        loggers are not modified; prepend ``("", False)`` or ``("", True)``
        for a fresh start.

    Returns
    -------
    list of int
        Identifiers of the added sinks (if any).
    """
    if handlers is None:
        handlers = []
    else:
        # Replacing handlers: drop everything that was added before.
        self.remove()
    for level_params in (levels or []):
        self.level(**level_params)
    if extra is not None:
        with self._lock:
            self._extra_class.clear()
            self._extra_class.update(extra)
    for name, state in (activation or []):
        (self.enable if state else self.disable)(name)
    return [self.add(**handler_params) for handler_params in handlers]
def _render_round_end ( self , rewards : np . array ) -> None :
'''Prints round end information about ` rewards ` .''' | print ( "*********************************************************" )
print ( ">>> ROUND END" )
print ( "*********************************************************" )
total_reward = np . sum ( rewards )
print ( "==> Objective value = {}" . format ( total_reward ) )
print ( "==> rewards = {}" . format ( list ( rewards ) ) )
print ( ) |
def join_room(room, sid=None, namespace=None):
    """Join a room.

    This function puts the user in a room, under the current namespace.  The
    user and the namespace are obtained from the event context.  This is a
    function that can only be called from a SocketIO event handler.  Example::

        @socketio.on('join')
        def on_join(data):
            username = session['username']
            room = data['room']
            join_room(room)
            send(username + ' has entered the room.', room=room)

    :param room: The name of the room to join.
    :param sid: The session id of the client.  If not provided, the client is
                obtained from the request context.
    :param namespace: The namespace for the room.  If not provided, the
                      namespace is obtained from the request context.
    """
    socketio = flask.current_app.extensions['socketio']
    # Fall back to the request context for anything the caller left out.
    active_sid = sid or flask.request.sid
    active_namespace = namespace or flask.request.namespace
    socketio.server.enter_room(active_sid, room, namespace=active_namespace)
def channel(self, rpc_timeout=60, lazy=False):
    """Open and return a new Channel.

    :param int rpc_timeout: Timeout before we give up waiting for an RPC
                            response from the server.
    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.
    """
    LOGGER.debug('Opening a new Channel')
    if not compatibility.is_integer(rpc_timeout):
        raise AMQPInvalidArgument('rpc_timeout should be an integer')
    if self.is_closed:
        raise AMQPConnectionError('socket/connection closed')
    # Channel-id allocation and registration must happen under the lock.
    with self.lock:
        channel_id = self._get_next_available_channel_id()
        new_channel = Channel(channel_id, self, rpc_timeout,
                              on_close_impl=self._cleanup_channel)
        self._channels[channel_id] = new_channel
        if not lazy:
            new_channel.open()
    LOGGER.debug('Channel #%d Opened', channel_id)
    return self._channels[channel_id]
def create_key(self, title, key):
    """Create a deploy key.

    :param str title: (required), title of key
    :param str key: (required), key text
    :returns: :class:`Key <github3.users.Key>` if successful, else None
    """
    # Both fields are required; bail out early without hitting the API.
    if not (title and key):
        return None
    url = self._build_url('keys', base_url=self._api)
    json = self._json(self._post(url, data={'title': title, 'key': key}), 201)
    return Key(json, self) if json else None
def _connected(self, link_uri):
    """Callback from the Crazyflie API when a Crazyflie has been connected
    and the TOCs have been downloaded."""
    print('Connected to %s' % link_uri)
    mems = self._cf.mem.get_mems(MemoryElement.TYPE_I2C)
    print('Found {} EEPOM(s)'.format(len(mems)))
    if not mems:
        return
    eeprom = mems[0]
    print('Writing default configuration to'
          ' memory {}'.format(eeprom.id))
    # Populate the default trim/radio configuration, then flush it.
    config = eeprom.elements
    config['version'] = 1
    config['pitch_trim'] = 0.0
    config['roll_trim'] = 0.0
    config['radio_channel'] = 80
    config['radio_speed'] = 0
    config['radio_address'] = 0xE7E7E7E7E7
    eeprom.write_data(self._data_written)
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2d numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.
    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc.  None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.
    dtype : string, type, list of types or None (default="numeric")
        Data type of result.  If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.
    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
    copy : boolean (default=False)
        Whether a forced copy will be triggered.  If copy=False, a copy might
        be triggered by a conversion.
    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.
    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.
    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.
    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array).  Setting to 0 disables this check.
    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns).  The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True.  Setting to
        0 disables this check.
    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.
    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # BUGFIX: `estimator` was documented in the docstring but missing from
    # the signature, so the reference to `estimator_name` below raised
    # NameError.  Added with a default of None to stay backward compatible.
    if estimator is None:
        estimator_name = "Estimator"
    elif isinstance(estimator, str):
        estimator_name = estimator
    else:
        estimator_name = type(estimator).__name__
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]
    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"
    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None
    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None
    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required.  Let's select the first element of
            # the list of accepted types.
            dtype = dtype[0]
    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        if ensure_2d:
            if array.ndim == 1:
                if ensure_min_samples >= 2:
                    raise ValueError("%s expects at least 2 samples provided "
                                     "in a 2 dimensional array-like input"
                                     % estimator_name)
                # BUGFIX: the concatenated message previously read
                # "...and willraise ValueError..." (missing space).
                warnings.warn(
                    "Passing 1d arrays as data is deprecated in 0.17 and will "
                    "raise ValueError in 0.19. Reshape your data either using "
                    "X.reshape(-1, 1) if your data has a single feature or "
                    "X.reshape(1, -1) if it contains a single sample.",
                    DeprecationWarning)
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. expected <= 2."
                             % (array.ndim))
        if force_all_finite:
            _assert_all_finite(array)
    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))
    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))
    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s."
               % (dtype_orig, array.dtype))
        warnings.warn(msg, DataConversionWarning)
    return array
def nvmlDeviceGetAccountingStats(handle, pid):
    r"""Query a process's accounting stats (C API: nvmlDeviceGetAccountingStats).

    Accounting stats capture GPU utilization and other statistics across the
    lifetime of a process and can be queried both during its lifetime and
    after termination (the time field is reported as 0 while the process is
    alive and updated to the actual running time afterwards).  Stats are
    kept in a circular buffer, so newly created processes overwrite
    information about old ones; on pid collision only the latest process
    (that terminated last) is reported.

    Requires a Kepler or newer fully supported device with accounting mode
    enabled (see nvmlDeviceGetAccountingMode).  Only compute and graphics
    application stats can be queried.  On Kepler, per-process statistics are
    accurate only with a single process on the GPU.

    :param handle: the identifier of the target device
    :param pid: process id of the target process to query stats for
    :return: the process's accounting stats (nvmlAccountingStats_t)

    Error codes (raised via the NVML return-code check): uninitialized,
    invalid argument, not found, not supported, unknown.

    See also nvmlDeviceGetAccountingBufferSize / nvmlDeviceGetAccountingPids.
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingStats")
    stats = c_nvmlAccountingStats_t()
    result = fn(handle, c_uint(pid), byref(stats))
    _nvmlCheckReturn(result)
    if stats.maxMemoryUsage == NVML_VALUE_NOT_AVAILABLE_ulonglong.value:
        # Special case for WDDM on Windows: max memory usage is unavailable.
        stats.maxMemoryUsage = None
    return bytes_to_str(stats)
def to_dict(self, tool_long_names=True):
    """Get a representation of the workflow as a dictionary for display purposes.

    :param tool_long_names: Indicates whether to use long names, such as
        SplitterFromStream(element=None, use_mapping_keys_only=True)
        or short names, such as splitter_from_stream
    :type tool_long_names: bool
    :return: The dictionary of nodes, factors and plates
    """
    result = dict(nodes=[], factors=[], plates=defaultdict(list))
    for node_key in self.nodes:
        node_id = self.nodes[node_key].node_id
        result['nodes'].append({'id': node_id})
        for plate_id in self.nodes[node_key].plate_ids:
            result['plates'][plate_id].append({'id': node_id, 'type': 'node'})
    for factor in self.factors:
        tool_name = str(factor.tool) if tool_long_names else factor.tool.name
        try:
            source_ids = [source.node_id for source in factor.sources]
        except AttributeError:
            # Single-source factors expose `source` instead of `sources`.
            source_ids = [factor.source.node_id] if factor.source else []
        result['factors'].append({
            'id': tool_name,
            'sources': source_ids,
            'sink': factor.sink.node_id,
        })
        try:
            if factor.plates:
                for plate in factor.plates:
                    result['plates'][plate.plate_id].append(
                        {'id': tool_name, 'type': 'factor'})
            else:
                # Factors with no plates are attached to the root plate.
                result['plates']['root'].append(
                    {'id': tool_name, 'type': 'factor'})
        except AttributeError:
            pass
    result['plates'] = dict(result['plates'])
    return result
def _print_beam(self,
                sequences: mx.nd.NDArray,
                accumulated_scores: mx.nd.NDArray,
                finished: mx.nd.NDArray,
                inactive: mx.nd.NDArray,
                constraints: List[Optional[constrained.ConstrainedHypothesis]],
                timestep: int) -> None:
    """Prints the beam for debugging purposes.

    :param sequences: The beam histories (shape: batch_size * beam_size, max_output_len).
    :param accumulated_scores: The accumulated scores for each item in the beam.
           Shape: (batch_size * beam_size, target_vocab_size).
    :param finished: Indicates which items are finished (shape: batch_size * beam_size).
    :param inactive: Indicates any inactive items (shape: batch_size * beam_size).
    :param timestep: The current timestep.
    """
    logger.info('BEAM AT TIMESTEP %d', timestep)
    for hyp_idx in range(sequences.shape[0]):
        # For each hypothesis, print its entire history.
        score = accumulated_scores[hyp_idx].asscalar()
        word_ids = [int(word.asscalar()) for word in sequences[hyp_idx]]
        constraint = constraints[hyp_idx]
        unmet = constraint.num_needed() if constraint is not None else -1
        if inactive[hyp_idx]:
            hypothesis = '----------'
        else:
            # Word id 0 is skipped when rendering the surface form.
            hypothesis = ' '.join(
                [self.vocab_target_inv[word] for word in word_ids if word != 0])
        logger.info('%d %d %d %d %.2f %s',
                    hyp_idx + 1,
                    finished[hyp_idx].asscalar(),
                    inactive[hyp_idx].asscalar(),
                    unmet,
                    score,
                    hypothesis)
def convert_errno(e):
    """Convert an errno value (as from an ``OSError`` or ``IOError``) into a
    standard SFTP result code.  This is a convenience function for trapping
    exceptions in server code and returning an appropriate result.

    :param int e: an errno code, as from ``OSError.errno``.
    :return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``.
    """
    if e == errno.EACCES:
        # permission denied
        return SFTP_PERMISSION_DENIED
    if e in (errno.ENOENT, errno.ENOTDIR):
        # no such file
        return SFTP_NO_SUCH_FILE
    return SFTP_FAILURE
def list_color_tag(self, pkg):
    """Tag installed packages with color."""
    installed_name = GetFromInstalled(pkg).name()
    search_term = installed_name + self.meta.sp
    # Explicit package archives are looked up by their bare name.
    if pkg.endswith((".txz", ".tgz")):
        search_term = pkg[:-4]
    if find_package(search_term, self.meta.pkg_path):
        pkg = "{0}{1}{2}".format(self.meta.color["GREEN"], pkg,
                                 self.meta.color["ENDC"])
    return pkg
def csd(timeseries, other, segmentlength, noverlap=None, **kwargs):
    """Calculate the CSD of two `TimeSeries` using Welch's method.

    Parameters
    ----------
    timeseries : `~gwpy.timeseries.TimeSeries`
        time-series of data
    other : `~gwpy.timeseries.TimeSeries`
        time-series of data
    segmentlength : `int`
        number of samples in single average.
    noverlap : `int`
        number of samples to overlap between segments, defaults to 50%.
    **kwargs
        other keyword arguments are passed to :meth:`scipy.signal.csd`

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        average power `FrequencySeries`

    See also
    --------
    scipy.signal.csd
    """
    try:
        frequencies, csd_values = scipy.signal.csd(
            timeseries.value, other.value,
            noverlap=noverlap,
            fs=timeseries.sample_rate.decompose().value,
            nperseg=segmentlength, **kwargs)
    except AttributeError as exc:
        # scipy.signal.csd only exists in scipy >= 0.16.
        exc.args = ('{}, scipy>=0.16 is required'.format(str(exc)),)
        raise
    # generate FrequencySeries and return
    unit = scale_timeseries_unit(timeseries.unit,
                                 kwargs.get('scaling', 'density'))
    return FrequencySeries(csd_values, unit=unit, frequencies=frequencies,
                           name=str(timeseries.name) + '---' + str(other.name),
                           epoch=timeseries.epoch, channel=timeseries.channel)
def derivativeY(self, mLvl, pLvl, MedShk):
    '''Evaluate the derivative of consumption and medical care with respect
    to permanent income at given levels of market resources, permanent
    income, and medical need shocks.

    Parameters
    ----------
    mLvl : np.array
        Market resource levels.
    pLvl : np.array
        Permanent income levels; should be same size as mLvl.
    MedShk : np.array
        Medical need shocks; should be same size as mLvl.

    Returns
    -------
    dcdp : np.array
        Derivative of consumption with respect to permanent income for each
        point in (xLvl, MedShk).
    dMeddp : np.array
        Derivative of medical care with respect to permanent income for each
        point in (xLvl, MedShk).
    '''
    # Total expenditure and its derivative w.r.t. permanent income.
    total_spending = self.xFunc(mLvl, pLvl, MedShk)
    dxdp = self.xFunc.derivativeY(mLvl, pLvl, MedShk)
    # Chain rule: consumption responds to spending, spending to income.
    dcdp = dxdp * self.cFunc.derivativeX(total_spending, MedShk)
    # Expenditure not spent on consumption buys medical care at MedPrice.
    dMeddp = (dxdp - dcdp) / self.MedPrice
    return dcdp, dMeddp
def remove(self, key):
    """Remove the first key-value pair with key *key*.

    If the key was not found, a ``KeyError`` is raised.
    """
    # Position the search path just before any node that could hold `key`.
    self._find_lt(key)
    candidate = self._path[0][2]
    missing = candidate is self._tail or key < candidate[0]
    if missing:
        raise KeyError('{!r} is not in list'.format(key))
    self._remove(candidate)
def grad_global_norm(parameters, max_norm):
    """Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
    such that their 2-norm does not exceed `max_norm`.

    If gradients exist for more than one context for a parameter, user needs to explicitly call
    ``trainer.allreduce_grads`` so that the gradients are summed first before calculating
    the 2-norm.

    .. note::
        This function is only for use when `update_on_kvstore` is set to False in trainer.

    Example::

        trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...)
        for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]):
            with mx.autograd.record():
                y = net(x)
                loss = loss_fn(y, label)
            loss.backward()
        trainer.allreduce_grads()
        norm, ratio = grad_global_norm(net.collect_params().values(), max_norm)
        trainer.update(batch_size * ratio)

    Parameters
    ----------
    parameters : list of Parameters

    Returns
    -------
    NDArray
        Total norm. Shape is (1,)
    NDArray
        Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio.
        If total norm is NaN, ratio will be NaN, too. Shape is (1,)
    NDArray
        Whether the total norm is finite. Shape is (1,)
    """
    # collect gradient arrays
    arrays = []
    idx = 0
    for p in parameters:
        if p.grad_req != 'null':
            p_grads = p.list_grad()
            # NOTE(review): picks one per-context gradient copy per parameter
            # in round-robin order -- presumably to spread the norm
            # computation across devices; confirm against caller contract
            # (gradients must already be allreduced, per docstring).
            arrays.append(p_grads[idx % len(p_grads)])
            idx += 1
    assert len(arrays) > 0, 'No parameter found available for gradient norm.'
    # compute gradient norms
    def _norm(array):
        # TODO(haibin) norm operator does not support fp16 safe reduction.
        # Issue is tracked at: https://github.com/apache/incubator-mxnet/issues/14126
        x = array.reshape((-1,)).astype('float32', copy=False)
        # squared 2-norm of this array (sqrt is taken once, after summing)
        return nd.dot(x, x)
    norm_arrays = [_norm(arr) for arr in arrays]
    # group norm arrays by ctx
    def group_by_ctx(arr_list):
        groups = collections.defaultdict(list)
        for arr in arr_list:
            ctx = arr.context
            groups[ctx].append(arr)
        return groups
    norm_groups = group_by_ctx(norm_arrays)
    # reduce: sum per-context first, then combine on a single context
    ctx, dtype = arrays[0].context, 'float32'
    norms = [nd.add_n(*g).as_in_context(ctx) for g in norm_groups.values()]
    total_norm = nd.add_n(*norms).sqrt()
    scale = total_norm / max_norm
    # is_finite = 0 if NaN or Inf, 1 otherwise.
    is_finite = nd.contrib.isfinite(scale)
    # if scale is finite, nd.maximum selects the max between scale and 1. That is,
    # 1 is returned if total_norm does not exceed max_norm.
    # if scale = NaN or Inf, the result of nd.minimum is undefined. Therefore, we use
    # choices.take to return NaN or Inf.
    scale_or_one = nd.maximum(nd.ones((1,), dtype=dtype, ctx=ctx), scale)
    # index 0 holds the raw (possibly NaN/Inf) scale, index 1 the clipped one;
    # is_finite selects between them so NaN/Inf propagates to the caller.
    choices = nd.concat(scale, scale_or_one, dim=0)
    chosen_scale = choices.take(is_finite)
    return total_norm, chosen_scale, is_finite
def help(self, subject=None, args=None):
    """Get help information about Automation API.

    The following values can be specified for the subject:
        None -- gets an overview of help.
        'commands' -- gets a list of API functions
        command name -- get info about the specified command.
        object type -- get info about the specified object type
        handle value -- get info about the object type referred to

    Arguments:
    subject -- Optional. Subject to get help on.
    args -- Optional. Additional arguments for searching help. These
            are used when the subject is 'list'.

    Return:
    String of help information.
    """
    # NOTE(review): when subject is falsy this returns None, despite the
    # docstring promising an overview of help -- confirm intended behavior.
    if subject:
        if subject not in ('commands', 'create', 'config', 'get', 'delete',
                           'perform', 'connect', 'connectall', 'disconnect',
                           'disconnectall', 'apply', 'log', 'help'):
            # Non-keyword subjects require a live session and are forwarded
            # to the server as a help topic with optional search args.
            self._check_session()
            status, data = self._rest.get_request('help', subject, args)
        else:
            # NOTE(review): known keywords appear to fall back to the
            # generic help request without the subject -- confirm.
            status, data = self._rest.get_request('help')
        # The server may answer with either a sequence of items or a
        # dict carrying a 'message' entry.
        if isinstance(data, (list, tuple, set)):
            return ' '.join((str(i) for i in data))
        return data['message']
def do_help(self, argv):
    """${cmd_name}: give detailed help on a specific sub-command

    Usage:
        ${name} help [COMMAND]
    """
    if len(argv) > 1:  # asking for help on a particular command
        doc = None
        cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
        if not cmdname:
            return self.helpdefault(argv[1], False)
        else:
            # An explicit help_<cmd> method wins over the handler docstring.
            helpfunc = getattr(self, "help_" + cmdname, None)
            if helpfunc:
                doc = helpfunc()
            else:
                handler = self._get_cmd_handler(cmdname)
                if handler:
                    doc = handler.__doc__
                if doc is None:
                    # IDIOM FIX: `handler is not None` instead of `!= None`.
                    return self.helpdefault(argv[1], handler is not None)
    else:  # bare "help" command
        doc = self.__class__.__doc__  # try class docstring
        if doc is None:
            # Try to provide some reasonable useful default help.
            if self.cmdlooping:
                prefix = ""
            else:
                prefix = self.name + ' '
            doc = """Usage:
%sCOMMAND [ARGS...]
%shelp [COMMAND]
${option_list}
${command_list}
${help_list}
""" % (prefix, prefix)
        cmdname = None
    if doc:  # *do* have help content, massage and print that
        doc = self._help_reindent(doc)
        doc = self._help_preprocess(doc, cmdname)
        doc = doc.rstrip() + '\n'  # trim down trailing space
        self.stdout.write(self._str(doc))
        self.stdout.flush()
def get_monitor_ping(request):
    """MNCore.ping() → Boolean.

    Build the Boolean-typed "true" response and stamp it with an
    HTTP Date header before returning it.
    """
    ping_response = d1_gmn.app.views.util.http_response_with_boolean_true_type()
    d1_gmn.app.views.headers.add_http_date_header(ping_response)
    return ping_response
def summary(args):
    """
    %prog summary txtfile fastafile

    The txtfile can be generated by: %prog mstmap --noheader --freq=0

    Tabulate on all possible combinations of genotypes and provide results
    in a nicely-formatted table. Give a fastafile for SNP rate (average
    # of SNPs per Kb).

    Only three-column file is supported:
    locus_id intra-genotype inter-genotype
    """
    from jcvi.utils.cbook import thousands
    from jcvi.utils.table import tabulate

    p = OptionParser(summary.__doc__)
    p.add_option("--counts", help="Print SNP counts in a txt file [default: %default]")
    p.add_option("--bed", help="Print SNPs locations in a bed file [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    txtfile, fastafile = args
    bedfw = open(opts.bed, "w") if opts.bed else None

    fp = open(txtfile)
    # BUG FIX: `fp.next()` is Python-2-only; `next(fp)` works on both 2 and 3
    # (the rest of the function already uses print(..., file=...)).
    header = next(fp).split()  # Header
    snps = defaultdict(list)  # contig => list of loci
    combinations = defaultdict(int)
    intraSNPs = interSNPs = 0
    distinctSet = set()  # set of genes that show A-B pattern
    ref, alt = header[1:3]
    snpcounts, goodsnpcounts = defaultdict(int), defaultdict(int)
    for row in fp:
        atoms = row.split()
        assert len(atoms) == 3, "Only three-column file is supported"
        locus, intra, inter = atoms
        ctg, pos = locus.rsplit(".", 1)
        pos = int(pos)
        snps[ctg].append(pos)
        snpcounts[ctg] += 1
        if intra == 'X':
            intraSNPs += 1
        if inter in ('B', 'X'):
            interSNPs += 1
        if intra == 'A' and inter == 'B':
            distinctSet.add(ctg)
            goodsnpcounts[ctg] += 1
        # Tabulate all possible combinations
        intra = ref + "-" + intra
        inter = alt + "-" + inter
        combinations[(intra, inter)] += 1
        if bedfw:
            print("\t".join(str(x) for x in (ctg, pos - 1, pos, locus)), file=bedfw)

    if bedfw:
        logging.debug("SNP locations written to `{0}`.".format(opts.bed))
        bedfw.close()

    nsites = sum(len(x) for x in snps.values())
    sizes = Sizes(fastafile)
    bpsize = sizes.totalsize
    snprate = lambda a: a * 1000. / bpsize  # SNPs per Kb of total assembly
    m = "Dataset `{0}` contains {1} contigs ({2} bp).\n".format(
        fastafile, len(sizes), thousands(bpsize))
    m += "A total of {0} SNPs within {1} contigs ({2} bp).\n".format(
        nsites, len(snps), thousands(sum(sizes.mapping[x] for x in snps.keys())))
    m += "SNP rate: {0:.1f}/Kb, ".format(snprate(nsites))
    m += "IntraSNPs: {0} ({1:.1f}/Kb), InterSNPs: {2} ({3:.1f}/Kb)".format(
        intraSNPs, snprate(intraSNPs), interSNPs, snprate(interSNPs))
    print(m, file=sys.stderr)
    print(tabulate(combinations), file=sys.stderr)
    leg = "Legend: A - homozygous same, B - homozygous different, X - heterozygous"
    print(leg, file=sys.stderr)

    tag = (ref + "-A", alt + "-B")
    distinctSNPs = combinations[tag]
    tag = str(tag).replace("'", "")
    print("A total of {0} disparate {1} SNPs in {2} contigs.".format(
        distinctSNPs, tag, len(distinctSet)), file=sys.stderr)

    if not opts.counts:
        return

    snpcountsfile = opts.counts
    fw = open(snpcountsfile, "w")
    header = "\t".join(("Contig", "#_SNPs", "#_AB_SNP"))
    print(header, file=fw)
    # Sanity: per-contig tallies must agree with the global counts.
    assert sum(snpcounts.values()) == nsites
    assert sum(goodsnpcounts.values()) == distinctSNPs
    for ctg in sorted(snps.keys()):
        snpcount = snpcounts[ctg]
        goodsnpcount = goodsnpcounts[ctg]
        print("\t".join(str(x) for x in (ctg, snpcount, goodsnpcount)), file=fw)
    fw.close()
    logging.debug("SNP counts per contig is written to `{0}`.".format(snpcountsfile))
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_portsgroup_rbridgeid ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
show_fibrechannel_interface_info = ET . Element ( "show_fibrechannel_interface_info" )
config = show_fibrechannel_interface_info
output = ET . SubElement ( show_fibrechannel_interface_info , "output" )
show_fibrechannel_interface = ET . SubElement ( output , "show-fibrechannel-interface" )
portsgroup_rbridgeid = ET . SubElement ( show_fibrechannel_interface , "portsgroup-rbridgeid" )
portsgroup_rbridgeid . text = kwargs . pop ( 'portsgroup_rbridgeid' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def _make_constants(func, builtin_only=False, stoplist=None, verbose=None):
    """Generate new function where code is an input function code with all
    LOAD_GLOBAL statements changed to LOAD_CONST statements.

    :param function func: code function to transform.
    :param bool builtin_only: only transform builtin objects.
    :param list stoplist: attribute names to not transform.
    :param function verbose: logger function which takes in parameter a message

    .. warning::
        Be sure global attributes to transform are not resolved dynamically.
    """
    # NOTE(review): the byte-offset arithmetic below (1-byte opcode followed
    # by a 2-byte little-endian argument) matches the pre-3.6 CPython
    # bytecode layout -- confirm this module is not used on 3.6+ wordcode.
    result = func
    if stoplist is None:
        stoplist = []
    try:
        fcode = func.__code__
    except AttributeError:
        # Jython doesn't have a __code__ attribute.
        return func
    # Mutable working copies of the bytecode and the constant pool.
    newcode = list(fcode.co_code) if PY3 else [ord(co) for co in fcode.co_code]
    newconsts = list(fcode.co_consts)
    names = fcode.co_names
    codelen = len(newcode)
    # Resolution environment: builtins, optionally extended with the
    # function's own globals.
    env = vars(builtins).copy()
    if builtin_only:
        stoplist = dict.fromkeys(stoplist)
        stoplist.update(func.__globals__)
    else:
        env.update(func.__globals__)
    # First pass converts global lookups into constants
    changed = False
    i = 0
    while i < codelen:
        opcode = newcode[i]
        # Give up entirely on code we cannot safely rewrite.
        if opcode in (EXTENDED_ARG, STORE_GLOBAL):
            return func
        # for simplicity, only optimize common cases
        if opcode == LOAD_GLOBAL:
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            name = fcode.co_names[oparg]
            if name in env and name not in stoplist:
                value = env[name]
                # Reuse an existing constant slot holding the same object.
                for pos, val in enumerate(newconsts):
                    if val is value:
                        break
                else:
                    pos = len(newconsts)
                    newconsts.append(value)
                # Patch LOAD_GLOBAL -> LOAD_CONST in place (same length).
                newcode[i] = LOAD_CONST
                newcode[i + 1] = pos & 0xFF
                newcode[i + 2] = pos >> 8
                changed = True
                if verbose is not None:
                    verbose("{0} --> {1}".format(name, value))
        i += 1
        if opcode >= HAVE_ARGUMENT:
            i += 2
    # Second pass folds tuples of constants and constant attribute lookups
    i = 0
    while i < codelen:
        # Collect the run of consecutive LOAD_CONSTs preceding this opcode.
        newtuple = []
        while newcode[i] == LOAD_CONST:
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            newtuple.append(newconsts[oparg])
            i += 3
        opcode = newcode[i]
        if not newtuple:
            i += 1
            if opcode >= HAVE_ARGUMENT:
                i += 2
            continue
        if opcode == LOAD_ATTR:
            # Fold <const>.<attr> into a single constant if it resolves now.
            obj = newtuple[-1]
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            name = names[oparg]
            try:
                value = getattr(obj, name)
            except AttributeError:
                continue
            deletions = 1
        elif opcode == BUILD_TUPLE:
            # Fold BUILD_TUPLE of all-constant operands into a tuple constant.
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            if oparg != len(newtuple):
                continue
            deletions = len(newtuple)
            value = tuple(newtuple)
        else:
            continue
        # Jump over the now-redundant LOAD_CONSTs instead of shifting the
        # bytecode (keeps all other offsets valid).
        reljump = deletions * 3
        newcode[i - reljump] = JUMP_FORWARD
        newcode[i - reljump + 1] = (reljump - 3) & 0xFF
        newcode[i - reljump + 2] = (reljump - 3) >> 8
        nclen = len(newconsts)
        newconsts.append(value)
        newcode[i] = LOAD_CONST
        newcode[i + 1] = nclen & 0xFF
        newcode[i + 2] = nclen >> 8
        i += 3
        changed = True
        if verbose is not None:
            verbose("new folded constant:{0}".format(value))
    if changed:
        # Rebuild a function object around the patched code object.
        codeobj = getcodeobj(newconsts, newcode, fcode, fcode)
        result = type(func)(codeobj, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
        # set func attributes to result
        for prop in WRAPPER_ASSIGNMENTS:
            try:
                attr = getattr(func, prop)
            except AttributeError:
                pass
            else:
                setattr(result, prop, attr)
    return result
def namespace_map(self, target):
    """Returns the namespace_map used for Thrift generation.

    :param target: The target to extract the namespace_map from.
    :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary`
    :returns: The namespaces to remap (old to new).
    :rtype: dictionary
    """
    self._check_target(target)
    # Prefer the target's own map; fall back to the configured default
    # when the target declares none (or an empty one).
    declared = target.namespace_map
    if declared:
        return declared
    return self._default_namespace_map
def show_buff(self, pos):
    """Return the display of the instruction.

    :rtype: string
    """
    # Render each key:target pair in hex; keep the original trailing space.
    pieces = [self.get_name()]
    for idx in range(len(self.keys)):
        pieces.append("%x:%x" % (self.keys[idx], self.targets[idx]))
    return " ".join(pieces) + " "
def to_json_object(self):
    """Returns a dict representation that can be serialized to JSON."""
    json_obj = {
        'namespace_start': self.namespace_start,
        'namespace_end': self.namespace_end,
    }
    # 'app' is only emitted when it was explicitly set.
    if self.app is not None:
        json_obj['app'] = self.app
    return json_obj
def do_req(self, method, url, body=None, headers=None, status=None):
    """Used internally to send a request to the API, left public
    so it can be used to talk to the API more directly.
    """
    # Serialize the body (empty string when absent) and dispatch.
    payload = '' if body is None else json.dumps(body)
    raw = self.backend.dispatch_request(method=method,
                                        url=url,
                                        body=payload,
                                        headers=self.get_headers(headers),
                                        auth=self.auth)
    res = raw if isinstance(raw, MapiResponse) else MapiResponse(*raw)
    # Without an expected status, any 2xx is accepted; otherwise require
    # an exact match.
    acceptable = (res.status // 100 == 2) if status is None else (res.status == status)
    if not acceptable:
        raise MapiError(*res)
    return res
def scroll_backward_vertically(self, steps=10, *args, **selectors):
    """Perform scroll backward (vertically) action on the object which has
    *selectors* attributes.

    Return whether the object can be Scroll or not.
    See `Scroll Forward Vertically` for more details.
    """
    target = self.device(**selectors)
    return target.scroll.vert.backward(steps=steps)
def _days_from_3744(hebrew_year):
    """Return: Number of days since 3,1,3744.

    All time arithmetic below is done in "parts" (chalakim) via the
    module-level PARTS_IN_* constants and get_chalakim().
    """
    # Start point for calculation is Molad new year 3744 (16BC)
    years_from_3744 = hebrew_year - 3744
    # Molad 3744 + 6 hours in parts
    molad_3744 = get_chalakim(1 + 6, 779)
    # Time in months
    # Number of leap months in the elapsed 19-year cycles
    leap_months = (years_from_3744 * 7 + 1) // 19
    # Months left of leap cycle
    leap_left = (years_from_3744 * 7 + 1) % 19
    # Total Number of months
    months = years_from_3744 * 12 + molad_3744 if False else years_from_3744 * 12 + leap_months
    # Time in parts and days
    # Molad This year + Molad 3744 - corrections
    parts = months * PARTS_IN_MONTH + molad_3744
    # 28 days in month + corrections
    days = months * 28 + parts // PARTS_IN_DAY - 2
    # Time left for round date in corrections
    # 28 % 7 = 0 so only corrections counts
    parts_left_in_week = parts % PARTS_IN_WEEK
    parts_left_in_day = parts % PARTS_IN_DAY
    week_day = parts_left_in_week // PARTS_IN_DAY
    # pylint: disable=too-many-boolean-expressions
    # pylint-comment: Splitting the 'if' below might create a bug in case
    # the order is not kept.
    # Molad postponement rules (the original comments named these in
    # Hebrew): the first clause appears to be the "GaTaRaD" rule
    # (weekday 3, 9h 204p, common year) and the second "BeTU'TaKPaT"
    # (weekday 2, 15h 589p) -- TODO confirm against a calendar reference.
    if ((leap_left < 12 and week_day == 3 and parts_left_in_day >= get_chalakim(9 + 6, 204)) or  # first rule
            (leap_left < 7 and week_day == 2 and parts_left_in_day >= get_chalakim(15 + 6, 589))):  # second rule
        days += 1
        week_day += 1
    # pylint: enable=too-many-boolean-expressions
    # ADU: postpone when the computed weekday lands on 1, 4 or 6.
    if week_day in (1, 4, 6):
        days += 1
    return days
def granulometry_filter(image, min_radius, max_radius, mask=None):
    '''Enhances bright structures within a min and max radius using a rolling ball filter

    image - grayscale 2-d image
    min_radius - smallest structure scale (in erosion steps) to keep
    max_radius - largest structure scale (in erosion steps) to keep
    mask - optional mask forwarded to the morphological operators

    Structures are selected by successive morphological openings: at each
    step the difference between consecutive openings isolates features of
    that scale, and the per-pixel maxima over steps in
    [min_radius, max_radius] are accumulated into the output.
    '''
    # Do 4-connected erosion
    se = np.array([[False, True, False], [True, True, True], [False, True, False]])
    # Initialize
    inverted_image = image.max() - image
    previous_opened_image = image
    eroded_image = image
    selected_granules_image = np.zeros(image.shape)
    # Select granules by successive morphological openings
    # NOTE(review): `grey_erosion(..., mask=...)` and the image passed as the
    # second positional argument of `grey_dilation` do not match the
    # scipy.ndimage signatures; these look like CellProfiler/centrosome
    # helpers (masked/clipped morphology) -- confirm the actual import source.
    for i in range(max_radius + 1):
        eroded_image = grey_erosion(eroded_image, mask=mask, footprint=se)
        # Dilation limited by the inverted image: one opening step.
        opened_image = grey_dilation(eroded_image, inverted_image, footprint=se)
        # Difference between consecutive openings = features at this scale.
        output_image = previous_opened_image - opened_image
        if i >= min_radius:
            selected_granules_image = np.maximum(selected_granules_image, output_image)
        previous_opened_image = opened_image
    return selected_granules_image
def _make_phylesystem_cache_region(**kwargs):
    """Only intended to be called by the Phylesystem singleton.

    Configures (at most once per process) a dogpile.cache region backed by
    redis, verifies it with a set/get round trip, stores it in the
    module-level _REGION, and returns it. Returns None when caching could
    not be set up.
    """
    global _CACHE_REGION_CONFIGURED, _REGION
    if _CACHE_REGION_CONFIGURED:
        return _REGION
    _CACHE_REGION_CONFIGURED = True
    try:
        # noinspection PyPackageRequirements
        from dogpile.cache import make_region
    except Exception:
        # BUG FIX: was a bare `except:`; Exception keeps the best-effort
        # behavior without swallowing KeyboardInterrupt/SystemExit.
        _LOG.debug('dogpile.cache not available')
        return
    region = None
    trial_key = 'test_key'
    trial_val = {'test_val': [4, 3]}
    trying_redis = True
    if trying_redis:
        try:
            a = {
                'host': 'localhost',
                'port': 6379,
                'db': 0,  # default is 0
                'redis_expiration_time': 60 * 60 * 24 * 2,  # 2 days
                'distributed_lock': False  # True if multiple processes will use redis
            }
            region = make_region().configure('dogpile.cache.redis', arguments=a)
            _LOG.debug('cache region set up with cache.redis.')
            _LOG.debug('testing redis caching...')
            # Round-trip check before trusting the backend.
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('redis caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except Exception:
            # BUG FIX: was a bare `except:` (see above).
            _LOG.debug('redis cache set up failed.')
            region = None
    trying_file_dbm = False
    if trying_file_dbm:
        # NOTE: this fallback is deliberately disabled; kept for reference.
        _LOG.debug('Going to try dogpile.cache.dbm ...')
        first_par = _get_phylesystem_parent(**kwargs)[0]
        cache_db_dir = os.path.split(first_par)[0]
        cache_db = os.path.join(cache_db_dir, 'phylesystem-cachefile.dbm')
        _LOG.debug('dogpile.cache region using "{}"'.format(cache_db))
        try:
            a = {'filename': cache_db}
            region = make_region().configure('dogpile.cache.dbm', expiration_time=36000, arguments=a)
            _LOG.debug('cache region set up with cache.dbm.')
            _LOG.debug('testing anydbm caching...')
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug('anydbm caching works')
            region.delete(trial_key)
            _REGION = region
            return region
        except Exception:
            # BUG FIX: was a bare `except:` (see above).
            _LOG.debug('anydbm cache set up failed')
            _LOG.debug('exception in the configuration of the cache.')
    _LOG.debug('Phylesystem will not use caching')
    return None
def map_rus_to_lat(char):
    """Map a Cyrillic capital letter to the visually identical Latin letter.

    Characters without a Latin look-alike are returned unchanged.
    """
    lookalikes = {
        u'Е': u'E', u'Т': u'T', u'У': u'Y', u'О': u'O',
        u'Р': u'P', u'А': u'A', u'Н': u'H', u'К': u'K',
        u'Х': u'X', u'С': u'C', u'В': u'B', u'М': u'M',
    }
    # dict.get with a default collapses the original membership test.
    return lookalikes.get(char, char)
def _item_to_entry(iterator, entry_pb, loggers):
    """Convert a log entry protobuf to the native object.

    .. note::
        This method does not have the correct signature to be used as the
        ``item_to_value`` argument to
        :class:`~google.api_core.page_iterator.Iterator`. It is intended to
        be patched with a mutable ``loggers`` argument that can be updated
        on subsequent calls. For an example, see how the method is used
        above in :meth:`_LoggingAPI.list_entries`.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type entry_pb: :class:`.log_entry_pb2.LogEntry`
    :param entry_pb: Log entry protobuf returned from the API.

    :type loggers: dict
    :param loggers: Mapping of logger fullnames -> loggers; an entry owned
        by a logger missing from the mapping gets a newly-created one.

    :rtype: :class:`~google.cloud.logging.entries._BaseEntry`
    :returns: The next log entry in the page.
    """
    parsed_resource = _parse_log_entry(entry_pb)
    return entry_from_resource(parsed_resource, iterator.client, loggers)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.